arch: arc: bug fixes and optimization in exception handling

The original exception handling has room for optimization, and
some bugs need to be fixed.

* define NANO_ESF
   * add the definition of NANO_ESF which is an irq_stack_frame
   * add the corresponding codes in exception entry and handler
   * remove _default_esf
* implement the _ARCH_EXCEPT
   * use trap exception to raise exception by kernel
   * add corresponding trap exception entry
   * add _do_kernel_oops to handle the exception raised by
     _ARCH_EXCEPT.
* add the thread context switch in exception return
   * case: kernel oops may raise thread context switch
   * case: some tests will re-implement SysFatalHandler to raise
     thread context switch.
   * as exceptions and isrs are handled on the kernel isr stack,
     the thread context switch must happen in the exception/isr
     return path, so the exception handler must return and must not
     be decorated with FUNC_NORETURN
* for arc, _is_in_isr should consider the case of exception

Signed-off-by: Wayne Ren <wei.ren@synopsys.com>
This commit is contained in:
Wayne Ren 2018-04-12 13:13:08 +08:00 committed by Andrew Boie
commit 3d9ba10b5c
8 changed files with 132 additions and 110 deletions

View file

@ -18,10 +18,6 @@
#include <arch/cpu.h>
#include <misc/printk.h>
const NANO_ESF _default_esf = {
0xdeaddead, /* placeholder */
};
/**
*
* @brief Kernel fatal error handler
@ -37,14 +33,14 @@ const NANO_ESF _default_esf = {
*
* @return This function does not return.
*/
FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
const NANO_ESF *pEsf)
void _NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
{
switch (reason) {
case _NANO_ERR_HW_EXCEPTION:
break;
#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_ARC_STACK_CHECKING)
#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_ARC_STACK_CHECKING) \
|| defined(CONFIG_STACK_SENTINEL)
case _NANO_ERR_STACK_CHK_FAIL:
printk("***** Stack Check Fail! *****\n");
break;
@ -66,10 +62,13 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
printk("**** Unknown Fatal Error %d! ****\n", reason);
break;
}
printk("Current thread ID = %p\n"
"Faulting instruction address = 0x%lx\n",
k_current_get(),
_arc_v2_aux_reg_read(_ARC_V2_ERET));
printk("Current thread ID = %p\n", k_current_get());
if (reason == _NANO_ERR_HW_EXCEPTION) {
printk("Faulting instruction address = 0x%lx\n",
_arc_v2_aux_reg_read(_ARC_V2_ERET));
}
/*
* Now that the error has been reported, call the user implemented
@ -80,11 +79,12 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
*/
_SysFatalErrorHandler(reason, pEsf);
for (;;)
;
}
/*
 * @brief Handle a kernel oops raised through _ARCH_EXCEPT
 *
 * The _ARCH_EXCEPT trap path places the oops reason code in r0 before
 * issuing trap_s, so it arrives here in the saved exception stack
 * frame; forward it to the common fatal error handler.
 */
void _do_kernel_oops(const NANO_ESF *esf)
{
	unsigned int reason = esf->r0;

	_NanoFatalErrorHandler(reason, esf);
}
FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr)
{

View file

@ -29,7 +29,7 @@
*
* @return This function does not return.
*/
void _Fault(void)
void _Fault(const NANO_ESF *esf)
{
u32_t vector, code, parameter;
u32_t exc_addr = _arc_v2_aux_reg_read(_ARC_V2_EFA);
@ -47,8 +47,9 @@ void _Fault(void)
* check violation
*/
if (vector == 6 && parameter == 2) {
_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, &_default_esf);
_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, esf);
return;
}
#endif
_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf);
}

View file

@ -17,7 +17,7 @@
#include <swap_macros.h>
GTEXT(_Fault)
GTEXT(_do_kernel_oops)
GTEXT(__reset)
GTEXT(__memory_error)
GTEXT(__instruction_error)
@ -47,9 +47,6 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_tlb_miss_d)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_prot_v)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_privilege_v)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_swi)
#if !defined(CONFIG_IRQ_OFFLOAD) && !defined(CONFIG_USERSPACE)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
#endif
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_extension)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_div_zero)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_dc_error)
@ -79,7 +76,6 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_maligned)
lr r0,[_ARC_V2_ERET]
st_s r0, [sp, ___isf_t_pc_OFFSET] /* eret into pc */
#ifndef CONFIG_USERSPACE
ld r1, [exc_nest_count]
add r0, r1, 1
st r0, [exc_nest_count]
@ -92,34 +88,64 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_maligned)
ld sp, [r1, _kernel_offset_to_irq_stack]
exc_nest_handle:
push_s r0
#endif
jl _Fault
#ifndef CONFIG_USERSPACE
_exc_return:
pop sp
mov r1, exc_nest_count
ld r0, [r1]
sub r0, r0, 1
cmp r0, 0
bne.d _exc_return_from_exc
st r0, [r1]
#endif
/* if _Fault returns, restore the registers */
_pop_irq_stack_frame
#ifdef CONFIG_PREEMPT_ENABLED
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
/* check if the current thread needs to be rescheduled */
ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
breq r0, r2, _exc_return
_save_callee_saved_regs
st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause]
/* note: Ok to use _CAUSE_RIRQ since everything is saved */
ld_s r2, [r1, _kernel_offset_to_ready_q_cache]
st_s r2, [r1, _kernel_offset_to_current]
/* clear AE bit to forget this was an exception */
lr r3, [_ARC_V2_STATUS32]
and r3,r3,(~_ARC_V2_STATUS32_AE)
kflag r3
/* pretend lowest priority interrupt happened to use common handler */
lr r3, [_ARC_V2_AUX_IRQ_ACT]
or r3,r3,(1<<(CONFIG_NUM_IRQ_PRIO_LEVELS-1)) /* use lowest */
sr r3, [_ARC_V2_AUX_IRQ_ACT]
/* Assumption: r2 has current thread */
b _rirq_common_interrupt_swap
#endif
_exc_return_from_exc:
_pop_irq_stack_frame
rtie
#ifdef CONFIG_IRQ_OFFLOAD
GTEXT(_irq_do_offload);
#endif
#if defined(CONFIG_IRQ_OFFLOAD) || defined(CONFIG_USERSPACE)
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
#ifdef CONFIG_USERSPACE
/* get the id of trap_s */
lr ilink, [_ARC_V2_ECR]
and ilink, ilink, 0x3f
cmp ilink, 0x3
#ifdef CONFIG_USERSPACE
cmp ilink, _TRAP_S_CALL_SYSTEM_CALL
bne _do_other_trap
/* do sys_call */
mov ilink, _SYSCALL_LIMIT
@ -182,58 +208,19 @@ _do_other_trap:
trap_nest_handle:
push_s r0
mov blink, _exc_return
cmp ilink, _TRAP_S_CALL_RUNTIME_EXCEPT
beq _oops
#ifdef CONFIG_IRQ_OFFLOAD
jl _irq_do_offload
cmp ilink, _TRAP_S_SCALL_IRQ_OFFLOAD
bne _trap_fault
j _irq_do_offload
#endif
pop sp
_trap_fault:
j _Fault
/* check if we're a nested interrupt: if so, let the
* interrupted interrupt handle the reschedule
*/
mov r1, exc_nest_count
ld r0, [r1]
sub r0, r0, 1
cmp r0, 0
beq.d _trap_check_for_swap
st r0, [r1]
_trap_return:
_pop_irq_stack_frame
rtie
.balign 4
_trap_check_for_swap:
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
/*
* Non-preemptible thread ? Do not schedule (see explanation of
* preempt field in kernel_struct.h).
*/
ldh_s r0, [r2, _thread_offset_to_preempt]
brhs r0, _NON_PREEMPT_THRESHOLD, _trap_return
/* check if the current thread needs to be rescheduled */
ld_s r0, [r1, _kernel_offset_to_ready_q_cache]
breq r0, r2, _trap_return
_save_callee_saved_regs
st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause]
/* note: Ok to use _CAUSE_RIRQ since everything is saved */
ld_s r2, [r1, _kernel_offset_to_ready_q_cache]
st_s r2, [r1, _kernel_offset_to_current]
/* clear AE bit to forget this was an exception */
lr r3, [_ARC_V2_STATUS32]
and r3,r3,(~_ARC_V2_STATUS32_AE)
kflag r3
/* pretend lowest priority interrupt happened to use common handler */
lr r3, [_ARC_V2_AUX_IRQ_ACT]
or r3,r3,(1<<(CONFIG_NUM_IRQ_PRIO_LEVELS-1)) /* use lowest */
sr r3, [_ARC_V2_AUX_IRQ_ACT]
/* Assumption: r2 has current thread */
b _rirq_common_interrupt_swap
#endif /* CONFIG_IRQ_OFFLOAD || CONFIG_USERSPACE */
_oops:
j _do_kernel_oops

View file

@ -37,15 +37,22 @@
*
* @return N/A
*/
FUNC_NORETURN __weak void _SysFatalErrorHandler(unsigned int reason,
__weak void _SysFatalErrorHandler(unsigned int reason,
const NANO_ESF *pEsf)
{
ARG_UNUSED(pEsf);
#if !defined(CONFIG_SIMPLE_FATAL_ERROR_HANDLER)
#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_ARC_STACK_CHECKING) \
|| defined(CONFIG_STACK_SENTINEL)
if (reason == _NANO_ERR_STACK_CHK_FAIL) {
goto hang_system;
}
#endif
if (reason == _NANO_ERR_KERNEL_PANIC) {
goto hang_system;
}
if (k_is_in_isr() || _is_thread_essential()) {
printk("Fatal fault in %s! Spinning...\n",
k_is_in_isr() ? "ISR" : "essential thread");

View file

@ -42,20 +42,6 @@ _set_thread_return_value(struct k_thread *thread, unsigned int value)
thread->arch.return_value = value;
}
/*
 * @brief Report whether the CPU is currently servicing an interrupt
 *
 * Checks the low 16 bits of AUX_IRQ_ACT (one bit per active interrupt
 * priority level). With IRQ offload enabled, also treats an active
 * TRAP_S exception (STATUS32.AE set and ECR vector == EXC_EV_TRAP) as
 * ISR context, since offloaded IRQ work runs in that exception.
 *
 * @return 1 if in ISR (or offload trap) context, 0 otherwise
 */
static ALWAYS_INLINE int _is_in_isr(void)
{
	u32_t act = _arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT);
#if CONFIG_IRQ_OFFLOAD
	/* Check if we're in a TRAP_S exception as well */
	if (_arc_v2_aux_reg_read(_ARC_V2_STATUS32) & _ARC_V2_STATUS32_AE &&
	    _ARC_V2_ECR_VECTOR(_arc_v2_aux_reg_read(_ARC_V2_ECR)) == EXC_EV_TRAP
	    ) {
		return 1;
	}
#endif
	/* low 16 bits of AUX_IRQ_ACT: active interrupt priority levels */
	return ((act & 0xffff) != 0);
}
/**
*
* @brief Indicates the interrupt number of the highest priority
@ -70,6 +56,7 @@ static ALWAYS_INLINE int _INTERRUPT_CAUSE(void)
return irq_num;
}
#define _is_in_isr _arc_v2_irq_unit_is_in_isr
extern void _thread_entry_wrapper(void);
extern void _user_thread_entry_wrapper(void);

View file

@ -48,7 +48,8 @@ extern "C" {
* @return N/A
*/
static inline void _arc_v2_irq_unit_irq_enable_set(
static ALWAYS_INLINE
void _arc_v2_irq_unit_irq_enable_set(
int irq,
unsigned char enable
)
@ -65,7 +66,8 @@ static inline void _arc_v2_irq_unit_irq_enable_set(
* @return N/A
*/
static inline void _arc_v2_irq_unit_int_enable(int irq)
static ALWAYS_INLINE
void _arc_v2_irq_unit_int_enable(int irq)
{
_arc_v2_irq_unit_irq_enable_set(irq, _ARC_V2_INT_ENABLE);
}
@ -78,7 +80,8 @@ static inline void _arc_v2_irq_unit_int_enable(int irq)
* @return N/A
*/
static inline void _arc_v2_irq_unit_int_disable(int irq)
static ALWAYS_INLINE
void _arc_v2_irq_unit_int_disable(int irq)
{
_arc_v2_irq_unit_irq_enable_set(irq, _ARC_V2_INT_DISABLE);
}
@ -91,7 +94,8 @@ static inline void _arc_v2_irq_unit_int_disable(int irq)
* @return N/A
*/
static inline void _arc_v2_irq_unit_prio_set(int irq, unsigned char prio)
static ALWAYS_INLINE
void _arc_v2_irq_unit_prio_set(int irq, unsigned char prio)
{
_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
#ifdef CONFIG_ARC_HAS_SECURE
@ -114,12 +118,33 @@ static inline void _arc_v2_irq_unit_prio_set(int irq, unsigned char prio)
* @return N/A
*/
static inline void _arc_v2_irq_unit_sensitivity_set(int irq, int s)
static ALWAYS_INLINE
void _arc_v2_irq_unit_sensitivity_set(int irq, int s)
{
_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
_arc_v2_aux_reg_write(_ARC_V2_IRQ_TRIGGER, s);
}
/*
 * @brief Check whether the processor is in interrupt/exception state
 *
 * Reads STATUS32.AE to detect an active exception, and the low 16 bits
 * of AUX_IRQ_ACT (one bit per active priority level) to detect an
 * interrupt being serviced.
 *
 * @return 1 if the processor is handling an interrupt or exception,
 *	   0 otherwise
 */
static ALWAYS_INLINE
int _arc_v2_irq_unit_is_in_isr(void)
{
	unsigned int act = _arc_v2_aux_reg_read(_ARC_V2_AUX_IRQ_ACT);

	/* STATUS32.AE is set while an exception is being handled */
	if (_arc_v2_aux_reg_read(_ARC_V2_STATUS32) & _ARC_V2_STATUS32_AE) {
		return 1;
	}

	/* one bit per active interrupt priority level */
	return ((act & 0xffff) != 0);
}
/*
* @brief Sets an IRQ line to level/pulse trigger
*

View file

@ -22,8 +22,7 @@ extern "C" {
#ifndef _ASMLANGUAGE
#include <toolchain/gcc.h>
extern FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int,
const NANO_ESF*);
extern void _NanoFatalErrorHandler(unsigned int, const NANO_ESF*);
extern void _SysFatalErrorHandler(unsigned int cause, const NANO_ESF *esf);
#endif
@ -38,6 +37,28 @@ extern void _SysFatalErrorHandler(unsigned int cause, const NANO_ESF *esf);
#define _TRAP_S_CALL_RUNTIME_EXCEPT 2
#define _TRAP_S_CALL_SYSTEM_CALL 3
/*
 * An exception raised by the kernel is handled in interrupt context:
 * when the processor is already in interrupt/exception context, no new
 * exception needs to be raised and the fatal error handler is invoked
 * directly; when the processor is in thread context, a TRAP_S
 * instruction raises a software exception (handled by _do_kernel_oops)
 * so the error is processed on the interrupt stack. The reason code is
 * passed to the trap handler in r0.
 *
 * Fix: parenthesize the macro argument at its expansion site so an
 * expression argument cannot change the call's meaning (CERT PRE01-C).
 */
#define _ARCH_EXCEPT(reason_p) do { \
	if (_arc_v2_irq_unit_is_in_isr()) { \
		printk("@ %s:%d:\n", __FILE__, __LINE__); \
		_NanoFatalErrorHandler((reason_p), 0); \
	} else { \
		__asm__ volatile ( \
			"mov r0, %[reason]\n\t" \
			"trap_s %[id]\n\t" \
			: \
			: [reason] "i" (reason_p), \
			  [id] "i" (_TRAP_S_CALL_RUNTIME_EXCEPT) \
			: "memory"); \
		CODE_UNREACHABLE; \
	} \
} while (0)
#ifdef __cplusplus
}
#endif

View file

@ -20,13 +20,7 @@ extern "C" {
#ifdef _ASMLANGUAGE
#else
struct __esf {
/* XXX - not defined yet */
int placeholder;
};
typedef struct __esf NANO_ESF;
extern const NANO_ESF _default_esf;
typedef struct _irq_stack_frame NANO_ESF;
#endif
#ifdef __cplusplus