arm: exception-assisted kernel panic/oops support
Put the reason code in r0 and issue an SVC #2 call, which is propagated
to _fatal_error_handler as an exception.

The _is_in_isr() implementation had to be tweaked a bit. The
user-generated SVC exception is no longer used only for irq_offload(),
so being in it does not necessarily mean we are in interrupt context.
Instead, have the irq_offload code set and clear the offload_routine
global; it will be non-NULL only while an offload is in progress.
Upcoming changes to support memory protection (which will require
system calls) will need this too.

We also free up a small amount of ROM by deleting the _default_esf
struct, which is no longer needed.

Issue: ZEP-843
Change-Id: Ie82bd708575934cffe41e64f5c128c8704ca4e48
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent 7827b7bf4a
commit 75caa2b084

6 changed files with 60 additions and 38 deletions
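To summarize the flow before the diff: the caller loads the reason code into r0 and executes SVC #2; the __svc handler recognizes call number 2, branches to _oops, and hands the stacked exception frame to _do_kernel_oops(), which forwards it to the fatal error handler. A caller-side sketch follows (the function name is hypothetical; the real macro, _ARCH_EXCEPT, appears in a header hunk below):

/* Sketch of the caller side of the new fatal-exception path: put the
 * reason code in r0, then trap with SVC #2 (_SVC_CALL_RUNTIME_EXCEPT).
 * In practice the exception never returns to this point.
 */
static inline void runtime_except_sketch(unsigned int reason)
{
	__asm__ volatile ("mov r0, %0\n\t"
			  "svc #2\n\t"
			  :
			  : "r" (reason)
			  : "r0", "memory");
}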
@@ -19,28 +19,6 @@
 #include <kernel_structs.h>
 #include <misc/printk.h>
 
-/*
- * Define a default ESF for use with _NanoFatalErrorHandler() in the event
- * the caller does not have a NANO_ESF to pass
- */
-const NANO_ESF _default_esf = {
-	{0xdeaddead}, /* r0/a1 */
-	{0xdeaddead}, /* r1/a2 */
-	{0xdeaddead}, /* r2/a3 */
-	{0xdeaddead}, /* r3/a4 */
-	{0xdeaddead}, /* r12/ip */
-	{0xdeaddead}, /* r14/lr */
-	{0xdeaddead}, /* r15/pc */
-	0xdeaddead, /* xpsr */
-#ifdef CONFIG_FLOAT
-	{0xdeaddead, 0xdeaddead, 0xdeaddead, 0xdeaddead, /* s0 .. s3 */
-	 0xdeaddead, 0xdeaddead, 0xdeaddead, 0xdeaddead, /* s4 .. s7 */
-	 0xdeaddead, 0xdeaddead, 0xdeaddead, 0xdeaddead, /* s8 .. s11 */
-	 0xdeaddead, 0xdeaddead, 0xdeaddead, 0xdeaddead}, /* s12 .. s15 */
-	0xdeaddead, /* fpscr */
-	0xdeaddead, /* undefined */
-#endif
-};
 
 /**
  *
@@ -108,3 +86,8 @@ void _NanoFatalErrorHandler(unsigned int reason,
 
 	_SysFatalErrorHandler(reason, pEsf);
 }
+
+void _do_kernel_oops(const NANO_ESF *esf)
+{
+	_NanoFatalErrorHandler(esf->r0, esf);
+}
@@ -11,7 +11,7 @@
 #include <kernel.h>
 #include <irq_offload.h>
 
-static irq_offload_routine_t offload_routine;
+volatile irq_offload_routine_t offload_routine;
 static void *offload_param;
 
 /* Called by __svc */
@@ -28,7 +28,12 @@ void irq_offload(irq_offload_routine_t routine, void *parameter)
 	offload_routine = routine;
 	offload_param = parameter;
 
-	__asm__ volatile ("svc #1" : : : "memory");
+	__asm__ volatile ("svc %[id]"
+			  :
+			  : [id] "i" (_SVC_CALL_IRQ_OFFLOAD)
+			  : "memory");
+
+	offload_routine = NULL;
 
 	irq_unlock(key);
 }
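For context, a minimal sketch of how this API is typically exercised, e.g. from a test (illustration only, not part of this diff; the function names and parameter value are arbitrary):

#include <zephyr.h>
#include <irq_offload.h>
#include <misc/printk.h>

static void offloaded(void *param)
{
	/* Runs in the SVC #1 handler; k_is_in_isr() reports true here
	 * because offload_routine is non-NULL while this executes, even
	 * though the SVC exception no longer counts as an interrupt by
	 * itself. */
	printk("in ISR: %d, param: %p\n", k_is_in_isr(), param);
}

void offload_demo(void)
{
	irq_offload(offloaded, (void *)42);
	/* offload_routine has been cleared again by the time we return */
}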
@@ -27,6 +27,7 @@ GTEXT(__svc)
 #error Unknown ARM architecture
 #endif /* CONFIG_ARMV6_M */
 GTEXT(__pendsv)
+GTEXT(_do_kernel_oops)
 GDATA(_k_neg_eagain)
 
 GDATA(_kernel)
@@ -193,32 +194,39 @@ _thread_irq_disabled:
 
 SECTION_FUNC(TEXT, __svc)
 
-#if CONFIG_IRQ_OFFLOAD
 	tst lr, #0x4 /* did we come from thread mode ? */
 	ite eq /* if zero (equal), came from handler mode */
 	mrseq r0, MSP /* handler mode, stack frame is on MSP */
 	mrsne r0, PSP /* thread mode, stack frame is on PSP */
 
-	ldr r0, [r0, #24] /* grab address of PC from stack frame */
+	ldr r1, [r0, #24] /* grab address of PC from stack frame */
 	/* SVC is a two-byte instruction, point to it and read encoding */
-	ldrh r0, [r0, #-2]
+	ldrh r1, [r1, #-2]
 
 	/*
-	 * grab service call number: if zero, it's a context switch; if not,
-	 * it's an irq offload
+	 * grab service call number:
+	 * 0: context switch
+	 * 1: irq_offload (if configured)
+	 * 2: kernel panic or oops (software generated fatal exception)
+	 * Planned implementation of system calls for memory protection will
+	 * expand this case.
	 */
-	ands r0, #0xff
+	ands r1, #0xff
 	beq _context_switch
 
+	cmp r1, #2
+	beq _oops
+
+#if CONFIG_IRQ_OFFLOAD
 	push {lr}
 	blx _irq_do_offload /* call C routine which executes the offload */
 	pop {lr}
 
 	/* exception return is done in _IntExit() */
 	b _IntExit
+#endif
 
 _context_switch:
-#endif
 
 	/*
	 * Unlock interrupts:
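For reference (not part of this diff): on exception entry the Cortex-M hardware pushes a fixed basic frame, which is why __svc reads the stacked PC at offset 24 above and why the reason code written to r0 before the SVC shows up as esf->r0 in _do_kernel_oops(). A simplified sketch of that layout follows; the kernel's real definition is struct __esf / NANO_ESF, and the name below is hypothetical:

#include <stdint.h>

/* Basic Cortex-M hardware-stacked exception frame (simplified sketch) */
struct esf_sketch {
	uint32_t r0;   /* offset 0: first argument / oops reason code */
	uint32_t r1;   /* offset 4 */
	uint32_t r2;   /* offset 8 */
	uint32_t r3;   /* offset 12 */
	uint32_t r12;  /* offset 16 */
	uint32_t lr;   /* offset 20: r14 */
	uint32_t pc;   /* offset 24: return address, just past the SVC */
	uint32_t xpsr; /* offset 28 */
};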
@@ -235,6 +243,12 @@ _context_switch:
 
 	/* handler mode exit, to PendSV */
 	bx lr
 
+_oops:
+	push {lr}
+	blx _do_kernel_oops
+	pop {pc}
+
 #else
 #error Unknown ARM architecture
 #endif /* CONFIG_ARMV6_M */
@@ -28,14 +28,19 @@ extern "C" {
 #else
 
 #include <arch/arm/cortex_m/cmsis.h>
+#include <irq_offload.h>
+
+#ifdef CONFIG_IRQ_OFFLOAD
+extern volatile irq_offload_routine_t offload_routine;
+#endif
 
 /**
  *
  * @brief Find out if running in an ISR context
  *
  * The current executing vector is found in the IPSR register. We consider the
- * IRQs (exception 16 and up), and the SVC, PendSV, and SYSTICK exceptions,
- * to be interrupts. Taking a fault within an exception is also considered in
+ * IRQs (exception 16 and up), and the PendSV and SYSTICK exceptions to be
+ * interrupts. Taking a fault within an exception is also considered in
  * interrupt context.
  *
  * @return 1 if in ISR, 0 if not.
@@ -44,8 +49,12 @@ static ALWAYS_INLINE int _IsInIsr(void)
 {
 	u32_t vector = _IpsrGet();
 
-	/* IRQs + PendSV (14) + SVC (11) + SYSTICK (15) are interrupts. */
-	return (vector > 10)
+	/* IRQs + PendSV (14) + SYSTICK (15) are interrupts. */
+	return (vector > 13)
+#ifdef CONFIG_IRQ_OFFLOAD
+		/* Only non-NULL if currently running an offloaded function */
+		|| offload_routine != NULL
+#endif
 #if defined(CONFIG_ARMV6_M)
 		/* On ARMv6-M there is no nested execution bit, so we check
 		 * exception 3, hard fault, to a detect a nested exception.
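The magic numbers in that check come straight from the Cortex-M IPSR exception numbering. For reference, these are architectural values; the enumerator names below are made up for illustration:

/* Cortex-M IPSR exception numbers relevant to _IsInIsr() */
enum cortex_m_ipsr_values {
	IPSR_THREAD_MODE = 0,
	IPSR_HARD_FAULT  = 3,   /* used by the ARMv6-M nesting check */
	IPSR_SVCALL      = 11,  /* no longer counted as "in ISR" by itself */
	IPSR_PENDSV      = 14,
	IPSR_SYSTICK     = 15,
	IPSR_IRQ0        = 16,  /* external interrupts start here */
};

So (vector > 13) matches PendSV, SysTick, and all IRQs, but not an SVC taken directly from thread mode.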
@@ -32,6 +32,19 @@ extern void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
 #define _NANO_ERR_KERNEL_OOPS (4) /* Kernel oops (fatal to thread) */
 #define _NANO_ERR_KERNEL_PANIC (5) /* Kernel panic (fatal to system) */
 
+#define _SVC_CALL_IRQ_OFFLOAD 1
+#define _SVC_CALL_RUNTIME_EXCEPT 2
+
+#define _ARCH_EXCEPT(reason_p) do { \
+	__asm__ volatile ( \
+		"mov r0, %[reason]\n\t" \
+		"svc %[id]\n\t" \
+		: \
+		: [reason] "i" (reason_p), [id] "i" (_SVC_CALL_RUNTIME_EXCEPT) \
+		: "memory"); \
+	CODE_UNREACHABLE; \
+	} while (0)
+
 #ifdef __cplusplus
 }
 #endif
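A hedged sketch of how the architecture-independent layer could sit on top of these reason codes (the wrapper names below are hypothetical; the generic oops/panic plumbing is outside this diff):

/* Illustration only: raise a thread-fatal oops or a system-fatal panic
 * through the SVC #2 (_SVC_CALL_RUNTIME_EXCEPT) path defined above.
 */
#define demo_oops()	_ARCH_EXCEPT(_NANO_ERR_KERNEL_OOPS)
#define demo_panic()	_ARCH_EXCEPT(_NANO_ERR_KERNEL_PANIC)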
@@ -63,8 +63,6 @@ struct __esf {
 
 typedef struct __esf NANO_ESF;
 
-extern const NANO_ESF _default_esf;
-
 extern void _ExcExit(void);
 
 /**