x86: exception-assisted panic/oops support

We reserve a specific vector in the IDT that software can trigger when
it needs to enter a fatal exception state.

The option is disabled for the drivers/build_all tests, as those builds
were already up against the ROM limit on Quark D2000.

Issue: ZEP-843
Change-Id: I4de7f025fba0691d07bcc3b3f0925973834496a0
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Commit 7827b7bf4a by Andrew Boie, 2017-04-18 15:22:54 -07:00, committed by Anas Nashif
7 changed files with 89 additions and 23 deletions
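The commit message above describes the mechanism at a high level: a call site that hits an unrecoverable condition raises the oops from software, and the reserved IDT vector turns it into a normal exception so the fatal error report carries the program counter and register values. A minimal usage sketch follows; the helper name and check are illustrative, not part of this change:

    #include <kernel.h>

    /* Hypothetical caller: any kernel-mode code that detects an
     * unrecoverable state can raise an oops with k_oops(). With this
     * commit the oops is delivered through CONFIG_X86_KERNEL_OOPS_VECTOR,
     * so the report includes EIP and the saved registers.
     */
    void check_state(int status)   /* illustrative, not in the diff */
    {
        if (status != 0) {
            k_oops();
        }
    }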


@@ -256,6 +256,26 @@ config IRQ_OFFLOAD_VECTOR
where there is a fixed IRQ-to-vector mapping another value may be
needed to avoid collision.
config X86_KERNEL_OOPS
bool "Enable handling of kernel oops as an exception"
default y
help
Enable handling of k_oops() API as a CPU exception, which will provide
extra debugging information such as program counter and register
values when the oops is triggered. Requires an entry in the IDT.
config X86_KERNEL_OOPS_VECTOR
int "IDT vector to use for kernel oops"
default 62 if MVIC
default 33 if !MVIC
range 32 255
depends on X86_KERNEL_OOPS
help
Specify the IDT vector to use for the kernel oops exception handler.
The default should be fine for most arches, but on systems like MVIC
where there is a fixed IRQ-to-vector mapping another value may be
needed to avoid collision.
config XIP
default n
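The two new options above control whether the software-oops vector exists and which IDT slot it occupies. On a board whose interrupt controller uses a fixed IRQ-to-vector mapping, the default could collide, in which case a configuration fragment can pick another slot; the value below is purely illustrative and not part of this change:

    CONFIG_X86_KERNEL_OOPS=y
    CONFIG_X86_KERNEL_OOPS_VECTOR=34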


@@ -23,8 +23,11 @@
/* exports (internal APIs) */
GTEXT(_exception_enter)
GTEXT(_kernel_oops_handler)
/* externs (internal APIs) */
GTEXT(_do_kernel_oops)
/**
*
* @brief Inform the kernel of an exception
@@ -226,3 +229,9 @@ nestedException:
/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
iret
#if CONFIG_X86_KERNEL_OOPS
SECTION_FUNC(TEXT, _kernel_oops_handler)
push $0 /* dummy error code */
push $_do_kernel_oops
jmp _exception_enter
#endif
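The stub above imitates a hardware exception entry: it pushes a dummy error code plus the address of the C-level handler and then jumps into the common _exception_enter path, so register saving and handler dispatch work exactly as they do for a genuine CPU exception. A rough sketch of the resulting stack, inferred from the oops_esf layout added later in this commit (the pushed handler address is consumed by _exception_enter and never appears in the ESF):

    /* Stack contents when _do_kernel_oops() runs, highest address first:
     *
     *   reason code        <- pushed by _ARCH_EXCEPT() before the "int"
     *   EFLAGS, CS, EIP    <- pushed by the CPU for the software interrupt
     *   error code (0)     <- dummy pushed by _kernel_oops_handler
     *   saved registers    <- general-purpose registers saved by
     *                         _exception_enter to complete the NANO_ESF
     *
     * The C handler receives a pointer to this region as a struct oops_esf,
     * with the reason code sitting just above the NANO_ESF fields.
     */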


@@ -24,24 +24,6 @@
__weak void _debug_fatal_hook(const NANO_ESF *esf) { ARG_UNUSED(esf); }
/*
* Define a default ESF for use with _NanoFatalErrorHandler() in the event
* the caller does not have a NANO_ESF to pass
*/
const NANO_ESF _default_esf = {
0xdeaddead, /* ESP */
0xdeaddead, /* EBP */
0xdeaddead, /* EBX */
0xdeaddead, /* ESI */
0xdeaddead, /* EDI */
0xdeaddead, /* EDX */
0xdeaddead, /* ECX */
0xdeaddead, /* EAX */
0xdeaddead, /* error code */
0xdeaddead, /* EIP */
0xdeaddead, /* CS */
0xdeaddead, /* EFLAGS */
};
/**
*
@@ -110,9 +92,9 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
}
printk("Current thread ID = %p\n"
"Faulting segment:address = 0x%x:0x%x\n"
"eax: 0x%x, ebx: 0x%x, ecx: 0x%x, edx: 0x%x\n"
"esi: 0x%x, edi: 0x%x, ebp: 0%x, esp: 0x%x\n"
"Faulting segment:address = 0x%04x:0x%08x\n"
"eax: 0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x\n"
"esi: 0x%08x, edi: 0x%08x, ebp: 0x%08x, esp: 0x%08x\n"
"eflags: 0x%x\n",
k_current_get(),
pEsf->cs & 0xFFFF, pEsf->eip,
@@ -130,6 +112,45 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
_SysFatalErrorHandler(reason, pEsf);
}
#ifdef CONFIG_X86_KERNEL_OOPS
/* The reason code gets pushed onto the stack right before the exception is
* triggered, so it would be after the nano_esf data
*/
struct oops_esf {
NANO_ESF nano_esf;
unsigned int reason;
};
FUNC_NORETURN void _do_kernel_oops(const struct oops_esf *esf)
{
_NanoFatalErrorHandler(esf->reason, &esf->nano_esf);
}
extern void (*_kernel_oops_handler)(void);
NANO_CPU_INT_REGISTER(_kernel_oops_handler, NANO_SOFT_IRQ,
CONFIG_X86_KERNEL_OOPS_VECTOR / 16,
CONFIG_X86_KERNEL_OOPS_VECTOR, 0);
#else
/*
* Define a default ESF for use with _NanoFatalErrorHandler() in the event
* the caller does not have a NANO_ESF to pass
*/
const NANO_ESF _default_esf = {
0xdeaddead, /* ESP */
0xdeaddead, /* EBP */
0xdeaddead, /* EBX */
0xdeaddead, /* ESI */
0xdeaddead, /* EDI */
0xdeaddead, /* EDX */
0xdeaddead, /* ECX */
0xdeaddead, /* EAX */
0xdeaddead, /* error code */
0xdeaddead, /* EIP */
0xdeaddead, /* CS */
0xdeaddead, /* EFLAGS */
};
#endif /* CONFIG_X86_KERNEL_OOPS */
#if CONFIG_EXCEPTION_DEBUG
static FUNC_NORETURN void generic_exc_handle(unsigned int vector,
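Since the oops is now funneled through the ordinary exception path, the policy hook that the headers describe as a user-provided routine sees a fully populated ESF rather than _default_esf. A minimal sketch of such a hook, assuming the platform's default implementation is overridable (e.g. declared __weak, which this diff does not show) and using the include paths of this era; the body and recovery policy are illustrative only:

    #include <kernel.h>
    #include <misc/printk.h>

    /* Hypothetical application override: report the reason code and the
     * faulting EIP captured by the exception-assisted oops path, then
     * abort only the offending thread.
     */
    FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
                                             const NANO_ESF *pEsf)
    {
        printk("oops %u at eip 0x%08x\n", reason, pEsf->eip);
        k_thread_abort(k_current_get());
        CODE_UNREACHABLE;
    }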


@@ -498,11 +498,25 @@ extern u32_t _timer_cycle_get_32(void);
/** kernel provided routine to report any detected fatal error. */
extern FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,
const NANO_ESF * pEsf);
/** User provided routine to handle any detected fatal error post reporting. */
extern FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
const NANO_ESF * pEsf);
#if CONFIG_X86_KERNEL_OOPS
#define _ARCH_EXCEPT(reason_p) do { \
__asm__ volatile( \
"push %[reason]\n\t" \
"int %[vector]\n\t" \
: \
: [vector] "i" (CONFIG_X86_KERNEL_OOPS_VECTOR), \
[reason] "i" (reason_p)); \
CODE_UNREACHABLE; \
} while (0)
#else
/** Dummy ESF for fatal errors that would otherwise not have an ESF */
extern const NANO_ESF _default_esf;
#endif /* CONFIG_X86_KERNEL_OOPS */
#endif /* !_ASMLANGUAGE */
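For a constant reason code the macro above compiles down to two instructions; the sketch below assumes the non-MVIC default vector of 33 and is only meant to illustrate the generated sequence:

    /* _ARCH_EXCEPT(reason) with CONFIG_X86_KERNEL_OOPS_VECTOR=33 emits
     * roughly:
     *
     *     push $reason    ; immediate reason code, later read via oops_esf
     *     int  $33        ; software interrupt into _kernel_oops_handler
     *
     * Both operands use the "i" constraint, so the reason must be a
     * compile-time constant, and CODE_UNREACHABLE tells the compiler that
     * control never returns past the asm block.
     */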


@@ -13,3 +13,4 @@ CONFIG_SERIAL=n
CONFIG_IPM=n
CONFIG_GPIO=n
CONFIG_ERRNO=n
CONFIG_X86_KERNEL_OOPS=n


@@ -8,3 +8,4 @@ CONFIG_PWM=y
CONFIG_SERIAL=y
CONFIG_SPI=y
CONFIG_WATCHDOG=y
CONFIG_X86_KERNEL_OOPS=n


@@ -23,8 +23,8 @@
#endif
/* These vectors are somewhat arbitrary. We try and use unused vectors */
#define TEST_SOFT_INT 62
#define TEST_SPUR_INT 63
#define TEST_SOFT_INT 60
#define TEST_SPUR_INT 61
#define MY_STACK_SIZE 2048