kernel: consolidate error handling code
* z_NanoFatalErrorHandler() is now moved to common kernel code and renamed z_fatal_error(). Arches dump arch-specific info before calling. * z_SysFatalErrorHandler() is now moved to common kernel code and renamed k_sys_fatal_error_handler(). It is now much simpler; the default policy is simply to lock interrupts and halt the system. If an implementation of this function returns, then the currently running thread is aborted. * New arch-specific APIs introduced: - z_arch_system_halt() simply powers off or halts the system. * We now have a standard set of fatal exception reason codes, namespaced under K_ERR_* * CONFIG_SIMPLE_FATAL_ERROR_HANDLER deleted * LOG_PANIC() calls moved to k_sys_fatal_error_handler() Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
parent
81245a0193
commit
71ce8ceb18
53 changed files with 397 additions and 1183 deletions
|
@ -241,15 +241,6 @@ config DYNAMIC_OBJECTS
|
|||
API call, or when the number of references to that object drops to
|
||||
zero.
|
||||
|
||||
config SIMPLE_FATAL_ERROR_HANDLER
|
||||
bool "Simple system fatal error handler"
|
||||
default y if !MULTITHREADING
|
||||
help
|
||||
Provides an implementation of _SysFatalErrorHandler() that hard hangs
|
||||
instead of aborting the faulting thread, and does not print anything,
|
||||
for footprint-concerned systems. Only enable this option if you do not
|
||||
want debug capabilities in case of system fatal error.
|
||||
|
||||
if ARCH_HAS_NOCACHE_MEMORY_SUPPORT
|
||||
|
||||
config NOCACHE_MEMORY
|
||||
|
|
|
@ -14,7 +14,6 @@ zephyr_library_sources(
|
|||
isr_wrapper.S
|
||||
regular_irq.S
|
||||
switch.S
|
||||
sys_fatal_error_handler.c
|
||||
prep_c.c
|
||||
reset.S
|
||||
vector_table.c
|
||||
|
|
|
@ -19,74 +19,18 @@
|
|||
#include <sys/printk.h>
|
||||
#include <logging/log_ctrl.h>
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Kernel fatal error handler
|
||||
*
|
||||
* This routine is called when fatal error conditions are detected by software
|
||||
* and is responsible only for reporting the error. Once reported, it then
|
||||
* invokes the user provided routine z_SysFatalErrorHandler() which is
|
||||
* responsible for implementing the error handling policy.
|
||||
*
|
||||
* The caller is expected to always provide a usable ESF. In the event that the
|
||||
* fatal error does not have a hardware generated ESF, the caller should either
|
||||
* create its own or use a pointer to the global default ESF <_default_esf>.
|
||||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
void z_NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
|
||||
void z_arc_fatal_error(unsigned int reason, const NANO_ESF *esf)
|
||||
{
|
||||
LOG_PANIC();
|
||||
|
||||
switch (reason) {
|
||||
case _NANO_ERR_HW_EXCEPTION:
|
||||
break;
|
||||
|
||||
#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_ARC_STACK_CHECKING) \
|
||||
|| defined(CONFIG_STACK_SENTINEL) || defined(CONFIG_MPU_STACK_GUARD)
|
||||
case _NANO_ERR_STACK_CHK_FAIL:
|
||||
printk("***** Stack Check Fail! *****\n");
|
||||
break;
|
||||
#endif
|
||||
|
||||
case _NANO_ERR_ALLOCATION_FAIL:
|
||||
printk("**** Kernel Allocation Failure! ****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_KERNEL_OOPS:
|
||||
printk("***** Kernel OOPS! *****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_KERNEL_PANIC:
|
||||
printk("***** Kernel Panic! *****\n");
|
||||
break;
|
||||
|
||||
default:
|
||||
printk("**** Unknown Fatal Error %d! ****\n", reason);
|
||||
break;
|
||||
}
|
||||
|
||||
printk("Current thread ID = %p\n", k_current_get());
|
||||
|
||||
if (reason == _NANO_ERR_HW_EXCEPTION) {
|
||||
if (reason == K_ERR_CPU_EXCEPTION) {
|
||||
printk("Faulting instruction address = 0x%lx\n",
|
||||
z_arc_v2_aux_reg_read(_ARC_V2_ERET));
|
||||
}
|
||||
|
||||
/*
|
||||
* Now that the error has been reported, call the user implemented
|
||||
* policy
|
||||
* to respond to the error. The decisions as to what responses are
|
||||
* appropriate to the various errors are something the customer must
|
||||
* decide.
|
||||
*/
|
||||
|
||||
z_SysFatalErrorHandler(reason, pEsf);
|
||||
z_fatal_error(reason, esf);
|
||||
}
|
||||
|
||||
FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
|
||||
{
|
||||
LOG_PANIC();
|
||||
z_SysFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, ssf_ptr);
|
||||
z_arc_fatal_error(K_ERR_KERNEL_OOPS, ssf_ptr);
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
|
|
@ -371,7 +371,7 @@ static void dump_exception_info(u32_t vector, u32_t cause, u32_t parameter)
|
|||
*
|
||||
* This routine is called when fatal error conditions are detected by hardware
|
||||
* and is responsible only for reporting the error. Once reported, it then
|
||||
* invokes the user provided routine z_SysFatalErrorHandler() which is
|
||||
* invokes the user provided routine k_sys_fatal_error_handler() which is
|
||||
* responsible for implementing the error handling policy.
|
||||
*/
|
||||
void _Fault(NANO_ESF *esf)
|
||||
|
@ -391,7 +391,6 @@ void _Fault(NANO_ESF *esf)
|
|||
}
|
||||
}
|
||||
#endif
|
||||
LOG_PANIC();
|
||||
|
||||
vector = Z_ARC_V2_ECR_VECTOR(ecr);
|
||||
cause = Z_ARC_V2_ECR_CODE(ecr);
|
||||
|
@ -399,7 +398,7 @@ void _Fault(NANO_ESF *esf)
|
|||
|
||||
/* exception raised by kernel */
|
||||
if (vector == ARC_EV_TRAP && parameter == _TRAP_S_CALL_RUNTIME_EXCEPT) {
|
||||
z_NanoFatalErrorHandler(esf->r0, esf);
|
||||
z_arc_fatal_error(esf->r0, esf);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -417,7 +416,7 @@ void _Fault(NANO_ESF *esf)
|
|||
* parameter = 0x2 | [0x4 | 0x8 | 0x1]
|
||||
*/
|
||||
if (vector == ARC_EV_PROT_V && parameter & 0x2) {
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, esf);
|
||||
z_arc_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
|
@ -426,10 +425,10 @@ void _Fault(NANO_ESF *esf)
|
|||
if (vector == ARC_EV_PROT_V && ((parameter == 0x4) ||
|
||||
(parameter == 0x24))) {
|
||||
if (z_check_thread_stack_fail(exc_addr, arc_exc_saved_sp)) {
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, esf);
|
||||
z_arc_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
|
||||
return;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf);
|
||||
z_arc_fatal_error(K_ERR_CPU_EXCEPTION, esf);
|
||||
}
|
||||
|
|
|
@ -1,74 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2014 Wind River Systems, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @brief ARCv2 system fatal error handler
|
||||
*
|
||||
* This module provides the z_SysFatalErrorHandler() routine for ARCv2 BSPs.
|
||||
*/
|
||||
|
||||
#include <kernel.h>
|
||||
#include <toolchain.h>
|
||||
#include <linker/sections.h>
|
||||
#include <kernel_structs.h>
|
||||
#include <sys/printk.h>
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Fatal error handler
|
||||
*
|
||||
* This routine implements the corrective action to be taken when the system
|
||||
* detects a fatal error.
|
||||
*
|
||||
* This sample implementation attempts to abort the current thread and allow
|
||||
* the system to continue executing, which may permit the system to continue
|
||||
* functioning with degraded capabilities.
|
||||
*
|
||||
* System designers may wish to enhance or substitute this sample
|
||||
* implementation to take other actions, such as logging error (or debug)
|
||||
* information to a persistent repository and/or rebooting the system.
|
||||
*
|
||||
* @param reason the fatal error reason
|
||||
* @param pEsf pointer to exception stack frame
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
__weak void z_SysFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
ARG_UNUSED(pEsf);
|
||||
|
||||
#if !defined(CONFIG_SIMPLE_FATAL_ERROR_HANDLER)
|
||||
#if defined(CONFIG_STACK_SENTINEL)
|
||||
if (reason == _NANO_ERR_STACK_CHK_FAIL) {
|
||||
goto hang_system;
|
||||
}
|
||||
#endif
|
||||
if (reason == _NANO_ERR_KERNEL_PANIC) {
|
||||
goto hang_system;
|
||||
}
|
||||
|
||||
if (z_is_thread_essential()) {
|
||||
printk("Fatal fault in essential thread! Spinning...\n");
|
||||
goto hang_system;
|
||||
}
|
||||
|
||||
printk("Fatal fault in thread %p! Aborting.\n", _current);
|
||||
|
||||
k_thread_abort(_current);
|
||||
|
||||
return;
|
||||
|
||||
hang_system:
|
||||
#else
|
||||
ARG_UNUSED(reason);
|
||||
#endif
|
||||
|
||||
for (;;) {
|
||||
k_cpu_idle();
|
||||
}
|
||||
}
|
|
@ -61,7 +61,7 @@ extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1,
|
|||
|
||||
|
||||
extern void z_arch_switch(void *switch_to, void **switched_from);
|
||||
|
||||
extern void z_arc_fatal_error(unsigned int reason, const NANO_ESF *esf);
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -19,7 +19,6 @@ zephyr_library_sources(
|
|||
cpu_idle.S
|
||||
fault_s.S
|
||||
fatal.c
|
||||
sys_fatal_error_handler.c
|
||||
thread_abort.c
|
||||
)
|
||||
|
||||
|
|
|
@ -20,93 +20,16 @@
|
|||
#include <sys/printk.h>
|
||||
#include <logging/log_ctrl.h>
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Kernel fatal error handler
|
||||
*
|
||||
* This routine is called when fatal error conditions are detected by software
|
||||
* and is responsible only for reporting the error. Once reported, it then
|
||||
* invokes the user provided routine z_SysFatalErrorHandler() which is
|
||||
* responsible for implementing the error handling policy.
|
||||
*
|
||||
* The caller is expected to always provide a usable ESF. In the event that the
|
||||
* fatal error does not have a hardware generated ESF, the caller should either
|
||||
* create its own or use a pointer to the global default ESF <_default_esf>.
|
||||
*
|
||||
* Unlike other arches, this function may return if z_SysFatalErrorHandler
|
||||
* determines that only the current thread should be aborted and the CPU
|
||||
* was in handler mode. PendSV will be asserted in this case and the current
|
||||
* thread taken off the run queue. Leaving the exception will immediately
|
||||
* trigger a context switch.
|
||||
*
|
||||
* @param reason the reason that the handler was called
|
||||
* @param pEsf pointer to the exception stack frame
|
||||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
void z_NanoFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
void z_arm_fatal_error(unsigned int reason, const NANO_ESF *esf)
|
||||
{
|
||||
#ifdef CONFIG_THREAD_NAME
|
||||
const char *thread_name = k_thread_name_get(k_current_get());
|
||||
#endif
|
||||
|
||||
LOG_PANIC();
|
||||
|
||||
switch (reason) {
|
||||
case _NANO_ERR_HW_EXCEPTION:
|
||||
printk("***** Hardware exception *****\n");
|
||||
break;
|
||||
#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_STACK_SENTINEL) || \
|
||||
defined(CONFIG_HW_STACK_PROTECTION) || \
|
||||
defined(CONFIG_USERSPACE)
|
||||
case _NANO_ERR_STACK_CHK_FAIL:
|
||||
printk("***** Stack Check Fail! *****\n");
|
||||
break;
|
||||
#endif /* CONFIG_STACK_CANARIES */
|
||||
|
||||
case _NANO_ERR_ALLOCATION_FAIL:
|
||||
printk("**** Kernel Allocation Failure! ****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_KERNEL_OOPS:
|
||||
printk("***** Kernel OOPS! *****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_KERNEL_PANIC:
|
||||
printk("***** Kernel Panic! *****\n");
|
||||
break;
|
||||
|
||||
default:
|
||||
printk("**** Unknown Fatal Error %d! ****\n", reason);
|
||||
break;
|
||||
}
|
||||
printk("Current thread ID = %p"
|
||||
#ifdef CONFIG_THREAD_NAME
|
||||
" (%s)"
|
||||
#endif
|
||||
"\n"
|
||||
"Faulting instruction address = 0x%x\n",
|
||||
k_current_get(),
|
||||
#ifdef CONFIG_THREAD_NAME
|
||||
thread_name ? thread_name : "unknown",
|
||||
#endif
|
||||
pEsf->basic.pc);
|
||||
|
||||
/*
|
||||
* Now that the error has been reported, call the user implemented
|
||||
* policy
|
||||
* to respond to the error. The decisions as to what responses are
|
||||
* appropriate to the various errors are something the customer must
|
||||
* decide.
|
||||
*/
|
||||
|
||||
z_SysFatalErrorHandler(reason, pEsf);
|
||||
printk("Faulting instruction address = 0x%x\n",
|
||||
esf->basic.pc);
|
||||
z_fatal_error(reason, esf);
|
||||
}
|
||||
|
||||
void z_do_kernel_oops(const NANO_ESF *esf)
|
||||
{
|
||||
z_NanoFatalErrorHandler(esf->basic.r0, esf);
|
||||
z_arm_fatal_error(esf->basic.r0, esf);
|
||||
}
|
||||
|
||||
FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
|
||||
|
@ -114,8 +37,6 @@ FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
|
|||
u32_t *ssf_contents = ssf_ptr;
|
||||
NANO_ESF oops_esf = { 0 };
|
||||
|
||||
LOG_PANIC();
|
||||
|
||||
oops_esf.basic.pc = ssf_contents[3];
|
||||
|
||||
z_do_kernel_oops(&oops_esf);
|
||||
|
|
|
@ -213,7 +213,7 @@ u32_t z_check_thread_stack_fail(const u32_t fault_addr,
|
|||
*/
|
||||
static u32_t MpuFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
|
||||
{
|
||||
u32_t reason = _NANO_ERR_HW_EXCEPTION;
|
||||
u32_t reason = K_ERR_CPU_EXCEPTION;
|
||||
u32_t mmfar = -EINVAL;
|
||||
|
||||
PR_FAULT_INFO("***** MPU FAULT *****\n");
|
||||
|
@ -304,7 +304,7 @@ static u32_t MpuFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
|
|||
*/
|
||||
__set_PSP(min_stack_ptr);
|
||||
|
||||
reason = _NANO_ERR_STACK_CHK_FAIL;
|
||||
reason = K_ERR_STACK_CHK_FAIL;
|
||||
} else {
|
||||
__ASSERT(0,
|
||||
"Stacking error not a stack fail\n");
|
||||
|
@ -336,7 +336,7 @@ static u32_t MpuFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
|
|||
*/
|
||||
static int BusFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
|
||||
{
|
||||
u32_t reason = _NANO_ERR_HW_EXCEPTION;
|
||||
u32_t reason = K_ERR_CPU_EXCEPTION;
|
||||
|
||||
PR_FAULT_INFO("***** BUS FAULT *****\n");
|
||||
|
||||
|
@ -456,7 +456,7 @@ static int BusFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
|
|||
__set_PSP(min_stack_ptr);
|
||||
|
||||
reason =
|
||||
_NANO_ERR_STACK_CHK_FAIL;
|
||||
K_ERR_STACK_CHK_FAIL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -490,7 +490,7 @@ static int BusFault(NANO_ESF *esf, int fromHardFault, bool *recoverable)
|
|||
*/
|
||||
static u32_t UsageFault(const NANO_ESF *esf)
|
||||
{
|
||||
u32_t reason = _NANO_ERR_HW_EXCEPTION;
|
||||
u32_t reason = K_ERR_CPU_EXCEPTION;
|
||||
|
||||
PR_FAULT_INFO("***** USAGE FAULT *****\n");
|
||||
|
||||
|
@ -512,7 +512,7 @@ static u32_t UsageFault(const NANO_ESF *esf)
|
|||
* on the reported faulty instruction address, to determine
|
||||
* the instruction that triggered the stack overflow.
|
||||
*/
|
||||
reason = _NANO_ERR_STACK_CHK_FAIL;
|
||||
reason = K_ERR_STACK_CHK_FAIL;
|
||||
#endif /* CONFIG_BUILTIN_STACK_GUARD */
|
||||
}
|
||||
#endif /* CONFIG_ARMV8_M_MAINLINE */
|
||||
|
@ -605,7 +605,7 @@ static void DebugMonitor(const NANO_ESF *esf)
|
|||
*/
|
||||
static u32_t HardFault(NANO_ESF *esf, bool *recoverable)
|
||||
{
|
||||
u32_t reason = _NANO_ERR_HW_EXCEPTION;
|
||||
u32_t reason = K_ERR_CPU_EXCEPTION;
|
||||
|
||||
PR_FAULT_INFO("***** HARD FAULT *****\n");
|
||||
|
||||
|
@ -657,7 +657,8 @@ static void ReservedException(const NANO_ESF *esf, int fault)
|
|||
/* Handler function for ARM fault conditions. */
|
||||
static u32_t FaultHandle(NANO_ESF *esf, int fault, bool *recoverable)
|
||||
{
|
||||
u32_t reason = _NANO_ERR_HW_EXCEPTION;
|
||||
u32_t reason = K_ERR_CPU_EXCEPTION;
|
||||
|
||||
*recoverable = false;
|
||||
|
||||
switch (fault) {
|
||||
|
@ -761,8 +762,8 @@ static void SecureStackDump(const NANO_ESF *secure_esf)
|
|||
* error handling policy allows the system to recover from the error),
|
||||
* - reporting the error information,
|
||||
* - determining the error reason to be provided as input to the user-
|
||||
* provided routine, z_NanoFatalErrorHandler().
|
||||
* The z_NanoFatalErrorHandler() is invoked once the above operations are
|
||||
* provided routine, k_sys_fatal_error_handler().
|
||||
* The k_sys_fatal_error_handler() is invoked once the above operations are
|
||||
* completed, and is responsible for implementing the error handling policy.
|
||||
*
|
||||
* The provided ESF pointer points to the exception stack frame of the current
|
||||
|
@ -782,12 +783,10 @@ static void SecureStackDump(const NANO_ESF *secure_esf)
|
|||
*/
|
||||
void _Fault(NANO_ESF *esf, u32_t exc_return)
|
||||
{
|
||||
u32_t reason = _NANO_ERR_HW_EXCEPTION;
|
||||
u32_t reason = K_ERR_CPU_EXCEPTION;
|
||||
int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
|
||||
bool recoverable;
|
||||
|
||||
LOG_PANIC();
|
||||
|
||||
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
|
||||
if ((exc_return & EXC_RETURN_INDICATOR_PREFIX) !=
|
||||
EXC_RETURN_INDICATOR_PREFIX) {
|
||||
|
@ -866,7 +865,7 @@ void _Fault(NANO_ESF *esf, u32_t exc_return)
|
|||
defined(CONFIG_ARM_NONSECURE_FIRMWARE)
|
||||
_exit_fatal:
|
||||
#endif
|
||||
z_NanoFatalErrorHandler(reason, esf);
|
||||
z_arm_fatal_error(reason, esf);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -1,73 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2014 Wind River Systems, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @brief ARM Cortex-M system fatal error handler
|
||||
*
|
||||
* This module provides the z_SysFatalErrorHandler() routine for Cortex-M
|
||||
* platforms.
|
||||
*/
|
||||
|
||||
#include <kernel.h>
|
||||
#include <toolchain.h>
|
||||
#include <linker/sections.h>
|
||||
#include <kernel_structs.h>
|
||||
#include <sys/printk.h>
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Fatal error handler
|
||||
*
|
||||
* This routine implements the corrective action to be taken when the system
|
||||
* detects a fatal error.
|
||||
*
|
||||
* This sample implementation attempts to abort the current thread and allow
|
||||
* the system to continue executing, which may permit the system to continue
|
||||
* functioning with degraded capabilities.
|
||||
*
|
||||
* System designers may wish to enhance or substitute this sample
|
||||
* implementation to take other actions, such as logging error (or debug)
|
||||
* information to a persistent repository and/or rebooting the system.
|
||||
*
|
||||
* @param reason fatal error reason
|
||||
* @param pEsf pointer to exception stack frame
|
||||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
void __weak z_SysFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
ARG_UNUSED(pEsf);
|
||||
|
||||
#if !defined(CONFIG_SIMPLE_FATAL_ERROR_HANDLER)
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
if (reason == _NANO_ERR_STACK_CHK_FAIL) {
|
||||
goto hang_system;
|
||||
}
|
||||
#endif
|
||||
if (reason == _NANO_ERR_KERNEL_PANIC) {
|
||||
goto hang_system;
|
||||
}
|
||||
if (k_is_in_isr() || z_is_thread_essential()) {
|
||||
printk("Fatal fault in %s! Spinning...\n",
|
||||
k_is_in_isr() ? "ISR" : "essential thread");
|
||||
goto hang_system;
|
||||
}
|
||||
printk("Fatal fault in thread %p! Aborting.\n", _current);
|
||||
k_thread_abort(_current);
|
||||
return;
|
||||
|
||||
hang_system:
|
||||
#else
|
||||
ARG_UNUSED(reason);
|
||||
#endif
|
||||
|
||||
for (;;) {
|
||||
k_cpu_idle();
|
||||
}
|
||||
CODE_UNREACHABLE;
|
||||
}
|
|
@ -146,6 +146,8 @@ extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
|
|||
u32_t stack_end,
|
||||
u32_t stack_start);
|
||||
|
||||
extern void z_arm_fatal_error(unsigned int reason, const NANO_ESF *esf);
|
||||
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -32,55 +32,9 @@ const NANO_ESF _default_esf = {
|
|||
0xdeadbaad
|
||||
};
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Kernel fatal error handler
|
||||
*
|
||||
* This routine is called when a fatal error condition is detected by either
|
||||
* hardware or software.
|
||||
*
|
||||
* The caller is expected to always provide a usable ESF. In the event that the
|
||||
* fatal error does not have a hardware generated ESF, the caller should either
|
||||
* create its own or call _Fault instead.
|
||||
*
|
||||
* @param reason the reason that the handler was called
|
||||
* @param pEsf pointer to the exception stack frame
|
||||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *esf)
|
||||
FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
|
||||
const NANO_ESF *esf)
|
||||
{
|
||||
LOG_PANIC();
|
||||
|
||||
#ifdef CONFIG_PRINTK
|
||||
switch (reason) {
|
||||
case _NANO_ERR_CPU_EXCEPTION:
|
||||
case _NANO_ERR_SPURIOUS_INT:
|
||||
break;
|
||||
|
||||
case _NANO_ERR_ALLOCATION_FAIL:
|
||||
printk("**** Kernel Allocation Failure! ****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_KERNEL_OOPS:
|
||||
printk("***** Kernel OOPS! *****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_KERNEL_PANIC:
|
||||
printk("***** Kernel Panic! *****\n");
|
||||
break;
|
||||
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
case _NANO_ERR_STACK_CHK_FAIL:
|
||||
printk("***** Stack overflow *****\n");
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
printk("**** Unknown Fatal Error %u! ****\n", reason);
|
||||
break;
|
||||
}
|
||||
|
||||
/* Subtract 4 from EA since we added 4 earlier so that the faulting
|
||||
* instruction isn't retried.
|
||||
*
|
||||
|
@ -88,21 +42,20 @@ FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
|
|||
* We may want to introduce a config option to save and dump all
|
||||
* registers, at the expense of some stack space.
|
||||
*/
|
||||
printk("Current thread ID: %p\n"
|
||||
"Faulting instruction: 0x%x\n"
|
||||
printk("Faulting instruction: 0x%x\n"
|
||||
" r1: 0x%x r2: 0x%x r3: 0x%x r4: 0x%x\n"
|
||||
" r5: 0x%x r6: 0x%x r7: 0x%x r8: 0x%x\n"
|
||||
" r9: 0x%x r10: 0x%x r11: 0x%x r12: 0x%x\n"
|
||||
" r13: 0x%x r14: 0x%x r15: 0x%x ra: 0x%x\n"
|
||||
"estatus: %x\n", k_current_get(), esf->instr - 4,
|
||||
"estatus: %x\n", esf->instr - 4,
|
||||
esf->r1, esf->r2, esf->r3, esf->r4,
|
||||
esf->r5, esf->r6, esf->r7, esf->r8,
|
||||
esf->r9, esf->r10, esf->r11, esf->r12,
|
||||
esf->r13, esf->r14, esf->r15, esf->ra,
|
||||
esf->estatus);
|
||||
#endif
|
||||
|
||||
z_SysFatalErrorHandler(reason, esf);
|
||||
z_fatal_error(reason, esf);
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_EXTRA_EXCEPTION_INFO) && defined(CONFIG_PRINTK) \
|
||||
|
@ -194,62 +147,15 @@ FUNC_NORETURN void _Fault(const NANO_ESF *esf)
|
|||
#endif /* ALT_CPU_HAS_EXTRA_EXCEPTION_INFO */
|
||||
#endif /* CONFIG_PRINTK */
|
||||
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
|
||||
z_nios2_fatal_error(K_ERR_CPU_EXCEPTION, esf);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Fatal error handler
|
||||
*
|
||||
* This routine implements the corrective action to be taken when the system
|
||||
* detects a fatal error.
|
||||
*
|
||||
* This sample implementation attempts to abort the current thread and allow
|
||||
* the system to continue executing, which may permit the system to continue
|
||||
* functioning with degraded capabilities.
|
||||
*
|
||||
* System designers may wish to enhance or substitute this sample
|
||||
* implementation to take other actions, such as logging error (or debug)
|
||||
* information to a persistent repository and/or rebooting the system.
|
||||
*
|
||||
* @param reason the fatal error reason
|
||||
* @param pEsf pointer to exception stack frame
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
FUNC_NORETURN __weak void z_SysFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
ARG_UNUSED(pEsf);
|
||||
|
||||
#if !defined(CONFIG_SIMPLE_FATAL_ERROR_HANDLER)
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
if (reason == _NANO_ERR_STACK_CHK_FAIL) {
|
||||
goto hang_system;
|
||||
}
|
||||
#endif
|
||||
if (reason == _NANO_ERR_KERNEL_PANIC) {
|
||||
goto hang_system;
|
||||
}
|
||||
if (k_is_in_isr() || z_is_thread_essential()) {
|
||||
printk("Fatal fault in %s! Spinning...\n",
|
||||
k_is_in_isr() ? "ISR" : "essential thread");
|
||||
goto hang_system;
|
||||
}
|
||||
printk("Fatal fault in thread %p! Aborting.\n", _current);
|
||||
k_thread_abort(_current);
|
||||
|
||||
hang_system:
|
||||
#else
|
||||
ARG_UNUSED(reason);
|
||||
#endif
|
||||
|
||||
#ifdef ALT_CPU_HAS_DEBUG_STUB
|
||||
FUNC_NORETURN void z_arch_system_halt(unsigned int reason)
|
||||
{
|
||||
ARG_UNUSED(reason);
|
||||
|
||||
z_nios2_break();
|
||||
#endif
|
||||
for (;;) {
|
||||
k_cpu_idle();
|
||||
}
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -21,12 +21,12 @@
|
|||
#include <kswap.h>
|
||||
#include <debug/tracing.h>
|
||||
|
||||
void z_irq_spurious(void *unused)
|
||||
FUNC_NORETURN void z_irq_spurious(void *unused)
|
||||
{
|
||||
ARG_UNUSED(unused);
|
||||
printk("Spurious interrupt detected! ipending: %x\n",
|
||||
z_nios2_creg_read(NIOS2_CR_IPENDING));
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf);
|
||||
z_nios2_fatal_error(K_ERR_SPURIOUS_IRQ, &_default_esf);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -41,6 +41,9 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
|||
thread->callee_saved.retval = value;
|
||||
}
|
||||
|
||||
FUNC_NORETURN void z_nios2_fatal_error(unsigned int reason,
|
||||
const NANO_ESF *esf);
|
||||
|
||||
#define z_is_in_isr() (_kernel.nested != 0U)
|
||||
|
||||
#ifdef CONFIG_IRQ_OFFLOAD
|
||||
|
|
|
@ -17,115 +17,11 @@ const NANO_ESF _default_esf = {
|
|||
0xdeadbaad
|
||||
};
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Kernel fatal error handler
|
||||
*
|
||||
* This routine is called when a fatal error condition is detected
|
||||
*
|
||||
* The caller is expected to always provide a usable ESF. In the event that the
|
||||
* fatal error does not have a hardware generated ESF, the caller should either
|
||||
* create its own or call _Fault instead.
|
||||
*
|
||||
* @param reason the reason that the handler was called
|
||||
* @param pEsf pointer to the exception stack frame
|
||||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *esf)
|
||||
FUNC_NORETURN void z_arch_system_halt(unsigned int reason)
|
||||
{
|
||||
LOG_PANIC();
|
||||
ARG_UNUSED(reason);
|
||||
|
||||
#ifdef CONFIG_PRINTK
|
||||
switch (reason) {
|
||||
case _NANO_ERR_CPU_EXCEPTION:
|
||||
case _NANO_ERR_SPURIOUS_INT:
|
||||
break;
|
||||
|
||||
case _NANO_ERR_INVALID_TASK_EXIT:
|
||||
printk("***** Invalid Exit Software Error! *****\n");
|
||||
break;
|
||||
|
||||
|
||||
case _NANO_ERR_ALLOCATION_FAIL:
|
||||
printk("**** Kernel Allocation Failure! ****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_KERNEL_OOPS:
|
||||
printk("***** Kernel OOPS! *****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_KERNEL_PANIC:
|
||||
printk("***** Kernel Panic! *****\n");
|
||||
break;
|
||||
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
case _NANO_ERR_STACK_CHK_FAIL:
|
||||
printk("***** Stack overflow *****\n");
|
||||
break;
|
||||
#endif
|
||||
default:
|
||||
printk("**** Unknown Fatal Error %u! ****\n", reason);
|
||||
break;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
void z_SysFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf);
|
||||
z_SysFatalErrorHandler(reason, esf);
|
||||
posix_print_error_and_exit("Exiting due to fatal error\n");
|
||||
CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Fatal error handler
|
||||
*
|
||||
* This routine implements the corrective action to be taken when the system
|
||||
* detects a fatal error.
|
||||
*
|
||||
* If CONFIG_ARCH_POSIX_STOP_ON_FATAL_ERROR is not set,
|
||||
* it will attempt to abort the current thread and allow
|
||||
* the system to continue executing, which may permit the system to continue
|
||||
* functioning with degraded capabilities.
|
||||
*
|
||||
* If CONFIG_ARCH_POSIX_STOP_ON_FATAL_ERROR is set, or the thread is an
|
||||
* essential thread or interrupt, the execution will be terminated, and an error
|
||||
* code will be returned to the invoking shell
|
||||
*
|
||||
* @param reason the fatal error reason
|
||||
* @param pEsf pointer to exception stack frame
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
FUNC_NORETURN __weak void z_SysFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
ARG_UNUSED(pEsf);
|
||||
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
if (reason == _NANO_ERR_STACK_CHK_FAIL) {
|
||||
goto hang_system;
|
||||
}
|
||||
#endif
|
||||
if (reason == _NANO_ERR_KERNEL_PANIC) {
|
||||
goto hang_system;
|
||||
}
|
||||
if (k_is_in_isr() || z_is_thread_essential()) {
|
||||
posix_print_error_and_exit(
|
||||
"Fatal fault in %s! Stopping...\n",
|
||||
k_is_in_isr() ? "ISR" : "essential thread");
|
||||
}
|
||||
printk("Fatal fault in thread %p! Aborting.\n", _current);
|
||||
|
||||
if (!IS_ENABLED(CONFIG_ARCH_POSIX_STOP_ON_FATAL_ERROR)) {
|
||||
k_thread_abort(_current);
|
||||
}
|
||||
|
||||
hang_system:
|
||||
|
||||
posix_print_error_and_exit(
|
||||
"Stopped in z_SysFatalErrorHandler()\n");
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
|
|
@ -38,64 +38,15 @@ const NANO_ESF _default_esf = {
|
|||
#endif
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Fatal error handler
|
||||
*
|
||||
* This routine is called when a fatal error condition is detected by either
|
||||
* hardware or software.
|
||||
*
|
||||
* The caller is expected to always provide a usable ESF. In the event that the
|
||||
* fatal error does not have a hardware generated ESF, the caller should either
|
||||
* create its own or call _Fault instead.
|
||||
*
|
||||
* @param reason the reason that the handler was called
|
||||
* @param esf pointer to the exception stack frame
|
||||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *esf)
|
||||
FUNC_NORETURN void z_riscv32_fatal_error(unsigned int reason,
|
||||
const NANO_ESF *esf)
|
||||
{
|
||||
LOG_PANIC();
|
||||
|
||||
switch (reason) {
|
||||
case _NANO_ERR_CPU_EXCEPTION:
|
||||
case _NANO_ERR_SPURIOUS_INT:
|
||||
break;
|
||||
|
||||
#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_STACK_SENTINEL)
|
||||
case _NANO_ERR_STACK_CHK_FAIL:
|
||||
printk("***** Stack Check Fail! *****\n");
|
||||
break;
|
||||
#endif /* CONFIG_STACK_CANARIES */
|
||||
|
||||
case _NANO_ERR_ALLOCATION_FAIL:
|
||||
printk("**** Kernel Allocation Failure! ****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_KERNEL_OOPS:
|
||||
printk("***** Kernel OOPS! *****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_KERNEL_PANIC:
|
||||
printk("***** Kernel Panic! *****\n");
|
||||
break;
|
||||
|
||||
default:
|
||||
printk("**** Unknown Fatal Error %d! ****\n", reason);
|
||||
break;
|
||||
}
|
||||
|
||||
printk("Current thread ID = %p\n"
|
||||
"Faulting instruction address = 0x%x\n"
|
||||
printk("Faulting instruction address = 0x%x\n"
|
||||
" ra: 0x%x gp: 0x%x tp: 0x%x t0: 0x%x\n"
|
||||
" t1: 0x%x t2: 0x%x t3: 0x%x t4: 0x%x\n"
|
||||
" t5: 0x%x t6: 0x%x a0: 0x%x a1: 0x%x\n"
|
||||
" a2: 0x%x a3: 0x%x a4: 0x%x a5: 0x%x\n"
|
||||
" a6: 0x%x a7: 0x%x\n",
|
||||
k_current_get(),
|
||||
(esf->mepc == 0xdeadbaad) ? 0xdeadbaad : esf->mepc,
|
||||
esf->ra, esf->gp, esf->tp, esf->t0,
|
||||
esf->t1, esf->t2, esf->t3, esf->t4,
|
||||
|
@ -103,70 +54,10 @@ FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
|
|||
esf->a2, esf->a3, esf->a4, esf->a5,
|
||||
esf->a6, esf->a7);
|
||||
|
||||
z_SysFatalErrorHandler(reason, esf);
|
||||
/* spin forever */
|
||||
for (;;) {
|
||||
__asm__ volatile("nop");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Fatal error handler
|
||||
*
|
||||
* This routine implements the corrective action to be taken when the system
|
||||
* detects a fatal error.
|
||||
*
|
||||
* This sample implementation attempts to abort the current thread and allow
|
||||
* the system to continue executing, which may permit the system to continue
|
||||
* functioning with degraded capabilities.
|
||||
*
|
||||
* System designers may wish to enhance or substitute this sample
|
||||
* implementation to take other actions, such as logging error (or debug)
|
||||
* information to a persistent repository and/or rebooting the system.
|
||||
*
|
||||
* @param reason fatal error reason
|
||||
* @param esf pointer to exception stack frame
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
FUNC_NORETURN __weak void z_SysFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *esf)
|
||||
{
|
||||
ARG_UNUSED(esf);
|
||||
|
||||
LOG_PANIC();
|
||||
|
||||
#if !defined(CONFIG_SIMPLE_FATAL_ERROR_HANDLER)
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
if (reason == _NANO_ERR_STACK_CHK_FAIL) {
|
||||
goto hang_system;
|
||||
}
|
||||
#endif
|
||||
if (reason == _NANO_ERR_KERNEL_PANIC) {
|
||||
goto hang_system;
|
||||
}
|
||||
if (k_is_in_isr() || z_is_thread_essential()) {
|
||||
printk("Fatal fault in %s! Spinning...\n",
|
||||
k_is_in_isr() ? "ISR" : "essential thread");
|
||||
goto hang_system;
|
||||
}
|
||||
printk("Fatal fault in thread %p! Aborting.\n", _current);
|
||||
k_thread_abort(_current);
|
||||
|
||||
hang_system:
|
||||
#else
|
||||
ARG_UNUSED(reason);
|
||||
#endif
|
||||
|
||||
for (;;) {
|
||||
k_cpu_idle();
|
||||
}
|
||||
z_fatal_error(reason, esf);
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
||||
|
||||
static char *cause_str(u32_t cause)
|
||||
{
|
||||
switch (cause) {
|
||||
|
@ -187,7 +78,6 @@ static char *cause_str(u32_t cause)
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
FUNC_NORETURN void _Fault(const NANO_ESF *esf)
|
||||
{
|
||||
u32_t mcause;
|
||||
|
@ -197,5 +87,5 @@ FUNC_NORETURN void _Fault(const NANO_ESF *esf)
|
|||
mcause &= SOC_MCAUSE_EXP_MASK;
|
||||
printk("Exception cause %s (%d)\n", cause_str(mcause), (int)mcause);
|
||||
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
|
||||
z_riscv32_fatal_error(K_ERR_CPU_EXCEPTION, esf);
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#include <kernel_structs.h>
|
||||
#include <sys/printk.h>
|
||||
|
||||
void z_irq_spurious(void *unused)
|
||||
FUNC_NORETURN void z_irq_spurious(void *unused)
|
||||
{
|
||||
u32_t mcause;
|
||||
|
||||
|
@ -25,8 +25,7 @@ void z_irq_spurious(void *unused)
|
|||
riscv_plic_get_irq());
|
||||
}
|
||||
#endif
|
||||
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf);
|
||||
z_riscv32_fatal_error(K_ERR_SPURIOUS_IRQ, &_default_esf);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_INTERRUPTS
|
||||
|
|
|
@ -37,9 +37,8 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
|||
thread->arch.swap_return_value = value;
|
||||
}
|
||||
|
||||
FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *esf);
|
||||
|
||||
FUNC_NORETURN void z_riscv32_fatal_error(unsigned int reason,
|
||||
const NANO_ESF *esf);
|
||||
|
||||
#define z_is_in_isr() (_kernel.nested != 0U)
|
||||
|
||||
|
|
|
@ -17,7 +17,6 @@ zephyr_library_sources(
|
|||
ia32/intstub.S
|
||||
ia32/irq_manage.c
|
||||
ia32/swap.S
|
||||
ia32/sys_fatal_error_handler.c
|
||||
ia32/thread.c
|
||||
ia32/spec_ctrl.c
|
||||
)
|
||||
|
|
|
@ -7,8 +7,6 @@
|
|||
/**
|
||||
* @file
|
||||
* @brief Kernel fatal error handler
|
||||
*
|
||||
* This module provides the z_NanoFatalErrorHandler() routine.
|
||||
*/
|
||||
|
||||
#include <toolchain.h>
|
||||
|
@ -118,111 +116,51 @@ static void unwind_stack(u32_t base_ptr, u16_t cs)
|
|||
}
|
||||
#endif /* CONFIG_EXCEPTION_STACK_TRACE */
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Kernel fatal error handler
|
||||
*
|
||||
* This routine is called when a fatal error condition is detected by either
|
||||
* hardware or software.
|
||||
*
|
||||
* The caller is expected to always provide a usable ESF. In the event that the
|
||||
* fatal error does not have a hardware generated ESF, the caller should either
|
||||
* create its own or use a pointer to the global default ESF <_default_esf>.
|
||||
*
|
||||
* @param reason the reason that the handler was called
|
||||
* @param pEsf pointer to the exception stack frame
|
||||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
#ifdef CONFIG_BOARD_QEMU_X86
|
||||
FUNC_NORETURN void z_arch_system_halt(unsigned int reason)
|
||||
{
|
||||
#ifdef CONFIG_THREAD_NAME
|
||||
const char *thread_name = k_thread_name_get(k_current_get());
|
||||
ARG_UNUSED(reason);
|
||||
|
||||
/* Causes QEMU to exit. We passed the following on the command line:
|
||||
* -device isa-debug-exit,iobase=0xf4,iosize=0x04
|
||||
*/
|
||||
sys_out32(0, 0xf4);
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
#endif
|
||||
|
||||
LOG_PANIC();
|
||||
|
||||
z_debug_fatal_hook(pEsf);
|
||||
|
||||
#ifdef CONFIG_PRINTK
|
||||
|
||||
/* Display diagnostic information about the error */
|
||||
|
||||
switch (reason) {
|
||||
case _NANO_ERR_CPU_EXCEPTION:
|
||||
break;
|
||||
|
||||
case _NANO_ERR_SPURIOUS_INT: {
|
||||
int vector = z_irq_controller_isr_vector_get();
|
||||
|
||||
printk("***** Unhandled interrupt vector ");
|
||||
if (vector >= 0) {
|
||||
printk("%d ", vector);
|
||||
}
|
||||
printk("*****\n");
|
||||
break;
|
||||
}
|
||||
#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_STACK_SENTINEL) || \
|
||||
defined(CONFIG_HW_STACK_PROTECTION) || \
|
||||
defined(CONFIG_USERSPACE)
|
||||
case _NANO_ERR_STACK_CHK_FAIL:
|
||||
printk("***** Stack Check Fail! *****\n");
|
||||
break;
|
||||
#endif /* CONFIG_STACK_CANARIES */
|
||||
|
||||
case _NANO_ERR_KERNEL_OOPS:
|
||||
printk("***** Kernel OOPS! *****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_KERNEL_PANIC:
|
||||
printk("***** Kernel Panic! *****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_ALLOCATION_FAIL:
|
||||
printk("**** Kernel Allocation Failure! ****\n");
|
||||
break;
|
||||
|
||||
default:
|
||||
printk("**** Unknown Fatal Error %d! ****\n", reason);
|
||||
break;
|
||||
}
|
||||
|
||||
printk("Current thread ID = %p"
|
||||
#ifdef CONFIG_THREAD_NAME
|
||||
" (%s)"
|
||||
#endif
|
||||
"\n"
|
||||
"eax: 0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x\n"
|
||||
FUNC_NORETURN void z_x86_fatal_error(unsigned int reason, const NANO_ESF *esf)
|
||||
{
|
||||
printk("eax: 0x%08x, ebx: 0x%08x, ecx: 0x%08x, edx: 0x%08x\n"
|
||||
"esi: 0x%08x, edi: 0x%08x, ebp: 0x%08x, esp: 0x%08x\n"
|
||||
"eflags: 0x%08x cs: 0x%04x\n"
|
||||
#ifdef CONFIG_EXCEPTION_STACK_TRACE
|
||||
"call trace:\n"
|
||||
#endif
|
||||
"eip: 0x%08x\n",
|
||||
k_current_get(),
|
||||
#ifdef CONFIG_THREAD_NAME
|
||||
thread_name ? thread_name : "unknown",
|
||||
#endif
|
||||
pEsf->eax, pEsf->ebx, pEsf->ecx, pEsf->edx,
|
||||
pEsf->esi, pEsf->edi, pEsf->ebp, pEsf->esp,
|
||||
pEsf->eflags, pEsf->cs & 0xFFFFU, pEsf->eip);
|
||||
esf->eax, esf->ebx, esf->ecx, esf->edx,
|
||||
esf->esi, esf->edi, esf->ebp, esf->esp,
|
||||
esf->eflags, esf->cs & 0xFFFFU, esf->eip);
|
||||
#ifdef CONFIG_EXCEPTION_STACK_TRACE
|
||||
unwind_stack(pEsf->ebp, pEsf->cs);
|
||||
unwind_stack(esf->ebp, esf->cs);
|
||||
#endif
|
||||
|
||||
#endif /* CONFIG_PRINTK */
|
||||
|
||||
|
||||
/*
|
||||
* Error was fatal to a kernel task or a thread; invoke the system
|
||||
* fatal error handling policy defined for the platform.
|
||||
*/
|
||||
|
||||
z_SysFatalErrorHandler(reason, pEsf);
|
||||
z_fatal_error(reason, esf);
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
||||
FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
|
||||
void z_x86_spurious_irq(const NANO_ESF *esf)
|
||||
{
|
||||
int vector = z_irq_controller_isr_vector_get();
|
||||
|
||||
if (vector >= 0) {
|
||||
printk("IRQ vector: %d\n", vector);
|
||||
}
|
||||
|
||||
z_x86_fatal_error(K_ERR_SPURIOUS_IRQ, esf);
|
||||
}
|
||||
|
||||
void z_arch_syscall_oops(void *ssf_ptr)
|
||||
{
|
||||
struct _x86_syscall_stack_frame *ssf =
|
||||
(struct _x86_syscall_stack_frame *)ssf_ptr;
|
||||
|
@ -236,11 +174,11 @@ FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
|
|||
oops.esp = ssf->esp;
|
||||
}
|
||||
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, &oops);
|
||||
z_x86_fatal_error(K_ERR_KERNEL_OOPS, &oops);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_KERNEL_OOPS
|
||||
FUNC_NORETURN void z_do_kernel_oops(const NANO_ESF *esf)
|
||||
void z_do_kernel_oops(const NANO_ESF *esf)
|
||||
{
|
||||
u32_t *stack_ptr = (u32_t *)esf->esp;
|
||||
u32_t reason = *stack_ptr;
|
||||
|
@ -249,13 +187,13 @@ FUNC_NORETURN void z_do_kernel_oops(const NANO_ESF *esf)
|
|||
/* User mode is only allowed to induce oopses and stack check
|
||||
* failures via this software interrupt
|
||||
*/
|
||||
if (esf->cs == USER_CODE_SEG && !(reason == _NANO_ERR_KERNEL_OOPS ||
|
||||
reason == _NANO_ERR_STACK_CHK_FAIL)) {
|
||||
reason = _NANO_ERR_KERNEL_OOPS;
|
||||
if (esf->cs == USER_CODE_SEG && !(reason == K_ERR_KERNEL_OOPS ||
|
||||
reason == K_ERR_STACK_CHK_FAIL)) {
|
||||
reason = K_ERR_KERNEL_OOPS;
|
||||
}
|
||||
#endif
|
||||
|
||||
z_NanoFatalErrorHandler(reason, esf);
|
||||
z_x86_fatal_error(reason, esf);
|
||||
}
|
||||
|
||||
extern void (*_kernel_oops_handler)(void);
|
||||
|
@ -265,7 +203,7 @@ NANO_CPU_INT_REGISTER(_kernel_oops_handler, NANO_SOFT_IRQ,
|
|||
#endif
|
||||
|
||||
/*
|
||||
* Define a default ESF for use with z_NanoFatalErrorHandler() in the event
|
||||
* Define a default ESF for use with z_fatal_error() in the event
|
||||
* the caller does not have a NANO_ESF to pass
|
||||
*/
|
||||
const NANO_ESF _default_esf = {
|
||||
|
@ -285,7 +223,7 @@ const NANO_ESF _default_esf = {
|
|||
|
||||
#if CONFIG_EXCEPTION_DEBUG
|
||||
|
||||
static FUNC_NORETURN void generic_exc_handle(unsigned int vector,
|
||||
FUNC_NORETURN static void generic_exc_handle(unsigned int vector,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
printk("***** ");
|
||||
|
@ -303,7 +241,7 @@ static FUNC_NORETURN void generic_exc_handle(unsigned int vector,
|
|||
if ((BIT(vector) & _EXC_ERROR_CODE_FAULTS) != 0) {
|
||||
printk("***** Exception code: 0x%x\n", pEsf->errorCode);
|
||||
}
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, pEsf);
|
||||
z_x86_fatal_error(K_ERR_CPU_EXCEPTION, pEsf);
|
||||
}
|
||||
|
||||
#define _EXC_FUNC(vector) \
|
||||
|
@ -437,10 +375,10 @@ void page_fault_handler(NANO_ESF *esf)
|
|||
#endif
|
||||
#ifdef CONFIG_THREAD_STACK_INFO
|
||||
if (check_stack_bounds(esf->esp, 0, esf->cs)) {
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_STACK_CHK_FAIL, esf);
|
||||
z_x86_fatal_error(K_ERR_STACK_CHK_FAIL, esf);
|
||||
}
|
||||
#endif
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_CPU_EXCEPTION, esf);
|
||||
z_x86_fatal_error(K_ERR_CPU_EXCEPTION, esf);
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
_EXCEPTION_CONNECT_CODE(page_fault_handler, IV_PAGE_FAULT);
|
||||
|
@ -484,10 +422,10 @@ struct task_state_segment _df_tss = {
|
|||
.cr3 = (u32_t)&z_x86_kernel_pdpt
|
||||
};
|
||||
|
||||
static FUNC_NORETURN __used void df_handler_bottom(void)
|
||||
static __used void df_handler_bottom(void)
|
||||
{
|
||||
/* We're back in the main hardware task on the interrupt stack */
|
||||
int reason = _NANO_ERR_CPU_EXCEPTION;
|
||||
int reason = K_ERR_CPU_EXCEPTION;
|
||||
|
||||
/* Restore the top half so it is runnable again */
|
||||
_df_tss.esp = (u32_t)(_df_stack + sizeof(_df_stack));
|
||||
|
@ -496,10 +434,10 @@ static FUNC_NORETURN __used void df_handler_bottom(void)
|
|||
printk("***** Double Fault *****\n");
|
||||
#ifdef CONFIG_THREAD_STACK_INFO
|
||||
if (check_stack_bounds(_df_esf.esp, 0, _df_esf.cs)) {
|
||||
reason = _NANO_ERR_STACK_CHK_FAIL;
|
||||
reason = K_ERR_STACK_CHK_FAIL;
|
||||
}
|
||||
#endif
|
||||
z_NanoFatalErrorHandler(reason, (NANO_ESF *)&_df_esf);
|
||||
z_x86_fatal_error(reason, (NANO_ESF *)&_df_esf);
|
||||
}
|
||||
|
||||
static FUNC_NORETURN __used void df_handler_top(void)
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
#include <kernel_structs.h>
|
||||
#include <arch/x86/ia32/asm.h>
|
||||
#include <offsets_short.h>
|
||||
#include <arch/cpu.h> /* _NANO_ERR_SPURIOUS_INT */
|
||||
#include <arch/cpu.h>
|
||||
#include <drivers/interrupt_controller/sysapic.h>
|
||||
|
||||
/* exports (internal APIs) */
|
||||
|
@ -370,11 +370,9 @@ handle_idle:
|
|||
* error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP
|
||||
* records.
|
||||
*
|
||||
* A spurious interrupt is considered a fatal condition, thus this routine
|
||||
* merely sets up the 'reason' and 'pEsf' parameters to the routine
|
||||
* _SysFatalHwErrorHandler(). In other words, there is no provision to return
|
||||
* to the interrupted execution context and thus the volatile registers are not
|
||||
* saved.
|
||||
* A spurious interrupt is considered a fatal condition; there is no provision
|
||||
* to return to the interrupted execution context and thus the volatile
|
||||
* registers are not saved.
|
||||
*
|
||||
* @return Never returns
|
||||
*
|
||||
|
@ -416,20 +414,14 @@ SECTION_FUNC(TEXT, z_SpuriousIntHandler)
|
|||
#ifndef CONFIG_X86_IAMCU
|
||||
pushl %esp /* push cur stack pointer: pEsf arg */
|
||||
#else
|
||||
mov %esp, %edx
|
||||
mov %esp, %eax
|
||||
#endif
|
||||
|
||||
/* re-enable interrupts */
|
||||
sti
|
||||
|
||||
/* push the 'unsigned int reason' parameter */
|
||||
#ifndef CONFIG_X86_IAMCU
|
||||
pushl $_NANO_ERR_SPURIOUS_INT
|
||||
#else
|
||||
movl $_NANO_ERR_SPURIOUS_INT, %eax
|
||||
#endif
|
||||
/* call the fatal error handler */
|
||||
call z_NanoFatalErrorHandler
|
||||
call z_x86_spurious_irq
|
||||
|
||||
/* handler doesn't return */
|
||||
|
||||
|
|
|
@ -1,83 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2014 Wind River Systems, Inc.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @brief Common system fatal error handler
|
||||
*
|
||||
* This module provides the z_SysFatalErrorHandler() routine which is common to
|
||||
* supported platforms.
|
||||
*/
|
||||
|
||||
#include <kernel.h>
|
||||
#include <toolchain.h>
|
||||
#include <linker/sections.h>
|
||||
#include <kernel_structs.h>
|
||||
#include <sys/printk.h>
|
||||
#include <logging/log_ctrl.h>
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Fatal error handler
|
||||
*
|
||||
* This routine implements the corrective action to be taken when the system
|
||||
* detects a fatal error.
|
||||
*
|
||||
* This sample implementation attempts to abort the current thread and allow
|
||||
* the system to continue executing, which may permit the system to continue
|
||||
* functioning with degraded capabilities.
|
||||
*
|
||||
* System designers may wish to enhance or substitute this sample
|
||||
* implementation to take other actions, such as logging error (or debug)
|
||||
* information to a persistent repository and/or rebooting the system.
|
||||
*
|
||||
* @param reason the fatal error reason
|
||||
* @param pEsf the pointer to the exception stack frame
|
||||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
FUNC_NORETURN __weak void z_SysFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
ARG_UNUSED(pEsf);
|
||||
|
||||
LOG_PANIC();
|
||||
|
||||
#if !defined(CONFIG_SIMPLE_FATAL_ERROR_HANDLER)
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
if (reason == _NANO_ERR_STACK_CHK_FAIL) {
|
||||
goto hang_system;
|
||||
}
|
||||
#endif
|
||||
if (reason == _NANO_ERR_KERNEL_PANIC) {
|
||||
goto hang_system;
|
||||
}
|
||||
if (k_is_in_isr() || z_is_thread_essential()) {
|
||||
printk("Fatal fault in %s! Spinning...\n",
|
||||
k_is_in_isr() ? "ISR" : "essential thread");
|
||||
goto hang_system;
|
||||
}
|
||||
printk("Fatal fault in thread %p! Aborting.\n", _current);
|
||||
k_thread_abort(_current);
|
||||
|
||||
hang_system:
|
||||
#else
|
||||
ARG_UNUSED(reason);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_BOARD_QEMU_X86
|
||||
printk("Terminate emulator due to fatal kernel error\n");
|
||||
/* Causes QEMU to exit. We passed the following on the command line:
|
||||
* -device isa-debug-exit,iobase=0xf4,iosize=0x04
|
||||
*/
|
||||
sys_out32(0, 0xf4);
|
||||
#else
|
||||
for (;;) {
|
||||
k_cpu_idle();
|
||||
}
|
||||
#endif
|
||||
CODE_UNREACHABLE;
|
||||
}
|
|
@ -59,7 +59,7 @@ void z_unhandled_vector(int vector, int err, struct xuk_entry_frame *f)
|
|||
printk("*** R8 0x%llx R9 0x%llx R10 0x%llx R11 0x%llx\n",
|
||||
f->r8, f->r9, f->r10, f->r11);
|
||||
|
||||
z_NanoFatalErrorHandler(x86_64_except_reason, NULL);
|
||||
z_fatal_error(x86_64_except_reason, NULL);
|
||||
}
|
||||
|
||||
void z_isr_entry(void)
|
||||
|
@ -214,16 +214,3 @@ void x86_apic_set_timeout(u32_t cyc_from_now)
|
|||
const NANO_ESF _default_esf;
|
||||
|
||||
int x86_64_except_reason;
|
||||
|
||||
void z_NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *esf)
|
||||
{
|
||||
z_SysFatalErrorHandler(reason, esf);
|
||||
}
|
||||
|
||||
/* App-overridable handler. Does nothing here */
|
||||
void __weak z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf)
|
||||
{
|
||||
ARG_UNUSED(reason);
|
||||
ARG_UNUSED(esf);
|
||||
k_thread_abort(_current);
|
||||
}
|
||||
|
|
|
@ -31,70 +31,6 @@ const NANO_ESF _default_esf = {
|
|||
retval; \
|
||||
})
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Fatal error handler
|
||||
*
|
||||
* This routine is called when fatal error conditions are detected by software
|
||||
* and is responsible only for reporting the error. Once reported, it then
|
||||
* invokes the user provided routine z_SysFatalErrorHandler() which is
|
||||
* responsible for implementing the error handling policy.
|
||||
*
|
||||
* The caller is expected to always provide a usable ESF. In the event that the
|
||||
* fatal error does not have a hardware generated ESF, the caller should either
|
||||
* create its own or use a pointer to the global default ESF <_default_esf>.
|
||||
*
|
||||
* @param reason the reason that the handler was called
|
||||
* @param pEsf pointer to the exception stack frame
|
||||
*
|
||||
* @return This function does not return.
|
||||
*/
|
||||
XTENSA_ERR_NORET void z_NanoFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
LOG_PANIC();
|
||||
|
||||
switch (reason) {
|
||||
case _NANO_ERR_HW_EXCEPTION:
|
||||
case _NANO_ERR_RESERVED_IRQ:
|
||||
break;
|
||||
|
||||
#if defined(CONFIG_STACK_CANARIES) || defined(CONFIG_STACK_SENTINEL)
|
||||
case _NANO_ERR_STACK_CHK_FAIL:
|
||||
printk("***** Stack Check Fail! *****\n");
|
||||
break;
|
||||
#endif /* CONFIG_STACK_CANARIES */
|
||||
case _NANO_ERR_ALLOCATION_FAIL:
|
||||
printk("**** Kernel Allocation Failure! ****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_KERNEL_OOPS:
|
||||
printk("***** Kernel OOPS! *****\n");
|
||||
break;
|
||||
|
||||
case _NANO_ERR_KERNEL_PANIC:
|
||||
printk("***** Kernel Panic! *****\n");
|
||||
break;
|
||||
|
||||
default:
|
||||
printk("**** Unknown Fatal Error %d! ****\n", reason);
|
||||
break;
|
||||
}
|
||||
printk("Current thread ID = %p\n"
|
||||
"Faulting instruction address = 0x%x\n",
|
||||
k_current_get(),
|
||||
pEsf->pc);
|
||||
|
||||
/*
|
||||
* Now that the error has been reported, call the user implemented
|
||||
* policy
|
||||
* to respond to the error. The decisions as to what responses are
|
||||
* appropriate to the various errors are something the customer must
|
||||
* decide.
|
||||
*/
|
||||
z_SysFatalErrorHandler(reason, pEsf);
|
||||
}
|
||||
|
||||
|
||||
#ifdef CONFIG_PRINTK
|
||||
static char *cause_str(unsigned int cause_code)
|
||||
|
@ -185,22 +121,27 @@ static void dump_exc_state(void)
|
|||
#endif /* CONFIG_PRINTK */
|
||||
}
|
||||
|
||||
XTENSA_ERR_NORET void z_xtensa_fatal_error(unsigned int reason,
|
||||
const NANO_ESF *esf)
|
||||
{
|
||||
dump_exc_state();
|
||||
|
||||
printk("Faulting instruction address = 0x%x\n", esf->pc);
|
||||
|
||||
z_fatal_error(reason, esf);
|
||||
}
|
||||
|
||||
XTENSA_ERR_NORET void FatalErrorHandler(void)
|
||||
{
|
||||
printk("*** Unhandled exception ****\n");
|
||||
dump_exc_state();
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
|
||||
z_xtensa_fatal_error(K_ERR_CPU_EXCEPTION, &_default_esf);
|
||||
}
|
||||
|
||||
XTENSA_ERR_NORET void ReservedInterruptHandler(unsigned int intNo)
|
||||
{
|
||||
printk("*** Reserved Interrupt ***\n");
|
||||
dump_exc_state();
|
||||
printk("INTENABLE = 0x%x\n"
|
||||
"INTERRUPT = 0x%x (%x)\n",
|
||||
get_sreg(INTENABLE), (1 << intNo), intNo);
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_RESERVED_IRQ, &_default_esf);
|
||||
z_xtensa_fatal_error(K_ERR_SPURIOUS_IRQ, &_default_esf);
|
||||
}
|
||||
|
||||
void exit(int return_code)
|
||||
|
@ -219,60 +160,10 @@ void exit(int return_code)
|
|||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Fatal error handler
|
||||
*
|
||||
* This routine implements the corrective action to be taken when the system
|
||||
* detects a fatal error.
|
||||
*
|
||||
* This sample implementation attempts to abort the current thread and allow
|
||||
* the system to continue executing, which may permit the system to continue
|
||||
* functioning with degraded capabilities.
|
||||
*
|
||||
* System designers may wish to enhance or substitute this sample
|
||||
* implementation to take other actions, such as logging error (or debug)
|
||||
* information to a persistent repository and/or rebooting the system.
|
||||
*
|
||||
* @param reason the fatal error reason
|
||||
* @param pEsf pointer to exception stack frame
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
XTENSA_ERR_NORET __weak void z_SysFatalErrorHandler(unsigned int reason,
|
||||
const NANO_ESF *pEsf)
|
||||
{
|
||||
ARG_UNUSED(pEsf);
|
||||
|
||||
#if !defined(CONFIG_SIMPLE_FATAL_ERROR_HANDLER)
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
if (reason == _NANO_ERR_STACK_CHK_FAIL) {
|
||||
goto hang_system;
|
||||
}
|
||||
#endif
|
||||
if (reason == _NANO_ERR_KERNEL_PANIC) {
|
||||
goto hang_system;
|
||||
}
|
||||
if (k_is_in_isr() || z_is_thread_essential()) {
|
||||
printk("Fatal fault in %s! Spinning...\n",
|
||||
k_is_in_isr() ? "ISR" : "essential thread");
|
||||
goto hang_system;
|
||||
}
|
||||
printk("Fatal fault in thread %p! Aborting.\n", _current);
|
||||
k_thread_abort(_current);
|
||||
|
||||
hang_system:
|
||||
#else
|
||||
ARG_UNUSED(reason);
|
||||
#endif
|
||||
|
||||
#ifdef XT_SIMULATOR
|
||||
FUNC_NORETURN void z_system_halt(unsigned int reason)
|
||||
{
|
||||
exit(255 - reason);
|
||||
#else
|
||||
for (;;) {
|
||||
k_cpu_idle();
|
||||
}
|
||||
#endif
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -87,7 +87,7 @@ void z_irq_spurious(void *arg)
|
|||
__asm__ volatile("rsr.intenable %0" : "=r"(ie));
|
||||
printk(" ** Spurious INTERRUPT(s) %p, INTENABLE = %p\n",
|
||||
(void *)irqs, (void *)ie);
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_RESERVED_IRQ, &_default_esf);
|
||||
z_xtensa_fatal_error(K_ERR_SPURIOUS_IRQ, &_default_esf);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -201,7 +201,7 @@ void *xtensa_excint1_c(int *interrupted_stack)
|
|||
* as these are software errors. Should clean this
|
||||
* up.
|
||||
*/
|
||||
z_NanoFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, &_default_esf);
|
||||
z_xtensa_fatal_error(K_ERR_CPU_EXCEPTION, &_default_esf);
|
||||
}
|
||||
|
||||
return z_get_next_switch_handle(interrupted_stack);
|
||||
|
|
|
@ -34,6 +34,7 @@ extern "C" {
|
|||
|
||||
extern void FatalErrorHandler(void);
|
||||
extern void ReservedInterruptHandler(unsigned int intNo);
|
||||
extern void z_xtensa_fatal_error(unsigned int reason, const NANO_ESF *esf);
|
||||
|
||||
/* Defined in xtensa_context.S */
|
||||
extern void z_xt_coproc_init(void);
|
||||
|
|
|
@ -441,13 +441,14 @@ However, a real implementation is strongly recommended.
|
|||
Fault Management
|
||||
****************
|
||||
|
||||
Each architecture provides two fatal error handlers:
|
||||
|
||||
* :code:`_NanoFatalErrorHandler`, called by software for unrecoverable errors.
|
||||
* :code:`_SysFatalErrorHandler`, which makes the decision on how to handle
|
||||
the thread where the error is generated, most likely by terminating it.
|
||||
|
||||
See the current architecture implementations for examples.
|
||||
In the event of an unhandled CPU exception, the architecture
|
||||
code must call into :c:func:`z_fatal_error`. This function dumps
|
||||
out architecture-agnostic information and makes a policy
|
||||
decision on what to do next by invoking :c:func:`k_sys_fatal_error`.
|
||||
This function can be overridden to implement application-specific
|
||||
policies that could include locking interrupts and spinning forever
|
||||
(the default implementation) or even powering off the
|
||||
system (if supported).
|
||||
|
||||
Toolchain and Linking
|
||||
*********************
|
||||
|
|
|
@ -22,19 +22,6 @@
extern "C" {
#endif

#ifndef _ASMLANGUAGE
#include <toolchain/gcc.h>
extern void z_NanoFatalErrorHandler(unsigned int, const NANO_ESF*);
extern void z_SysFatalErrorHandler(unsigned int cause, const NANO_ESF *esf);
#endif

#define _NANO_ERR_HW_EXCEPTION (0) /* MPU/Bus/Usage fault */
#define _NANO_ERR_STACK_CHK_FAIL (2) /* Stack corruption detected */
#define _NANO_ERR_ALLOCATION_FAIL (3) /* Kernel Allocation Failure */
#define _NANO_ERR_KERNEL_OOPS (4) /* Kernel oops (fatal to thread) */
#define _NANO_ERR_KERNEL_PANIC (5) /* Kernel panic (fatal to system) */


/*
* the exception caused by kernel will be handled in interrupt context
* when the processor is already in interrupt context, no need to raise

@ -44,7 +31,7 @@ extern void z_SysFatalErrorHandler(unsigned int cause, const NANO_ESF *esf);
#define Z_ARCH_EXCEPT(reason_p) do { \
if (z_arc_v2_irq_unit_is_in_isr()) { \
printk("@ %s:%d:\n", __FILE__, __LINE__); \
z_NanoFatalErrorHandler(reason_p, 0); \
z_fatal_error(reason_p, 0); \
} else {\
__asm__ volatile ( \
"mov r0, %[reason]\n\t" \

@ -22,17 +22,6 @@
extern "C" {
#endif

#ifndef _ASMLANGUAGE
extern void z_NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
extern void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
#endif

#define _NANO_ERR_HW_EXCEPTION (0) /* MPU/Bus/Usage fault */
#define _NANO_ERR_STACK_CHK_FAIL (2) /* Stack corruption detected */
#define _NANO_ERR_ALLOCATION_FAIL (3) /* Kernel Allocation Failure */
#define _NANO_ERR_KERNEL_OOPS (4) /* Kernel oops (fatal to thread) */
#define _NANO_ERR_KERNEL_PANIC (5) /* Kernel panic (fatal to system) */

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* ARMv6 will hard-fault if SVC is called with interrupts locked. Just
* force them unlocked, the thread is in an undefined state anyway

@ -28,13 +28,6 @@ extern "C" {

#define STACK_ALIGN 4

#define _NANO_ERR_CPU_EXCEPTION (0) /* Any unhandled exception */
#define _NANO_ERR_STACK_CHK_FAIL (2) /* Stack corruption detected */
#define _NANO_ERR_ALLOCATION_FAIL (3) /* Kernel Allocation Failure */
#define _NANO_ERR_SPURIOUS_INT (4) /* Spurious interrupt */
#define _NANO_ERR_KERNEL_OOPS (5) /* Kernel oops (fatal to thread) */
#define _NANO_ERR_KERNEL_PANIC (6) /* Kernel panic (fatal to system) */

#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <irq.h>

@ -33,14 +33,6 @@ extern "C" {
#define STACK_ALIGN 4
#define STACK_ALIGN_SIZE 4

#define _NANO_ERR_CPU_EXCEPTION (0) /* Any unhandled exception */
#define _NANO_ERR_INVALID_TASK_EXIT (1) /* Invalid task exit */
#define _NANO_ERR_STACK_CHK_FAIL (2) /* Stack corruption detected */
#define _NANO_ERR_ALLOCATION_FAIL (3) /* Kernel Allocation Failure */
#define _NANO_ERR_SPURIOUS_INT (4) /* Spurious interrupt */
#define _NANO_ERR_KERNEL_OOPS (5) /* Kernel oops (fatal to thread) */
#define _NANO_ERR_KERNEL_PANIC (6) /* Kernel panic (fatal to system) */

struct __esf {
u32_t dummy; /*maybe we will want to add something someday*/
};

@ -51,12 +43,6 @@ extern const NANO_ESF _default_esf;
extern u32_t z_timer_cycle_get_32(void);
#define z_arch_k_cycle_get_32() z_timer_cycle_get_32()

FUNC_NORETURN void z_SysFatalErrorHandler(unsigned int reason,
const NANO_ESF *esf);

FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
const NANO_ESF *esf);

/**
* @brief Explicitly nop operation.
*/

@ -76,21 +76,8 @@ typedef struct __esf NANO_ESF;
typedef struct soc_esf soc_esf_t;
#endif
extern const NANO_ESF _default_esf;

extern FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
const NANO_ESF *esf);
extern void z_SysFatalErrorHandler(unsigned int reason,
const NANO_ESF *esf);

#endif /* _ASMLANGUAGE */

#define _NANO_ERR_CPU_EXCEPTION (0) /* Any unhandled exception */
#define _NANO_ERR_STACK_CHK_FAIL (2) /* Stack corruption detected */
#define _NANO_ERR_ALLOCATION_FAIL (3) /* Kernel Allocation Failure */
#define _NANO_ERR_SPURIOUS_INT (4) /* Spurious interrupt */
#define _NANO_ERR_KERNEL_OOPS (5) /* Kernel oops (fatal to thread) */
#define _NANO_ERR_KERNEL_PANIC (6) /* Kernel panic (fatal to system) */

#ifdef __cplusplus
}
#endif

@ -316,28 +316,6 @@ struct _x86_syscall_stack_frame {

#endif /* !_ASMLANGUAGE */

/*
* Reason codes passed to both z_NanoFatalErrorHandler()
* and z_SysFatalErrorHandler().
*/

/** Unhandled exception/interrupt */
#define _NANO_ERR_SPURIOUS_INT (0)
/** Page fault */
#define _NANO_ERR_PAGE_FAULT (1)
/** General protection fault */
#define _NANO_ERR_GEN_PROT_FAULT (2)
/** Stack corruption detected */
#define _NANO_ERR_STACK_CHK_FAIL (4)
/** Kernel Allocation Failure */
#define _NANO_ERR_ALLOCATION_FAIL (5)
/** Unhandled exception */
#define _NANO_ERR_CPU_EXCEPTION (6)
/** Kernel oops (fatal to thread) */
#define _NANO_ERR_KERNEL_OOPS (7)
/** Kernel panic (fatal to system) */
#define _NANO_ERR_KERNEL_PANIC (8)

#ifndef _ASMLANGUAGE

/**

@ -589,15 +567,6 @@ extern void k_cpu_idle(void);
extern u32_t z_timer_cycle_get_32(void);
#define z_arch_k_cycle_get_32() z_timer_cycle_get_32()

/** kernel provided routine to report any detected fatal error. */
extern FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
const NANO_ESF * pEsf);

/** User provided routine to handle any detected fatal error post reporting. */
extern FUNC_NORETURN void z_SysFatalErrorHandler(unsigned int reason,
const NANO_ESF * pEsf);


#ifdef CONFIG_X86_ENABLE_TSS
extern struct task_state_segment _main_tss;
#endif

@ -14,16 +14,4 @@

typedef struct NANO_ESF NANO_ESF;
extern const NANO_ESF _default_esf;
void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
void z_NanoFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);

/* Existing code requires only these particular symbols be defined,
* but doesn't put them in a global header. Needs cleaner
* cross-architecture standardization. Implement only the minimal set
* here.
*/
#define _NANO_ERR_STACK_CHK_FAIL 1
#define _NANO_ERR_KERNEL_OOPS 2
#define _NANO_ERR_KERNEL_PANIC 3

#endif /* _X86_64_ARCH_H */

@ -31,13 +31,6 @@ extern "C" {

#define STACK_ALIGN 16

#define _NANO_ERR_HW_EXCEPTION (0) /* MPU/Bus/Usage fault */
#define _NANO_ERR_STACK_CHK_FAIL (2) /* Stack corruption detected */
#define _NANO_ERR_ALLOCATION_FAIL (3) /* Kernel Allocation Failure */
#define _NANO_ERR_RESERVED_IRQ (4) /* Reserved interrupt */
#define _NANO_ERR_KERNEL_OOPS (5) /* Kernel oops (fatal to thread) */
#define _NANO_ERR_KERNEL_PANIC (6) /* Kernel panic (fatal to system) */

/* Xtensa GPRs are often designated by two different names */
#define sys_define_gpr_with_alias(name1, name2) union { u32_t name1, name2; }

@ -91,12 +84,6 @@ extern void z_irq_spurious(void *unused);
#define XTENSA_ERR_NORET FUNC_NORETURN
#endif

XTENSA_ERR_NORET void z_SysFatalErrorHandler(unsigned int reason,
const NANO_ESF *esf);

XTENSA_ERR_NORET void z_NanoFatalErrorHandler(unsigned int reason,
const NANO_ESF *pEsf);

extern u32_t z_timer_cycle_get_32(void);
#define z_arch_k_cycle_get_32() z_timer_cycle_get_32()

87 include/fatal.h Normal file
@ -0,0 +1,87 @@
/*
* Copyright (c) 2019 Intel Corporation.
*
* SPDX-License-Identifier: Apache-2.0
*/

#ifndef ZEPHYR_INCLUDE_FATAL_H
#define ZEPHYR_INCLUDE_FATAL_H

#include <arch/cpu.h>

/* TODO Items:
*
* - Delete default_esf everywhere, just pass a NULL pointer around in that
* situation
*
*/

enum k_fatal_error_reason {
/** Generic CPU exception, not covered by other codes */
K_ERR_CPU_EXCEPTION,

/** Unhandled hardware interrupt */
K_ERR_SPURIOUS_IRQ,

/** Faulting context overflowed its stack buffer */
K_ERR_STACK_CHK_FAIL,

/** Moderate severity software error */
K_ERR_KERNEL_OOPS,

/** High severity software error */
K_ERR_KERNEL_PANIC

/* TODO: add more codes for exception types that are common across
* architectures
*/
};

/**
* @brief Fatal error policy handler
*
* This function is not invoked by application code, but is declared as a
* weak symbol so that applications may introduce their own policy.
*
* The default implementation of this function halts the system
* unconditionally. Depending on architecture support, this may be
* a simple infinite loop, power off the hardware, or exit an emulator.
*
* If this function returns, then the currently executing thread will be
* aborted.
*
* A few notes for custom implementations:
*
* - If the error is determined to be unrecoverable, LOG_PANIC() should be
* invoked to flush any pending logging buffers.
* - K_ERR_KERNEL_PANIC indicates a severe unrecoverable error in the kernel
* itself, and should not be considered recoverable. There is an assertion
* in z_fatal_error() to enforce this.
* - Even outside of a kernel panic, unless the fault occurred in user mode,
* the kernel itself may be in an inconsistent state, with API calls to
* kernel objects possibly exhibiting undefined behavior or triggering
* another exception.
*
* @param reason The reason for the fatal error
* @param esf Exception context, with details and partial or full register
* state when the error occurred. May in some cases be NULL.
*/
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *esf);

/**
* Called by architecture code upon a fatal error.
*
* This function dumps out architecture-agnostic information about the error
* and then makes a policy decision on what to do by invoking
* k_sys_fatal_error_handler().
*
* On architectures where k_thread_abort() never returns, this function
* never returns either.
*
* @param reason The reason for the fatal error
* @param esf Exception context, with details and partial or full register
* state when the error occurred. May in some cases be NULL.
*/
void z_fatal_error(unsigned int reason, const NANO_ESF *esf);

#endif /* ZEPHYR_INCLUDE_FATAL_H */

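A minimal sketch (not part of the commit) of how an application could use the
weak k_sys_fatal_error_handler() symbol declared above; the policy chosen
here, treating panics and stack overflows as unrecoverable and letting every
other reason abort only the faulting thread, is purely illustrative:

#include <zephyr.h>
#include <logging/log_ctrl.h>
#include <fatal.h>

void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *esf)
{
        ARG_UNUSED(esf);

        if (reason == K_ERR_KERNEL_PANIC || reason == K_ERR_STACK_CHK_FAIL) {
                /* Unrecoverable: flush pending log buffers, then hang with
                 * interrupts locked.
                 */
                LOG_PANIC();
                (void)irq_lock();
                for (;;) {
                        /* spin forever */
                }
        }

        /* For other reasons, returning lets z_fatal_error() abort only the
         * faulting thread and keep the rest of the system running.
         */
}
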
@ -4658,8 +4658,7 @@ extern void z_sys_power_save_idle_exit(s32_t ticks);
*/
#define z_except_reason(reason) do { \
printk("@ %s:%d:\n", __FILE__, __LINE__); \
z_NanoFatalErrorHandler(reason, &_default_esf); \
k_thread_abort(k_current_get()); \
z_fatal_error(reason, &_default_esf); \
} while (false)

#endif /* _ARCH__EXCEPT */

@ -4670,13 +4669,13 @@ extern void z_sys_power_save_idle_exit(s32_t ticks);
* This should be called when a thread has encountered an unrecoverable
* runtime condition and needs to terminate. What this ultimately
* means is determined by the _fatal_error_handler() implementation, which
* will be called with reason code _NANO_ERR_KERNEL_OOPS.
* will be called with reason code K_ERR_KERNEL_OOPS.
*
* If this is called from ISR context, the default system fatal error handler
* will treat it as an unrecoverable system error, just like k_panic().
* @req K-MISC-003
*/
#define k_oops() z_except_reason(_NANO_ERR_KERNEL_OOPS)
#define k_oops() z_except_reason(K_ERR_KERNEL_OOPS)

/**
* @brief Fatally terminate the system

@ -4684,10 +4683,10 @@ extern void z_sys_power_save_idle_exit(s32_t ticks);
* This should be called when the Zephyr kernel has encountered an
* unrecoverable runtime condition and needs to terminate. What this ultimately
* means is determined by the _fatal_error_handler() implementation, which
* will be called with reason code _NANO_ERR_KERNEL_PANIC.
* will be called with reason code K_ERR_KERNEL_PANIC.
* @req K-MISC-004
*/
#define k_panic() z_except_reason(_NANO_ERR_KERNEL_PANIC)
#define k_panic() z_except_reason(K_ERR_KERNEL_PANIC)

/*
* private APIs that are utilized by one or more public APIs

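A short usage sketch of the two macros documented above; sensor_process() and
its status argument are hypothetical and exist only for illustration:

#include <kernel.h>

void sensor_process(int status)
{
        if (status < 0) {
                /* Reports K_ERR_KERNEL_OOPS; if the (possibly overridden)
                 * k_sys_fatal_error_handler() returns, only the calling
                 * thread is aborted.
                 */
                k_oops();
        }

        if (status == 0xdead) {
                /* Reports K_ERR_KERNEL_PANIC, which the error handling code
                 * treats as unrecoverable for the whole system.
                 */
                k_panic();
        }
}
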
@ -35,5 +35,6 @@
#include <sys/rb.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <fatal.h>

#endif /* ZEPHYR_INCLUDE_KERNEL_INCLUDES_H_ */

@ -5,6 +5,7 @@
add_library(kernel
device.c
errno.c
fatal.c
idle.c
init.c
mailbox.c

@ -36,7 +36,7 @@ void FUNC_NORETURN _StackCheckHandler(void)
{
/* Stack canary error is a software fatal condition; treat it as such.
*/
z_except_reason(_NANO_ERR_STACK_CHK_FAIL);
z_except_reason(K_ERR_STACK_CHK_FAIL);
CODE_UNREACHABLE;
}

118 kernel/fatal.c Normal file
@ -0,0 +1,118 @@
/*
* Copyright (c) 2019 Intel Corporation.
*
* SPDX-License-Identifier: Apache-2.0
*/

#include <kernel.h>
#include <kernel_internal.h>
#include <sys/printk.h>
#include <sys/__assert.h>
#include <arch/cpu.h>
#include <logging/log_ctrl.h>
#include <fatal.h>

/* LCOV_EXCL_START */
FUNC_NORETURN __weak void z_arch_system_halt(unsigned int reason)
{
ARG_UNUSED(reason);

/* TODO: What's the best way to totally halt the system if SMP
* is enabled?
*/

(void)z_arch_irq_lock();
for (;;) {
k_cpu_idle();
}
}
/* LCOV_EXCL_STOP */

/* LCOV_EXCL_START */
__weak void k_sys_fatal_error_handler(unsigned int reason,
const NANO_ESF *esf)
{
ARG_UNUSED(esf);

LOG_PANIC();

printk("Halting system.\n");
z_arch_system_halt(reason);
CODE_UNREACHABLE;
}
/* LCOV_EXCL_STOP */

static const char *thread_name_get(struct k_thread *thread)
{
const char *thread_name = k_thread_name_get(thread);

if (thread_name == NULL || thread_name[0] == '\0') {
thread_name = "unknown";
}

return thread_name;
}

static const char *reason_to_str(unsigned int reason)
{
switch (reason) {
case K_ERR_CPU_EXCEPTION:
return "CPU exception";
case K_ERR_SPURIOUS_IRQ:
return "Unhandled interrupt";
case K_ERR_STACK_CHK_FAIL:
return "Stack overflow";
case K_ERR_KERNEL_OOPS:
return "Kernel oops";
case K_ERR_KERNEL_PANIC:
return "Kernel panic";
default:
return "Unknown error";
}
}

void z_fatal_error(unsigned int reason, const NANO_ESF *esf)
{
struct k_thread *thread = k_current_get();

/* TODO: Replace all printk()s here and in arch error handling code
* to some special printk_fatal() function, which enables panic
* mode and routes messages to printk or LOG subsystem appropriately
* based on configuration.
*/

#ifdef CONFIG_PRINTK
printk(">>> ZEPHYR FATAL ERROR %d: %s\n", reason,
reason_to_str(reason));

/* FIXME: This doesn't seem to work as expected on all arches.
* Need a reliable way to determine whether the fault happened when
* an IRQ or exception was being handled, or thread context.
*
* See #17656
*
* if (k_is_in_isr()) {
* printk("Fault during interrupt handling\n");
* }
*/

printk("Current thread: %p (%s)\n", thread, thread_name_get(thread));
#endif /* CONFIG_PRINTK */

k_sys_fatal_error_handler(reason, esf);

/* If the system fatal error handler returns, then kill the faulting
* thread; a policy decision was made not to hang the system.
*
* Note that k_thread_abort() returns on some architectures but
* not others; e.g. on ARC, x86_64, Xtensa with ASM2, ARM
*/
if (!IS_ENABLED(CONFIG_TEST)) {
__ASSERT(reason != K_ERR_KERNEL_PANIC,
"Attempted to recover from a kernel panic condition");
/* FIXME: #17656 */
__ASSERT(!k_is_in_isr(),
"Attempted to recover from a fatal error in ISR");
}
k_thread_abort(thread);
}

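A hedged sketch (not in the commit) of how a simulator target could override
the weak z_arch_system_halt() defined above, mirroring the XT_SIMULATOR case
shown earlier in this diff; the use of exit() assumes a hosted or emulated
environment:

#include <kernel.h>
#include <kernel_internal.h>
#include <stdlib.h>

FUNC_NORETURN void z_arch_system_halt(unsigned int reason)
{
        /* Return the reason code to the host so test harnesses can see it. */
        exit(255 - reason);
}
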
@ -267,6 +267,11 @@ extern int z_stack_adjust_initialized;
extern void z_arch_busy_wait(u32_t usec_to_wait);
#endif

/**
* TODO: document
*/
extern FUNC_NORETURN void z_arch_system_halt(unsigned int reason);

#ifdef __cplusplus
}
#endif

@ -310,7 +310,7 @@ void z_check_stack_sentinel(void)
if (*stack != STACK_SENTINEL) {
/* Restore it so further checks don't trigger this same error */
*stack = STACK_SENTINEL;
z_except_reason(_NANO_ERR_STACK_CHK_FAIL);
z_except_reason(K_ERR_STACK_CHK_FAIL);
}
}
#endif

@ -44,6 +44,8 @@ extern "C" {
/* FIXME: Properly integrate with Zephyr's arch specific code */
#define CONFIG_X86 1
#define CONFIG_PRINTK 1
struct esf;
typedef struct esf NANO_ESF;
#endif

#include <sys/printk.h>

@ -48,7 +48,7 @@ void ztest_test_fail(void);
*
* Normally a test passes just by returning without an assertion failure.
* However, if the success case for your test involves a fatal fault,
* you can call this function from z_SysFatalErrorHandler to indicate that
* you can call this function from k_sys_fatal_error_handler to indicate that
* the test passed before aborting the thread.
*/
void ztest_test_pass(void);

@ -48,6 +48,11 @@ static volatile int int_handler_executed;
/* Assume the spurious interrupt handler will execute and abort the task */
static volatile int spur_handler_aborted_thread = 1;

void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *esf)
{
zassert_equal(reason, K_ERR_SPURIOUS_IRQ, "wrong error reason");
zassert_equal(k_current_get(), &my_thread, "wrong thread crashed");
}

/**
* Handler to perform various actions from within an ISR context

@ -44,37 +44,10 @@ volatile int rv;

static volatile int crash_reason;

/* On some architectures, k_thread_abort(_current) will return instead
* of z_swap'ing away.
*
* On ARM the PendSV exception is queued and immediately fires upon
* completing the exception path; the faulting thread is never run
* again.
*
* On Xtensa/asm2 and x86_64 the handler is running in interrupt
* context and on the interrupt stack and needs to return through the
* interrupt exit code.
*
* In both cases the thread is guaranteed never to run again once we
* return from the z_SysFatalErrorHandler().
*/
#if !(defined(CONFIG_ARM) || defined(CONFIG_XTENSA_ASM2) \
|| defined(CONFIG_ARC) || defined(CONFIG_X86_64))
#define ERR_IS_NORETURN 1
#endif

#ifdef ERR_IS_NORETURN
FUNC_NORETURN
#endif
void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *pEsf)
{
TC_PRINT("Caught system error -- reason %d\n", reason);
crash_reason = reason;

k_thread_abort(_current);
#ifdef ERR_IS_NORETURN
CODE_UNREACHABLE;
#endif
}

void alt_thread1(void)

@ -226,9 +199,9 @@ void check_stack_overflow(void *handler, u32_t flags)
NULL, NULL, NULL, K_PRIO_PREEMPT(PRIORITY), flags,
K_NO_WAIT);

zassert_equal(crash_reason, _NANO_ERR_STACK_CHK_FAIL,
zassert_equal(crash_reason, K_ERR_STACK_CHK_FAIL,
"bad reason code got %d expected %d\n",
crash_reason, _NANO_ERR_STACK_CHK_FAIL);
crash_reason, K_ERR_STACK_CHK_FAIL);
zassert_not_equal(rv, TC_FAIL, "thread was not aborted");
}
#endif /* !CONFIG_ARCH_POSIX */

@ -276,9 +249,9 @@ void test_fatal(void)
NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
K_NO_WAIT);
k_thread_abort(&alt_thread);
zassert_equal(crash_reason, _NANO_ERR_KERNEL_OOPS,
zassert_equal(crash_reason, K_ERR_KERNEL_OOPS,
"bad reason code got %d expected %d\n",
crash_reason, _NANO_ERR_KERNEL_OOPS);
crash_reason, K_ERR_KERNEL_OOPS);
zassert_not_equal(rv, TC_FAIL, "thread was not aborted");

TC_PRINT("test alt thread 3: initiate kernel panic\n");

@ -288,9 +261,9 @@ void test_fatal(void)
NULL, NULL, NULL, K_PRIO_COOP(PRIORITY), 0,
K_NO_WAIT);
k_thread_abort(&alt_thread);
zassert_equal(crash_reason, _NANO_ERR_KERNEL_PANIC,
zassert_equal(crash_reason, K_ERR_KERNEL_PANIC,
"bad reason code got %d expected %d\n",
crash_reason, _NANO_ERR_KERNEL_PANIC);
crash_reason, K_ERR_KERNEL_PANIC);
zassert_not_equal(rv, TC_FAIL, "thread was not aborted");

TC_PRINT("test alt thread 4: fail assertion\n");

@ -301,9 +274,9 @@ void test_fatal(void)
K_NO_WAIT);
k_thread_abort(&alt_thread);
/* Default assert_post_action() induces a kernel panic */
zassert_equal(crash_reason, _NANO_ERR_KERNEL_PANIC,
zassert_equal(crash_reason, K_ERR_KERNEL_PANIC,
"bad reason code got %d expected %d\n",
crash_reason, _NANO_ERR_KERNEL_PANIC);
crash_reason, K_ERR_KERNEL_PANIC);
zassert_not_equal(rv, TC_FAIL, "thread was not aborted");

#ifndef CONFIG_ARCH_POSIX

@ -19,7 +19,7 @@ K_SEM_DEFINE(barrier_sem,

ZTEST_BMEM bool valid_fault;

void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *pEsf)
{
printk("Caught system error -- reason %d %d\n", reason, valid_fault);
if (valid_fault) {

@ -25,7 +25,7 @@
#if !(defined(CONFIG_ARM) || defined(CONFIG_ARC))
FUNC_NORETURN
#endif
void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *pEsf)
{
INFO("Caught system error -- reason %d\n", reason);
ztest_test_pass();

@ -15,6 +15,11 @@
ZTEST_BMEM static int count;
ZTEST_BMEM static int ret = TC_PASS;

void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *esf)
{
zassert_equal(reason, K_ERR_STACK_CHK_FAIL, "wrong error type");
}

void check_input(const char *name, const char *input);

/**

@ -57,14 +57,14 @@ K_APP_DMEM(part0) bool mem_access_check;
K_APP_BMEM(part0) static volatile bool expect_fault;

#if defined(CONFIG_X86)
#define REASON_HW_EXCEPTION _NANO_ERR_CPU_EXCEPTION
#define REASON_KERNEL_OOPS _NANO_ERR_KERNEL_OOPS
#define REASON_HW_EXCEPTION K_ERR_CPU_EXCEPTION
#define REASON_KERNEL_OOPS K_ERR_KERNEL_OOPS
#elif defined(CONFIG_ARM)
#define REASON_HW_EXCEPTION _NANO_ERR_HW_EXCEPTION
#define REASON_KERNEL_OOPS _NANO_ERR_HW_EXCEPTION
#define REASON_HW_EXCEPTION K_ERR_CPU_EXCEPTION
#define REASON_KERNEL_OOPS K_ERR_CPU_EXCEPTION
#elif defined(CONFIG_ARC)
#define REASON_HW_EXCEPTION _NANO_ERR_HW_EXCEPTION
#define REASON_KERNEL_OOPS _NANO_ERR_KERNEL_OOPS
#define REASON_HW_EXCEPTION K_ERR_CPU_EXCEPTION
#define REASON_KERNEL_OOPS K_ERR_KERNEL_OOPS
#else
#error "Not implemented for this architecture"
#endif

@ -86,7 +86,7 @@ K_APP_BMEM(part0) static volatile unsigned int expected_reason;
#if !(defined(CONFIG_ARM) || defined(CONFIG_ARC))
FUNC_NORETURN
#endif
void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *pEsf)
{
INFO("Caught system error -- reason %d\n", reason);
/*

@ -675,7 +675,7 @@ void pipe_put_get_timeout(void)

/******************************************************************************/
ZTEST_BMEM bool valid_fault;
void z_SysFatalErrorHandler(unsigned int reason, const NANO_ESF *pEsf)
void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *pEsf)
{
printk("Caught system error -- reason %d\n", reason);
if (valid_fault) {

@ -13,6 +13,13 @@
static K_THREAD_STACK_DEFINE(dyn_thread_stack, STACKSIZE);
static K_SEM_DEFINE(start_sem, 0, 1);
static K_SEM_DEFINE(end_sem, 0, 1);
static ZTEST_BMEM struct k_thread *dyn_thread;

void k_sys_fatal_error_handler(unsigned int reason, const NANO_ESF *esf)
{
zassert_equal(reason, K_ERR_KERNEL_OOPS, "wrong error reason");
zassert_equal(k_current_get(), dyn_thread, "wrong thread crashed");
}

static void dyn_thread_entry(void *p1, void *p2, void *p3)
{

@ -29,7 +36,6 @@ static void prep(void)

static void create_dynamic_thread(void)
{
struct k_thread *dyn_thread;
k_tid_t tid;

dyn_thread = k_object_alloc(K_OBJ_THREAD);