zephyr/arch/arm/core/fault.c

/*
* Copyright (c) 2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Common fault handler for ARM Cortex-M
*
* Common fault handler for ARM Cortex-M processors.
*/
#include <toolchain.h>
#include <sections.h>
#include <kernel.h>
#include <kernel_structs.h>
#include <inttypes.h>
#ifdef CONFIG_PRINTK
#include <misc/printk.h>
#define PR_EXC(...) printk(__VA_ARGS__)
#else
#define PR_EXC(...)
#endif /* CONFIG_PRINTK */
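/*
 * Note that PR_EXC() compiles away entirely when CONFIG_PRINTK is disabled,
 * so fault dumps produce no output in that configuration.
 */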
#if (CONFIG_FAULT_DUMP > 0)
#define FAULT_DUMP(esf, fault) _FaultDump(esf, fault)
#else
#define FAULT_DUMP(esf, fault) \
do { \
(void) esf; \
(void) fault; \
} while (0)
#endif
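/*
 * CONFIG_FAULT_DUMP selects how much fault information is reported:
 * 0 disables dumping, 1 prints the short form (_FaultDump() below) and
 * 2 prints the long, per-fault decoded form further down in this file.
 */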
#if (CONFIG_FAULT_DUMP == 1)
/**
*
* @brief Dump information regarding fault (FAULT_DUMP == 1)
*
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 1
* (short form).
*
* e.g. (precise bus error escalated to hard fault):
*
* Fault! EXC #3, Thread: 0x200000dc, instr @ 0x000011d3
* HARD FAULT: Escalation (see below)!
* MMFSR: 0x00000000, BFSR: 0x00000082, UFSR: 0x00000000
* BFAR: 0xff001234
*
* @return N/A
*/
void _FaultDump(const NANO_ESF *esf, int fault)
{
PR_EXC("Fault! EXC #%d, Thread: %p, instr @ 0x%" PRIx32 "\n",
fault,
k_current_get(),
esf->pc);
#if defined(CONFIG_ARMV6_M)
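/*
 * ARMv6-M implements no fault status or fault address registers, so there
 * is nothing further to report beyond the summary line above.
 */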
#elif defined(CONFIG_ARMV7_M)
int escalation = 0;
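/* exception number 3 is HardFault on all Cortex-M parts */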
if (3 == fault) { /* hard fault */
escalation = SCB->HFSR & SCB_HFSR_FORCED_Msk;
PR_EXC("HARD FAULT: %s\n",
escalation ? "Escalation (see below)!"
: "Bus fault on vector table read\n");
}
PR_EXC("MMFSR: 0x%" PRIx32 ", BFSR: 0x%" PRIx32 ", UFSR: 0x%"
PRIx32 "\n",
__scs.scb.cfsr.byte.mmfsr.val,
__scs.scb.cfsr.byte.bfsr.val,
__scs.scb.cfsr.byte.ufsr.val);
if (_ScbMemFaultIsMmfarValid()) {
PR_EXC("MMFAR: 0x%" PRIx32 "\n", _ScbMemFaultAddrGet());
if (escalation) {
_ScbMemFaultMmfarReset();
}
}
if (_ScbBusFaultIsBfarValid()) {
PR_EXC("BFAR: 0x%" PRIx32 "\n", _ScbBusFaultAddrGet());
if (escalation) {
_ScbBusFaultBfarReset();
}
}
/* clear USFR sticky bits */
_ScbUsageFaultAllFaultsReset();
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M */
}
#endif
#if (CONFIG_FAULT_DUMP == 2)
/**
*
* @brief Dump thread information
*
* See _FaultDump() for example.
*
* @return N/A
*/
static void _FaultThreadShow(const NANO_ESF *esf)
{
PR_EXC(" Executing thread ID (thread): %p\n"
" Faulting instruction address: 0x%" PRIx32 "\n",
k_current_get(), esf->pc);
}
#if defined(CONFIG_ARMV6_M)
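/*
 * On ARMv6-M all faults escalate to HardFault; the MemManage, BusFault,
 * UsageFault and debug monitor decoders below exist only for ARMv7-M.
 */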
#elif defined(CONFIG_ARMV7_M)
/**
*
* @brief Dump MPU fault information
*
* See _FaultDump() for example.
*
* @return N/A
*/
static void _MpuFault(const NANO_ESF *esf, int fromHardFault)
{
PR_EXC("***** MPU FAULT *****\n");
_FaultThreadShow(esf);
if (_ScbMemFaultIsStacking()) {
PR_EXC(" Stacking error\n");
} else if (_ScbMemFaultIsUnstacking()) {
PR_EXC(" Unstacking error\n");
} else if (_ScbMemFaultIsDataAccessViolation()) {
PR_EXC(" Data Access Violation\n");
if (_ScbMemFaultIsMmfarValid()) {
PR_EXC(" Address: 0x%" PRIx32 "\n",
_ScbMemFaultAddrGet());
if (fromHardFault) {
_ScbMemFaultMmfarReset();
}
}
} else if (_ScbMemFaultIsInstrAccessViolation()) {
PR_EXC(" Instruction Access Violation\n");
}
}
/**
*
* @brief Dump bus fault information
*
* See _FaultDump() for example.
*
* @return N/A
*/
static void _BusFault(const NANO_ESF *esf, int fromHardFault)
{
PR_EXC("***** BUS FAULT *****\n");
_FaultThreadShow(esf);
if (_ScbBusFaultIsStacking()) {
PR_EXC(" Stacking error\n");
} else if (_ScbBusFaultIsUnstacking()) {
PR_EXC(" Unstacking error\n");
} else if (_ScbBusFaultIsPrecise()) {
PR_EXC(" Precise data bus error\n");
if (_ScbBusFaultIsBfarValid()) {
PR_EXC(" Address: 0x%" PRIx32 "\n",
_ScbBusFaultAddrGet());
if (fromHardFault) {
_ScbBusFaultBfarReset();
}
}
/* it's possible to have both a precise and imprecise fault */
if (_ScbBusFaultIsImprecise()) {
PR_EXC(" Imprecise data bus error\n");
}
} else if (_ScbBusFaultIsImprecise()) {
PR_EXC(" Imprecise data bus error\n");
} else if (_ScbBusFaultIsInstrBusErr()) {
PR_EXC(" Instruction bus error\n");
}
}
/**
*
* @brief Dump usage fault information
*
* See _FaultDump() for example.
*
* @return N/A
*/
static void _UsageFault(const NANO_ESF *esf)
{
PR_EXC("***** USAGE FAULT *****\n");
_FaultThreadShow(esf);
/* bits are sticky: they stack and must be reset */
if (_ScbUsageFaultIsDivByZero()) {
PR_EXC(" Division by zero\n");
}
if (_ScbUsageFaultIsUnaligned()) {
PR_EXC(" Unaligned memory access\n");
}
if (_ScbUsageFaultIsNoCp()) {
PR_EXC(" No coprocessor instructions\n");
}
if (_ScbUsageFaultIsInvalidPcLoad()) {
PR_EXC(" Illegal load of EXC_RETURN into PC\n");
}
if (_ScbUsageFaultIsInvalidState()) {
PR_EXC(" Illegal use of the EPSR\n");
}
if (_ScbUsageFaultIsUndefinedInstr()) {
PR_EXC(" Attempt to execute undefined instruction\n");
}
_ScbUsageFaultAllFaultsReset();
}
/**
*
* @brief Dump debug monitor exception information
*
* See _FaultDump() for example.
*
* @return N/A
*/
static void _DebugMonitor(const NANO_ESF *esf)
{
ARG_UNUSED(esf);
PR_EXC("***** Debug monitor exception (not implemented) *****\n");
}
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M */
/**
*
* @brief Dump hard fault information
*
* See _FaultDump() for example.
*
* @return N/A
*/
static void _HardFault(const NANO_ESF *esf)
{
PR_EXC("***** HARD FAULT *****\n");
#if defined(CONFIG_ARMV6_M)
_FaultThreadShow(esf);
#elif defined(CONFIG_ARMV7_M)
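/*
 * VECTTBL in the HFSR flags a bus fault on a vector table read; FORCED flags
 * a configurable fault (MemManage, BusFault or UsageFault) that was escalated
 * to hard fault, e.g. because its handler was disabled or could not preempt
 * the current execution priority.
 */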
if (_ScbHardFaultIsBusErrOnVectorRead()) {
PR_EXC(" Bus fault on vector table read\n");
} else if (SCB->HFSR & SCB_HFSR_FORCED_Msk) {
PR_EXC(" Fault escalation (see below)\n");
if (_ScbIsMemFault()) {
_MpuFault(esf, 1);
} else if (_ScbIsBusFault()) {
_BusFault(esf, 1);
} else if (_ScbIsUsageFault()) {
_UsageFault(esf);
}
}
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M */
}
/**
*
* @brief Dump reserved exception information
*
* See _FaultDump() for example.
*
* @return N/A
*/
static void _ReservedException(const NANO_ESF *esf, int fault)
{
ARG_UNUSED(esf);
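/* external interrupts start at exception number 16, so IRQ = fault - 16 */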
PR_EXC("***** %s %d) *****\n",
fault < 16 ? "Reserved Exception (" : "Spurious interrupt (IRQ ",
fault - 16);
}
/**
*
* @brief Dump information regarding fault (FAULT_DUMP == 2)
*
* Dump information regarding the fault when CONFIG_FAULT_DUMP is set to 2
* (long form).
*
* e.g. (precise bus error escalated to hard fault):
*
* ***** HARD FAULT *****
* Fault escalation (see below)
* ***** BUS FAULT *****
* Executing thread ID (thread): 0x200000dc
* Faulting instruction address: 0x000011d3
* Precise data bus error
* Address: 0xff001234
*
* @return N/A
*/
static void _FaultDump(const NANO_ESF *esf, int fault)
{
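/*
 * Cortex-M fixed exception numbers: 3 is HardFault, 4 MemManage (MPU),
 * 5 BusFault, 6 UsageFault, 12 the debug monitor; 16 and above are the
 * external interrupt lines.
 */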
switch (fault) {
case 3:
_HardFault(esf);
break;
#if defined(CONFIG_ARMV6_M)
#elif defined(CONFIG_ARMV7_M)
case 4:
_MpuFault(esf, 0);
break;
case 5:
_BusFault(esf, 0);
break;
case 6:
_UsageFault(esf);
break;
case 12:
_DebugMonitor(esf);
break;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M */
default:
_ReservedException(esf, fault);
break;
}
}
#endif /* FAULT_DUMP == 2 */
/**
*
* @brief Fault handler
*
* This routine is called when fatal error conditions are detected by hardware
* and is responsible only for reporting the error. Once the error is reported,
* it invokes the user-provided routine _SysFatalErrorHandler(), which
* implements the error handling policy.
*
* Since the ESF can be located on either the MSP or the PSP depending on
* whether an exception or interrupt was already being handled, the caller
* determines which stack holds the ESF and passes a pointer to it.
*
* @param esf ESF on the stack, either MSP or PSP, depending on the processor
* state in which the exception was taken.
*
* @return This function does not return.
*/
void _Fault(const NANO_ESF *esf)
{
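/*
 * VECTACTIVE occupies the least-significant bits of the ICSR, so masking
 * with SCB_ICSR_VECTACTIVE_Msk yields the number of the exception currently
 * being serviced without any shifting.
 */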
int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
FAULT_DUMP(esf, fault);
_SysFatalErrorHandler(_NANO_ERR_HW_EXCEPTION, esf);
}
/**
*
* @brief Initialization of fault handling
*
* Enables the desired hardware fault traps.
*
* @return N/A
*/
void _FaultInit(void)
{
#if defined(CONFIG_ARMV6_M)
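/* ARMv6-M has no optional fault traps to enable */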
#elif defined(CONFIG_ARMV7_M)
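/* trap integer divide-by-zero (SDIV/UDIV) as a usage fault */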
SCB->CCR |= SCB_CCR_DIV_0_TRP_Msk;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M */
}