arch: arm: major cleanup and refactoring for fault function
This commit refactors and cleans up __fault, so that the function
- reduces to supplying MSP, PSP, and EXC_RETURN to the C function for fault handling
- simplifies itself, removing the conditional implementation based on ARM Secure firmware

The reason for that is simple: it is much better to write the fault handling in C instead of assembly, so that we do only what is strictly required in assembly.

Therefore, the commit refactors the z_arm_fault() function as well, organizing the different functional blocks better, that is:
- unlocking interrupts
- retrieving the ESF
- asserting for HW errors
- printing additional error logs

The refactoring unifies the way the ESF is retrieved for the different Cortex-M variants and security execution states.

Signed-off-by: Ioannis Glaropoulos <Ioannis.Glaropoulos@nordicsemi.no>
This commit is contained in:
parent
26e4d43916
commit
4aa3f71337
2 changed files with 182 additions and 158 deletions
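Before the diff itself: the core of the new approach is that the C side, not the assembly stub, decides which stack holds the exception frame. A minimal, self-contained sketch of that decision for the plain single-security-state Cortex-M case follows; the mask values and helper name are illustrative only (they follow the ARMv7-M EXC_RETURN encoding, where bit 3 indicates a return to Thread mode and bit 2 indicates that the return stack is the PSP), while the real logic lives in the get_esf() function added further down.

#include <stddef.h>
#include <stdint.h>

/* Illustrative EXC_RETURN bit masks (ARMv7-M encoding); the macros used by
 * the actual code come from Zephyr's ARM exception headers.
 */
#define EXC_RETURN_INDICATOR_PREFIX 0xFF000000U /* bits[31:24] read as 0xFF   */
#define EXC_RETURN_MODE_THREAD      0x00000008U /* bit[3]: return to Thread   */
#define EXC_RETURN_SPSEL_PROCESS    0x00000004U /* bit[2]: return stack = PSP */

/* Pick the stack that holds the hardware-stacked exception frame. */
static const uint32_t *select_frame(uint32_t msp, uint32_t psp,
				    uint32_t exc_return, int *nested_exc)
{
	*nested_exc = 0;

	if ((exc_return & EXC_RETURN_INDICATOR_PREFIX) !=
	    EXC_RETURN_INDICATOR_PREFIX) {
		return NULL; /* not a valid EXC_RETURN value */
	}

	if (exc_return & EXC_RETURN_MODE_THREAD) {
		/* Interrupted code ran in Thread mode; the frame was pushed
		 * to the process stack (Zephyr threads run with SPSEL = 1).
		 */
		return (const uint32_t *)psp;
	}

	/* Interrupted code ran in Handler mode: nested exception, MSP. */
	*nested_exc = 1;
	return (const uint32_t *)msp;
}

Zephyr runs threads on the PSP, which is why a Thread-mode return is taken to imply a process-stack frame here; the real get_esf() additionally treats an SPSEL mismatch in Thread mode as a fatal error.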
@@ -40,9 +40,6 @@ LOG_MODULE_DECLARE(os);
 #define EACD(edr) (((edr) & SYSMPU_EDR_EACD_MASK) >> SYSMPU_EDR_EACD_SHIFT)
 #endif
 
-#if defined(CONFIG_ARM_SECURE_FIRMWARE) || \
-	defined(CONFIG_ARM_NONSECURE_FIRMWARE)
-
 /* Exception Return (EXC_RETURN) is provided in LR upon exception entry.
  * It is used to perform an exception return and to detect possible state
  * transition upon exception.
@@ -102,7 +99,6 @@ LOG_MODULE_DECLARE(os);
  * to the Secure stack during a Non-Secure exception entry.
  */
 #define ADDITIONAL_STATE_CONTEXT_WORDS 10
-#endif /* CONFIG_ARM_SECURE_FIRMWARE || CONFIG_ARM_NONSECURE_FIRMWARE */
 
 /**
  *
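The ADDITIONAL_STATE_CONTEXT_WORDS definition kept by this hunk counts the extra words that ARMv8-M hardware pushes to the Secure stack when a Non-Secure exception is taken while executing in the Secure state. As a rough illustration of what those 10 words are (layout and naming here are descriptive only; the architecture, not Zephyr, defines the contents):

#include <stdint.h>

/* Illustrative view of the 10-word additional state context. */
struct additional_state_context {
	uint32_t integrity_signature;              /* architecture-defined magic, checked on return */
	uint32_t reserved;
	uint32_t r4, r5, r6, r7, r8, r9, r10, r11; /* callee-saved registers */
};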
@@ -777,6 +773,128 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf)
 #endif /* CONFIG_FAULT_DUMP== 2 */
 #endif /* CONFIG_ARM_SECURE_FIRMWARE */
 
+/*
+ * This internal function does the following:
+ *
+ * - Retrieves the exception stack frame
+ * - Evaluates whether to report being in a nested exception
+ *
+ * If the ESF is not successfully retrieved, the function signals
+ * an error by returning NULL.
+ *
+ * @return ESF pointer on success, otherwise return NULL
+ */
+static inline z_arch_esf_t *get_esf(u32_t msp, u32_t psp, u32_t exc_return,
+	bool *nested_exc)
+{
+	bool alternative_state_exc = false;
+	z_arch_esf_t *ptr_esf;
+
+	*nested_exc = false;
+
+	if ((exc_return & EXC_RETURN_INDICATOR_PREFIX) !=
+			EXC_RETURN_INDICATOR_PREFIX) {
+		/* Invalid EXC_RETURN value. This is a fatal error. */
+		return NULL;
+	}
+
+#if defined(CONFIG_ARM_SECURE_FIRMWARE)
+	if ((exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) == 0U) {
+		/* Secure Firmware shall only handle Secure Exceptions.
+		 * This is a fatal error.
+		 */
+		return NULL;
+	}
+
+	if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
+		/* Exception entry occurred in Secure stack. */
+	} else {
+		/* Exception entry occurred in Non-Secure stack. Therefore,
+		 * msp/psp point to the Secure stack, however, the actual
+		 * exception stack frame is located in the Non-Secure stack.
+		 */
+		alternative_state_exc = true;
+
+		/* Dump the Secure stack before handling the actual fault. */
+		z_arch_esf_t *secure_esf;
+
+		if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
+			/* Secure stack pointed by PSP */
+			secure_esf = (z_arch_esf_t *)psp;
+		} else {
+			/* Secure stack pointed by MSP */
+			secure_esf = (z_arch_esf_t *)msp;
+			*nested_exc = true;
+		}
+
+		SECURE_STACK_DUMP(secure_esf);
+
+		/* Handle the actual fault.
+		 * Extract the correct stack frame from the Non-Secure state
+		 * and supply it to the fault handing function.
+		 */
+		if (exc_return & EXC_RETURN_MODE_THREAD) {
+			ptr_esf = (z_arch_esf_t *)__TZ_get_PSP_NS();
+		} else {
+			ptr_esf = (z_arch_esf_t *)__TZ_get_MSP_NS();
+		}
+	}
+#elif defined(CONFIG_ARM_NONSECURE_FIRMWARE)
+	if (exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) {
+		/* Non-Secure Firmware shall only handle Non-Secure Exceptions.
+		 * This is a fatal error.
+		 */
+		return NULL;
+	}
+
+	if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
+		/* Exception entry occurred in Secure stack.
+		 *
+		 * Note that Non-Secure firmware cannot inspect the Secure
+		 * stack to determine the root cause of the fault. Fault
+		 * inspection will indicate the Non-Secure instruction
+		 * that performed the branch to the Secure domain.
+		 */
+		alternative_state_exc = true;
+
+		PR_FAULT_INFO("Exception occurred in Secure State");
+
+		if (exc_return & EXC_RETURN_SPSEL_PROCESS) {
+			/* Non-Secure stack frame on PSP */
+			ptr_esf = (z_arch_esf_t *)psp;
+		} else {
+			/* Non-Secure stack frame on MSP */
+			ptr_esf = (z_arch_esf_t *)msp;
+		}
+	} else {
+		/* Exception entry occurred in Non-Secure stack. */
+	}
+#else
+	/* The processor has a single execution state.
+	 * We verify that the Thread mode is using PSP.
+	 */
+	if ((exc_return & EXC_RETURN_MODE_THREAD) &&
+			(!(exc_return & EXC_RETURN_SPSEL_PROCESS))) {
+		PR_EXC("SPSEL in thread mode does not indicate PSP");
+		return NULL;
+	}
+#endif /* CONFIG_ARM_SECURE_FIRMWARE */
+
+	if (!alternative_state_exc) {
+		if (exc_return & EXC_RETURN_MODE_THREAD) {
+			/* Returning to thread mode */
+			ptr_esf = (z_arch_esf_t *)psp;
+
+		} else {
+			/* Returning to handler mode */
+			ptr_esf = (z_arch_esf_t *)msp;
+			*nested_exc = true;
+		}
+	}
+
+	return ptr_esf;
+}
+
 /**
  *
  * @brief ARM Fault handler
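The pointer returned by get_esf() is interpreted as a z_arch_esf_t. For orientation, the hardware-stacked "basic" part of that frame, which the handler later reads and fixes up through the basic member, has roughly the following shape; this struct is an illustrative stand-in, not Zephyr's actual definition.

#include <stdint.h>

/* Hardware-stacked basic exception frame on Cortex-M (illustrative). */
struct basic_esf_sketch {
	uint32_t r0, r1, r2, r3; /* caller-saved registers */
	uint32_t r12;
	uint32_t lr;             /* LR at the point of the fault */
	uint32_t pc;             /* return address, i.e. the faulting instruction */
	uint32_t xpsr;           /* program status; the IPSR field is non-zero in Handler mode */
};

The IPSR field of xpsr is what the new z_arm_fault() below overwrites on its stacked copy when the retrieved frame contents cannot be trusted.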
@@ -791,106 +909,66 @@ static void secure_stack_dump(const z_arch_esf_t *secure_esf)
  * The k_sys_fatal_error_handler() is invoked once the above operations are
  * completed, and is responsible for implementing the error handling policy.
  *
- * The provided ESF pointer points to the exception stack frame of the current
- * security state. Note that the current security state might not be the actual
+ * The function needs, first, to determine the exception stack frame.
+ * Note that the current security state might not be the actual
  * state in which the processor was executing, when the exception occurred.
  * The actual state may need to be determined by inspecting the EXC_RETURN
  * value, which is provided as argument to the Fault handler.
  *
- * @param esf Pointer to the exception stack frame of the current security
- * state. The stack frame may be either on the Main stack (MSP) or Process
- * stack (PSP) depending at what execution state the exception was taken.
+ * If the exception occurred in the same security state, the stack frame
+ * will be pointed to by either MSP or PSP depending on the processor
+ * execution state when the exception occurred. MSP and PSP values are
+ * provided as arguments to the Fault handler.
  *
+ * @param msp MSP value immediately after the exception occurred
+ * @param psp PSP value immediately after the exception occurred
  * @param exc_return EXC_RETURN value present in LR after exception entry.
  *
- * Note: exc_return argument shall only be used by the Fault handler if we are
- * running a Secure Firmware.
  */
-void z_arm_fault(z_arch_esf_t *esf, u32_t exc_return)
+void z_arm_fault(u32_t msp, u32_t psp, u32_t exc_return)
 {
 	u32_t reason = K_ERR_CPU_EXCEPTION;
 	int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
-	bool recoverable;
+	bool recoverable, nested_exc;
+	z_arch_esf_t *esf;
 
-#if defined(CONFIG_ARM_SECURE_FIRMWARE)
-	if ((exc_return & EXC_RETURN_INDICATOR_PREFIX) !=
-			EXC_RETURN_INDICATOR_PREFIX) {
-		/* Invalid EXC_RETURN value */
-		goto _exit_fatal;
-	}
-	if ((exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) == 0U) {
-		/* Secure Firmware shall only handle Secure Exceptions.
-		 * This is a fatal error.
-		 */
-		goto _exit_fatal;
-	}
-
-	if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
-		/* Exception entry occurred in Secure stack. */
-	} else {
-		/* Exception entry occurred in Non-Secure stack. Therefore, 'esf'
-		 * holds the Secure stack information, however, the actual
-		 * exception stack frame is located in the Non-Secure stack.
-		 */
-
-		/* Dump the Secure stack before handling the actual fault. */
-		SECURE_STACK_DUMP(esf);
-
-		/* Handle the actual fault.
-		 * Extract the correct stack frame from the Non-Secure state
-		 * and supply it to the fault handing function.
-		 */
-		if (exc_return & EXC_RETURN_MODE_THREAD) {
-			esf = (z_arch_esf_t *)__TZ_get_PSP_NS();
-			if ((SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) == 0) {
-				PR_EXC("RETTOBASE does not match EXC_RETURN");
-				goto _exit_fatal;
-			}
-		} else {
-			esf = (z_arch_esf_t *)__TZ_get_MSP_NS();
-			if ((SCB->ICSR & SCB_ICSR_RETTOBASE_Msk) != 0) {
-				PR_EXC("RETTOBASE does not match EXC_RETURN");
-				goto _exit_fatal;
-			}
-		}
-	}
-#elif defined(CONFIG_ARM_NONSECURE_FIRMWARE)
-	if ((exc_return & EXC_RETURN_INDICATOR_PREFIX) !=
-			EXC_RETURN_INDICATOR_PREFIX) {
-		/* Invalid EXC_RETURN value */
-		goto _exit_fatal;
-	}
-	if (exc_return & EXC_RETURN_EXCEPTION_SECURE_Secure) {
-		/* Non-Secure Firmware shall only handle Non-Secure Exceptions.
-		 * This is a fatal error.
-		 */
-		goto _exit_fatal;
-	}
-
-	if (exc_return & EXC_RETURN_RETURN_STACK_Secure) {
-		/* Exception entry occurred in Secure stack.
-		 *
-		 * Note that Non-Secure firmware cannot inspect the Secure
-		 * stack to determine the root cause of the fault. Fault
-		 * inspection will indicate the Non-Secure instruction
-		 * that performed the branch to the Secure domain.
-		 */
-		PR_FAULT_INFO("Exception occurred in Secure State");
-	}
-#else
-	(void) exc_return;
-#endif /* CONFIG_ARM_SECURE_FIRMWARE */
+	/* Create a stack-ed copy of the ESF to be used during
+	 * the fault handling process.
+	 */
+	z_arch_esf_t esf_copy;
+
+	/* Force unlock interrupts */
+	z_arch_irq_unlock(0);
+
+	/* Retrieve the Exception Stack Frame (ESF) to be supplied
+	 * as argument to the remainder of the fault handling process.
+	 */
+	esf = get_esf(msp, psp, exc_return, &nested_exc);
+	__ASSERT(esf != NULL,
+		"ESF could not be retrieved successfully. Shall never occur.");
 
 	reason = fault_handle(esf, fault, &recoverable);
 	if (recoverable) {
 		return;
 	}
 
-#if defined(CONFIG_ARM_SECURE_FIRMWARE) || \
-	defined(CONFIG_ARM_NONSECURE_FIRMWARE)
-_exit_fatal:
-#endif
-	z_arm_fatal_error(reason, esf);
+	/* Copy ESF */
+	memcpy(&esf_copy, esf, sizeof(z_arch_esf_t));
+
+	/* Overwrite stacked IPSR to mark a nested exception,
+	 * or a return to Thread mode. Note that this may be
+	 * required, if the retrieved ESF contents are invalid
+	 * due to, for instance, a stacking error.
+	 */
+	if (nested_exc) {
+		if ((esf_copy.basic.xpsr & IPSR_ISR_Msk) == 0) {
+			esf_copy.basic.xpsr |= IPSR_ISR_Msk;
+		}
+	} else {
+		esf_copy.basic.xpsr &= ~(IPSR_ISR_Msk);
+	}
+
+	z_arm_fatal_error(reason, &esf_copy);
 }
 
 /**
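Once the (possibly fixed-up) ESF copy reaches z_arm_fatal_error(), the error-handling policy itself is implemented by k_sys_fatal_error_handler(), as the comment at the top of this hunk notes. A sketch of an application-side override of that weak hook follows; the header names and hook signature are assumed from Zephyr's fatal-error API of this period, so treat it as illustrative rather than as part of this commit.

#include <kernel.h>
#include <fatal.h>
#include <logging/log.h>

LOG_MODULE_REGISTER(app_fatal_policy);

/* Assumed signature of the weak policy hook; if this function returns,
 * the kernel falls back to its default action (aborting the thread).
 */
void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *esf)
{
	if (esf != NULL) {
		LOG_ERR("Fatal error %u at pc 0x%08x", reason, esf->basic.pc);
	} else {
		LOG_ERR("Fatal error %u (no ESF available)", reason);
	}
	/* Returning lets the kernel apply its default policy. */
}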
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2013-2014 Wind River Systems, Inc.
+ * Copyright (c) 2017-2019 Nordic Semiconductor ASA.
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -13,7 +14,6 @@
 
 #include <toolchain.h>
 #include <linker/sections.h>
-#include <arch/cpu.h>
 
 _ASM_FILE_PROLOGUE
 
@@ -43,14 +43,18 @@ GTEXT(z_arm_reserved)
  *
  * @brief Fault handler installed in the fault and reserved vectors
  *
- * Entry point for the hard fault, MPU fault, bus fault, usage fault, debug
- * monitor and reserved exceptions.
+ * Entry point for the HardFault, MemManageFault, BusFault, UsageFault,
+ * SecureFault, Debug Monitor, and reserved exceptions.
  *
- * Save the values of the MSP and PSP in r0 and r1 respectively, so the first
- * and second parameters to the z_arm_fault() C function that will handle the
- * rest. This has to be done because at this point we do not know if the fault
- * happened while handling an exception or not, and thus the ESF could be on
- * either stack. z_arm_fault() will find out where the ESF resides.
+ * For Cortex-M: the function supplies the values of
+ * - the MSP
+ * - the PSP
+ * - the EXC_RETURN value
+ * as parameters to the z_arm_fault() C function that will perform the
+ * rest of the fault handling (i.e. z_arm_fault(MSP, PSP, EXC_RETURN)).
+ *
+ * For Cortex-R: the function simply invokes z_arm_fault() with currently
+ * unused arguments.
  *
  * Provides these symbols:
  *
@@ -83,82 +87,24 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_data_abort)
 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
 SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_reserved)
 
-#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
-	/* force unlock interrupts */
-	cpsie i
-
-	/* Use EXC_RETURN state to find out if stack frame is on the
-	 * MSP or PSP
-	 */
-	ldr r0, =0x4
-	mov r1, lr
-	tst r1, r0
-	beq _stack_frame_msp
-	mrs r0, PSP
-	bne _stack_frame_endif
-_stack_frame_msp:
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) || \
+	defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
 	mrs r0, MSP
-_stack_frame_endif:
+	mrs r1, PSP
+	mov r2, lr /* EXC_RETURN */
 
-#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
-	/* force unlock interrupts */
-	eors.n r0, r0
-	msr BASEPRI, r0
-
-#if !defined(CONFIG_ARM_SECURE_FIRMWARE) && \
-	!defined(CONFIG_ARM_NONSECURE_FIRMWARE)
-	/* this checks to see if we are in a nested exception */
-	ldr ip, =_SCS_ICSR
-	ldr ip, [ip]
-	ands.w ip, #_SCS_ICSR_RETTOBASE
-
-	ite eq		/* is the RETTOBASE bit zero ? */
-	mrseq r0, MSP	/* if so, we're not returning to thread mode,
-			 * thus this is a nested exception: the stack
-			 * frame is on the MSP */
-	mrsne r0, PSP	/* if not, we are returning to thread mode, thus
			 * this is not a nested exception: the stack
-			 * frame is on the PSP */
-#else
-	/* RETTOBASE flag is not banked between security states.
-	 * Therefore, we cannot rely on this flag, to obtain the SP
-	 * of the current security state.
-	 * Instead, we use the EXC_RETURN.SPSEL flag.
-	 */
-	ldr r0, =0x4
-	mov r1, lr
-	tst r1, r0
-	beq _s_stack_frame_msp
-	mrs r0, PSP
-	bne _s_stack_frame_endif
-_s_stack_frame_msp:
-	mrs r0, MSP
-_s_stack_frame_endif:
-#endif /* CONFIG_ARM_SECURE_FIRMWARE || CONFIG_ARM_NONSECURE_FIRMWARE */
+	push {r0, lr}
 #elif defined(CONFIG_ARMV7_R)
 	/*
 	 * Pass null for the esf to z_arm_fault for now. A future PR will add
 	 * better exception debug for Cortex-R that subsumes what esf
 	 * provides.
 	 */
 	mov r0, #0
 #else
 #error Unknown ARM architecture
-#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
+#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE || CONFIG_ARMv7_M_ARMV8_M_MAINLINE */
 
-#if defined(CONFIG_ARM_SECURE_FIRMWARE) || \
-	defined(CONFIG_ARM_NONSECURE_FIRMWARE)
-	/* The stack pointer that is retrieved above, points to the stack,
-	 * where the exception is taken. However, the exeption may have
-	 * occurred in the alternative security state.
-	 *
-	 * To determine this we need to inspect the EXC_RETURN value
-	 * located in the LR. Therefore, we supply the LR value as an
-	 * argument to the fault handler.
-	 */
-	mov r1, lr
-#endif /* CONFIG_ARM_SECURE_FIRMWARE || CONFIG_ARM_NONSECURE_FIRMWARE */
-	push {r0, lr}
 	bl z_arm_fault
 
 #if defined(CONFIG_CPU_CORTEX_M)
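Seen from the C side, the trimmed-down Cortex-M entry above reduces to a plain AAPCS call: r0, r1 and r2 are loaded with MSP, PSP and LR (the EXC_RETURN value) and arrive directly as the first three parameters. A sketch of the handshake, with the prototype mirroring the one in the C file of this diff (the u32_t typedef is provided here only to make the sketch self-contained):

#include <stdint.h>

typedef uint32_t u32_t; /* Zephyr's 32-bit alias, for the sketch */

/* Register/parameter mapping performed by __fault on Cortex-M:
 *
 *   mrs r0, MSP   ->  msp
 *   mrs r1, PSP   ->  psp
 *   mov r2, lr    ->  exc_return (EXC_RETURN)
 *   bl  z_arm_fault
 */
void z_arm_fault(u32_t msp, u32_t psp, u32_t exc_return);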