arch: arm: aarch32: Rework non-Cortex-M exception handling

This commit reworks the ARM AArch32 non-Cortex-M (i.e. Cortex-A and
Cortex-R) exception handling to establish the base exception handling
framework and support detailed exception information reporting.

Signed-off-by: Stephanos Ioannidis <root@stephanos.io>
Author:    Stephanos Ioannidis <root@stephanos.io>
Date:      2020-04-08 20:16:27 +09:00
Committer: Carles Cufí
Commit:    ae427177c0

7 changed files with 380 additions and 52 deletions

File 1 of 7: Kconfig (Cortex-R CPU options)

@@ -25,6 +25,7 @@ config CPU_CORTEX_R
 	select CPU_CORTEX
 	select HAS_CMSIS_CORE
 	select HAS_FLASH_LOAD_OFFSET
+	select ARCH_HAS_THREAD_ABORT
 	help
 	  This option signifies the use of a CPU of the Cortex-R family.
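
(Context, not stated in the diff itself: selecting ARCH_HAS_THREAD_ABORT tells the kernel that this architecture supplies its own k_thread_abort() implementation in place of the generic kernel one; the new thread_abort.c added below provides it.)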

File 2 of 7: CMakeLists.txt (Cortex-A/R core sources)

@@ -11,4 +11,5 @@ zephyr_library_sources(
 	irq_init.c
 	reboot.c
 	stacks.c
+	thread_abort.c
 )

File 3 of 7: exception handler assembly (z_arm_undef_instruction / z_arm_prefetch_abort / z_arm_data_abort)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2014 Wind River Systems, Inc.
+ * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -8,7 +8,19 @@
  * @file
  * @brief Exception handlers for ARM Cortex-A and Cortex-R
  *
- * Exception handlers for ARM Cortex-A and Cortex-R processors.
+ * This file implements the exception handlers (undefined instruction, prefetch
+ * abort and data abort) for ARM Cortex-A and Cortex-R processors.
+ *
+ * All exception handlers save the exception stack frame into the exception
+ * mode stack rather than the system mode stack, in order to ensure predictable
+ * exception behaviour (i.e. an arbitrary thread stack overflow cannot cause
+ * exception handling and thereby subsequent total system failure).
+ *
+ * In case the exception is due to a fatal (unrecoverable) fault, the fault
+ * handler is responsible for invoking the architecture fatal exception handler
+ * (z_arm_fatal_error) which invokes the kernel fatal exception handler
+ * (z_fatal_error) that either locks up the system or aborts the current thread
+ * depending on the application exception handler implementation.
  */

 #include <toolchain.h>
@@ -27,32 +39,110 @@ GTEXT(z_arm_prefetch_abort)
 GTEXT(z_arm_data_abort)

 /**
+ * @brief Undefined instruction exception handler
  *
- * @brief Generic exception handler
- *
- * A generic exception handler that simply invokes z_arm_fault() with currently
- * unused arguments.
- *
- * Provides these symbols:
- *
- *   z_arm_undef_instruction
- *   z_arm_prefetch_abort
- *   z_arm_data_abort
+ * An undefined instruction (UNDEF) exception is generated when an undefined
+ * instruction, or a VFP instruction when the VFP is not enabled, is
+ * encountered.
  */
 SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_undef_instruction)
-SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_prefetch_abort)
-SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_data_abort)
 	/*
-	 * Pass null for the esf to z_arm_fault for now. A future PR will add
-	 * better exception debug for Cortex-R that subsumes what esf
-	 * provides.
+	 * The undefined instruction address is offset by 2 if the previous
+	 * mode is Thumb; otherwise, it is offset by 4.
 	 */
-	mov r0, #0
-	bl z_arm_fault
+	push {r0}
+	mrs r0, spsr
+	tst r0, #T_BIT
+	subeq lr, #4 /* ARM (!T_BIT) */
+	subne lr, #2 /* Thumb (T_BIT) */
+	pop {r0}

-	pop {r0, lr}
-	subs pc, lr, #8
+	/*
+	 * Store r0-r3, r12, lr, lr_und and spsr_und into the stack to
+	 * construct an exception stack frame.
+	 */
+	srsdb sp, #MODE_UND!
+	stmfd sp, {r0-r3, r12, lr}^
+	sub sp, #24

-	.end
+	/* Increment exception nesting count */
+	ldr r2, =_kernel
+	ldr r0, [r2, #_kernel_offset_to_nested]
+	add r0, r0, #1
+	str r0, [r2, #_kernel_offset_to_nested]
+
+	/* Invoke fault handler */
+	mov r0, sp
+	bl z_arm_fault_undef_instruction
+
+	/* Exit exception */
+	b z_arm_exc_exit
+
+/**
+ * @brief Prefetch abort exception handler
+ *
+ * A prefetch abort (PABT) exception is generated when the processor marks the
+ * prefetched instruction as invalid and the instruction is executed.
+ */
+SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_prefetch_abort)
+	/*
+	 * The faulting instruction address is always offset by 4 for the
+	 * prefetch abort exceptions.
+	 */
+	sub lr, #4
+
+	/*
+	 * Store r0-r3, r12, lr, lr_abt and spsr_abt into the stack to
+	 * construct an exception stack frame.
+	 */
+	srsdb sp, #MODE_ABT!
+	stmfd sp, {r0-r3, r12, lr}^
+	sub sp, #24
+
+	/* Increment exception nesting count */
+	ldr r2, =_kernel
+	ldr r0, [r2, #_kernel_offset_to_nested]
+	add r0, r0, #1
+	str r0, [r2, #_kernel_offset_to_nested]
+
+	/* Invoke fault handler */
+	mov r0, sp
+	bl z_arm_fault_prefetch
+
+	/* Exit exception */
+	b z_arm_exc_exit
+
+/**
+ * @brief Data abort exception handler
+ *
+ * A data abort (DABT) exception is generated when an error occurs on a data
+ * memory access. This exception can be either synchronous or asynchronous,
+ * depending on the type of fault that caused it.
+ */
+SECTION_SUBSEC_FUNC(TEXT, __exc, z_arm_data_abort)
+	/*
+	 * The faulting instruction address is always offset by 8 for the data
+	 * abort exceptions.
+	 */
+	sub lr, #8
+
+	/*
+	 * Store r0-r3, r12, lr, lr_abt and spsr_abt into the stack to
+	 * construct an exception stack frame.
+	 */
+	srsdb sp, #MODE_ABT!
+	stmfd sp, {r0-r3, r12, lr}^
+	sub sp, #24
+
+	/* Increment exception nesting count */
+	ldr r2, =_kernel
+	ldr r0, [r2, #_kernel_offset_to_nested]
+	add r0, r0, #1
+	str r0, [r2, #_kernel_offset_to_nested]
+
+	/* Invoke fault handler */
+	mov r0, sp
+	bl z_arm_fault_data
+
+	/* Exit exception */
+	b z_arm_exc_exit
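
As an aside, the frame these three handlers build can be pictured as a C struct. This is an illustrative sketch only (the commit defines no such type and the names here are hypothetical): srsdb pushes the exception-mode LR and SPSR, stmfd with the ^ suffix stores the banked system-mode registers below them without writeback, and sub sp, #24 accounts for those six 4-byte words, leaving sp pointing at r0.

#include <stdint.h>

/* Illustrative sketch of the 32-byte exception stack frame; the lowest
 * address (where sp points when the C fault handler is invoked) first.
 */
struct esf_sketch {
	uint32_t r0, r1, r2, r3; /* caller-saved GPRs (stmfd) */
	uint32_t r12;            /* intra-procedure scratch (stmfd) */
	uint32_t lr;             /* banked system-mode lr (stmfd ..^) */
	uint32_t lr_exc;         /* exception return address (srsdb) */
	uint32_t spsr;           /* saved program status (srsdb) */
};

The 32-byte total matches the add sp, #32 with which z_arm_exc_exit discards the frame on the fatal path, and mov r0, sp hands a pointer to this layout to the C fault handlers as their z_arch_esf_t argument.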

File 4 of 7: exception exit assembly (z_arm_int_exit / z_arm_exc_exit)

@@ -48,37 +48,20 @@ GDATA(_kernel)
  *     z_arm_int_exit();
  * }
  */
 SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_int_exit)

-/* z_arm_int_exit falls through to z_arm_exc_exit (they are aliases of each
- * other)
- */
-
-/**
- *
- * @brief Kernel housekeeping when exiting exception handler installed
- * directly in vector table
- *
- * See z_arm_int_exit().
- *
- * @return N/A
- */
-SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_exc_exit)
-
 #ifdef CONFIG_PREEMPT_ENABLED
 	/* Do not context switch if exiting a nested interrupt */
 	ldr r3, =_kernel
 	ldr r0, [r3, #_kernel_offset_to_nested]
 	cmp r0, #1
-	bhi _EXIT_EXC
+	bhi __EXIT_INT

 	ldr r1, [r3, #_kernel_offset_to_current]
 	ldr r0, [r3, #_kernel_offset_to_ready_q_cache]
 	cmp r0, r1
 	blne z_arm_pendsv

-_EXIT_EXC:
+__EXIT_INT:
 #endif /* CONFIG_PREEMPT_ENABLED */

 #ifdef CONFIG_STACK_SENTINEL
@@ -109,3 +92,68 @@ _EXIT_EXC:
 	cps #MODE_SYS
 	pop {r0-r3, r12, lr}
 	rfeia sp!
+
+/**
+ * @brief Kernel housekeeping when exiting exception handler
+ *
+ * The exception exit routine performs appropriate housekeeping tasks depending
+ * on the mode of exit:
+ *
+ * If exiting a nested or non-fatal exception, the exit routine restores the
+ * saved exception stack frame to resume the excepted context.
+ *
+ * If exiting a non-nested fatal exception, the exit routine, assuming that the
+ * current faulting thread is aborted, discards the saved exception stack
+ * frame containing the aborted thread context and switches to the next
+ * scheduled thread.
+ *
+ * void z_arm_exc_exit(bool fatal)
+ *
+ * @param fatal True if exiting from a fatal fault; otherwise, false
+ */
+SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_exc_exit)
+	/* Do not context switch if exiting a nested exception */
+	ldr r3, =_kernel
+	ldr r1, [r3, #_kernel_offset_to_nested]
+	cmp r1, #1
+	bhi __EXIT_EXC
+
+	/* If the fault is not fatal, return to the current thread context */
+	cmp r0, #0
+	beq __EXIT_EXC
+
+	/*
+	 * If the fault is fatal, the current thread must have been aborted by
+	 * the exception handler. Clean up the exception stack frame and switch
+	 * to the next scheduled thread.
+	 */
+
+	/* Clean up exception stack frame */
+	add sp, #32
+
+	/* Switch in the next scheduled thread */
+	bl z_arm_pendsv
+
+	/* Decrement exception nesting count */
+	ldr r0, [r3, #_kernel_offset_to_nested]
+	sub r0, r0, #1
+	str r0, [r3, #_kernel_offset_to_nested]
+
+	/* Return to the switched thread */
+	cps #MODE_SYS
+	pop {r0-r3, r12, lr}
+	rfeia sp!
+
+__EXIT_EXC:
+	/* Decrement exception nesting count */
+	ldr r0, [r3, #_kernel_offset_to_nested]
+	sub r0, r0, #1
+	str r0, [r3, #_kernel_offset_to_nested]
+
+	/*
+	 * Restore r0-r3, r12, lr, lr_und and spsr_und from the exception stack
+	 * and return to the current thread.
+	 */
+	ldmia sp, {r0-r3, r12, lr}^
+	add sp, #24
+	rfeia sp!
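
The branching in z_arm_exc_exit is compact, so as a reading aid, here is a C-like sketch. It is illustrative only: the helper names are hypothetical stand-ins for the commented assembly sequences, and "nested" stands for the field reached via _kernel_offset_to_nested. Note how fatal arrives: each handler branches here right after bl z_arm_fault_*, so the handler's bool return value, still in r0 per the AAPCS, doubles as the argument.

/* C-like sketch of the exit routine above (hypothetical helpers). */
void z_arm_exc_exit_sketch(bool fatal)
{
	if (_kernel.nested == 1 && fatal) {
		/* Non-nested fatal fault: the faulting thread was already
		 * aborted by the handler, so its saved frame is dead weight.
		 */
		discard_exception_frame();    /* add sp, #32 */
		z_arm_pendsv();               /* switch in next thread */
		_kernel.nested--;
		resume_system_mode_context(); /* cps #MODE_SYS; pop; rfeia */
	} else {
		/* Nested or non-fatal exception: restore the saved frame
		 * and return to the interrupted context.
		 */
		_kernel.nested--;
		restore_exception_frame();    /* ldmia ..^; add sp, #24; rfeia */
	}
}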

File 5 of 7: fault handling C source (z_arm_fault_*)

@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
  * Copyright (c) 2018 Lexmark International, Inc.
  *
  * SPDX-License-Identifier: Apache-2.0
@@ -6,24 +7,159 @@
 #include <kernel.h>
 #include <kernel_internal.h>
-#include <kernel_structs.h>
+#include <logging/log.h>
+
+LOG_MODULE_DECLARE(os);
+
+#define FAULT_DUMP_VERBOSE	(CONFIG_FAULT_DUMP == 2)
+
+#if FAULT_DUMP_VERBOSE
+static const char *get_dbgdscr_moe_string(u32_t moe)
+{
+	switch (moe) {
+	case DBGDSCR_MOE_HALT_REQUEST:
+		return "Halt Request";
+	case DBGDSCR_MOE_BREAKPOINT:
+		return "Breakpoint";
+	case DBGDSCR_MOE_ASYNC_WATCHPOINT:
+		return "Asynchronous Watchpoint";
+	case DBGDSCR_MOE_BKPT_INSTRUCTION:
+		return "BKPT Instruction";
+	case DBGDSCR_MOE_EXT_DEBUG_REQUEST:
+		return "External Debug Request";
+	case DBGDSCR_MOE_VECTOR_CATCH:
+		return "Vector Catch";
+	case DBGDSCR_MOE_OS_UNLOCK_CATCH:
+		return "OS Unlock Catch";
+	case DBGDSCR_MOE_SYNC_WATCHPOINT:
+		return "Synchronous Watchpoint";
+	default:
+		return "Unknown";
+	}
+}
+
+static void dump_debug_event(void)
+{
+	/* Read and parse debug mode of entry */
+	u32_t dbgdscr = __get_DBGDSCR();
+	u32_t moe = (dbgdscr & DBGDSCR_MOE_Msk) >> DBGDSCR_MOE_Pos;
+
+	/* Print debug event information */
+	LOG_ERR("Debug Event (%s)", get_dbgdscr_moe_string(moe));
+}
+
+static void dump_fault(u32_t status, u32_t addr)
+{
+	/*
+	 * Dump fault status and, if applicable, status-specific information.
+	 * Note that the fault address is only displayed for the synchronous
+	 * faults because it is unpredictable for asynchronous faults.
+	 */
+	switch (status) {
+	case FSR_FS_ALIGNMENT_FAULT:
+		LOG_ERR("Alignment Fault @ 0x%08x", addr);
+		break;
+	case FSR_FS_BACKGROUND_FAULT:
+		LOG_ERR("Background Fault @ 0x%08x", addr);
+		break;
+	case FSR_FS_PERMISSION_FAULT:
+		LOG_ERR("Permission Fault @ 0x%08x", addr);
+		break;
+	case FSR_FS_SYNC_EXTERNAL_ABORT:
+		LOG_ERR("Synchronous External Abort @ 0x%08x", addr);
+		break;
+	case FSR_FS_ASYNC_EXTERNAL_ABORT:
+		LOG_ERR("Asynchronous External Abort");
+		break;
+	case FSR_FS_SYNC_PARITY_ERROR:
+		LOG_ERR("Synchronous Parity/ECC Error @ 0x%08x", addr);
+		break;
+	case FSR_FS_ASYNC_PARITY_ERROR:
+		LOG_ERR("Asynchronous Parity/ECC Error");
+		break;
+	case FSR_FS_DEBUG_EVENT:
+		dump_debug_event();
+		break;
+	default:
+		LOG_ERR("Unknown (%u)", status);
+	}
+}
+#endif

 /**
+ * @brief Undefined instruction fault handler
  *
- * @brief Fault handler
- *
- * This routine is called when fatal error conditions are detected by hardware
- * and is responsible only for reporting the error. Once reported, it then
- * invokes the user provided routine _SysFatalErrorHandler() which is
- * responsible for implementing the error handling policy.
- *
- * This is a stub for more exception handling code to be added later.
+ * @return Returns true if the fault is fatal
  */
-void z_arm_fault(z_arch_esf_t *esf, u32_t exc_return)
+bool z_arm_fault_undef_instruction(z_arch_esf_t *esf)
 {
+	/* Print fault information */
+	LOG_ERR("***** UNDEFINED INSTRUCTION ABORT *****");
+
+	/* Invoke kernel fatal exception handler */
 	z_arm_fatal_error(K_ERR_CPU_EXCEPTION, esf);
+
+	/* All undefined instructions are treated as fatal for now */
+	return true;
 }

+/**
+ * @brief Prefetch abort fault handler
+ *
+ * @return Returns true if the fault is fatal
+ */
+bool z_arm_fault_prefetch(z_arch_esf_t *esf)
+{
+	/* Read and parse Instruction Fault Status Register (IFSR) */
+	u32_t ifsr = __get_IFSR();
+	u32_t fs = ((ifsr & IFSR_FS1_Msk) >> 6) | (ifsr & IFSR_FS0_Msk);
+
+	/* Read Instruction Fault Address Register (IFAR) */
+	u32_t ifar = __get_IFAR();
+
+	/* Print fault information */
+	LOG_ERR("***** PREFETCH ABORT *****");
+	if (FAULT_DUMP_VERBOSE) {
+		dump_fault(fs, ifar);
+	}
+
+	/* Invoke kernel fatal exception handler */
+	z_arm_fatal_error(K_ERR_CPU_EXCEPTION, esf);
+
+	/* All prefetch aborts are treated as fatal for now */
+	return true;
+}
+
+/**
+ * @brief Data abort fault handler
+ *
+ * @return Returns true if the fault is fatal
+ */
+bool z_arm_fault_data(z_arch_esf_t *esf)
+{
+	/* Read and parse Data Fault Status Register (DFSR) */
+	u32_t dfsr = __get_DFSR();
+	u32_t fs = ((dfsr & DFSR_FS1_Msk) >> 6) | (dfsr & DFSR_FS0_Msk);
+
+	/* Read Data Fault Address Register (DFAR) */
+	u32_t dfar = __get_DFAR();
+
+	/* Print fault information */
+	LOG_ERR("***** DATA ABORT *****");
+	if (FAULT_DUMP_VERBOSE) {
+		dump_fault(fs, dfar);
+	}
+
+	/* Invoke kernel fatal exception handler */
+	z_arm_fatal_error(K_ERR_CPU_EXCEPTION, esf);
+
+	/* All data aborts are treated as fatal for now */
+	return true;
+}
+
+/**
+ * @brief Initialisation of fault handling
+ */
 void z_arm_fault_init(void)
 {
+	/* Nothing to do for now */
 }
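
A note on the fs computation above: on ARMv7-A/R the 5-bit fault status field is split across the fault status register, with FS[3:0] in bits [3:0] and FS[4] in bit [10], which is why the code shifts the FS1 part right by 6 to seat bit 10 at bit 4. A hedged worked example follows, assuming (as the shift implies) that DFSR_FS1_Msk is bit 10 and DFSR_FS0_Msk is bits [3:0]:

/* Worked example: an asynchronous external abort is encoded as
 * FS = 0b10110 (0x16) in the ARMv7 short-descriptor format, i.e.
 * DFSR bit 10 set and DFSR[3:0] = 0b0110.
 */
u32_t dfsr = (1U << 10) | 0x6;           /* value as read from the DFSR */
u32_t fs = ((dfsr & DFSR_FS1_Msk) >> 6)  /* bit 10 -> bit 4 */
	 | (dfsr & DFSR_FS0_Msk);        /* bits [3:0] unchanged */
/* fs == 0x16 -> FSR_FS_ASYNC_EXTERNAL_ABORT */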

File 6 of 7: thread_abort.c (new file)

@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
+ * Copyright (c) 2016 Wind River Systems, Inc.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/**
+ * @file
+ * @brief ARM Cortex-A and Cortex-R k_thread_abort() routine
+ *
+ * The ARM Cortex-A and Cortex-R architectures provide their own
+ * k_thread_abort() to deal with different CPU modes when a thread aborts.
+ */
+
+#include <kernel.h>
+#include <kswap.h>
+
+extern void z_thread_single_abort(struct k_thread *thread);
+
+void z_impl_k_thread_abort(k_tid_t thread)
+{
+	__ASSERT(!(thread->base.user_options & K_ESSENTIAL),
+		 "essential thread aborted");
+
+	z_thread_single_abort(thread);
+	z_thread_monitor_exit(thread);
+
+	/*
+	 * Swap context if and only if the thread is not aborted inside an
+	 * interrupt/exception handler; it is not necessary to swap context
+	 * inside an interrupt/exception handler because the handler swaps
+	 * context when exiting.
+	 */
+	if (!arch_is_in_isr()) {
+		if (thread == _current) {
+			/* Direct use of swap: reschedule doesn't have a test
+			 * for "is _current dead" and we don't want one for
+			 * performance reasons.
+			 */
+			z_swap_unlocked();
+		} else {
+			z_reschedule_unlocked();
+		}
+	}
+}
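
A minimal usage sketch of the two abort paths described in the comment above (the thread object, stack, and function names here are hypothetical application code, not part of the commit). Aborting another thread returns to the caller after z_reschedule_unlocked(); a thread aborting itself never returns, because z_swap_unlocked() switches away for good.

K_THREAD_STACK_DEFINE(worker_stack, 1024);
static struct k_thread worker;

void stop_worker(void)
{
	/* Aborting another thread: the caller continues afterwards */
	k_thread_abort(&worker);
}

void worker_entry(void *a, void *b, void *c)
{
	/* A thread aborting itself: this call never returns */
	k_thread_abort(k_current_get());
}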

File 7 of 7: include/arch/arm/aarch32/exc.h

@@ -15,6 +15,7 @@
 #ifndef ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_EXC_H_
 #define ZEPHYR_INCLUDE_ARCH_ARM_AARCH32_EXC_H_

+#if defined(CONFIG_CPU_CORTEX_M)
 #include <devicetree.h>
 #include <arch/arm/aarch32/cortex_m/nvic.h>
@@ -61,6 +62,7 @@
 /* Use lowest possible priority level for PendSV */
 #define _EXC_PENDSV_PRIO 0xff
 #define _EXC_PENDSV_PRIO_MASK Z_EXC_PRIO(_EXC_PENDSV_PRIO)
+#endif /* CONFIG_CPU_CORTEX_M */

 #ifdef _ASMLANGUAGE
 GTEXT(z_arm_exc_exit);
@@ -91,7 +93,11 @@ struct __esf {

 typedef struct __esf z_arch_esf_t;

+#ifdef CONFIG_CPU_CORTEX_M
 extern void z_arm_exc_exit(void);
+#else
+extern void z_arm_exc_exit(bool fatal);
+#endif

 #ifdef __cplusplus
 }
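
On the new bool parameter declared here for non-Cortex-M targets: the assembly handlers never marshal it explicitly. Each handler ends with bl z_arm_fault_* followed immediately by b z_arm_exc_exit, so under the AAPCS the handler's bool return value is still sitting in r0 and is consumed directly as the fatal argument. In C terms (a sketch only, not code from the commit):

/* Equivalent C view of the handler tail sequence: the fault handler's
 * return value becomes z_arm_exc_exit()'s 'fatal' argument.
 */
bool fatal = z_arm_fault_data(esf);
z_arm_exc_exit(fatal);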