arch: arm: cortex_m: use C rather than asm in isr_wrapper & exc_exit
Asm is notoriously harder to maintain than C and requires core-specific adaptation, which further impairs the readability of the code. This is a first step in reducing the amount of ASM in arch/arm/cortex_m. Signed-off-by: Wilfried Chauveau <wilfried.chauveau@arm.com>
This commit is contained in:
parent
85feaa60e2
commit
8e55468af2
4 changed files with 102 additions and 198 deletions
|
@ -3,7 +3,7 @@
|
|||
zephyr_library()
|
||||
|
||||
zephyr_library_sources(
|
||||
exc_exit.S
|
||||
exc_exit.c
|
||||
fault.c
|
||||
fault_s.S
|
||||
fpu.c
|
||||
|
@ -20,7 +20,7 @@ zephyr_library_sources(
|
|||
)
|
||||
|
||||
zephyr_library_sources_ifndef(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER irq_init.c)
|
||||
zephyr_library_sources_ifdef(CONFIG_GEN_SW_ISR_TABLE isr_wrapper.S)
|
||||
zephyr_library_sources_ifdef(CONFIG_GEN_SW_ISR_TABLE isr_wrapper.c)
|
||||
zephyr_library_sources_ifdef(CONFIG_DEBUG_COREDUMP coredump.c)
|
||||
zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE __aeabi_read_tp.S)
|
||||
zephyr_library_sources_ifdef(CONFIG_SEMIHOST semihost.c)
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2014 Wind River Systems, Inc.
|
||||
* Copyright (c) 2023 Arm Limited
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
@ -13,16 +14,9 @@
|
|||
* wrapped around by _isr_wrapper()).
|
||||
*/
|
||||
|
||||
#include <zephyr/toolchain.h>
|
||||
#include <zephyr/linker/sections.h>
|
||||
#include <offsets_short.h>
|
||||
#include <zephyr/arch/cpu.h>
|
||||
|
||||
_ASM_FILE_PROLOGUE
|
||||
|
||||
GTEXT(z_arm_exc_exit)
|
||||
GTEXT(z_arm_int_exit)
|
||||
GDATA(_kernel)
|
||||
#include <zephyr/kernel.h>
|
||||
#include <kswap.h>
|
||||
#include <cmsis_core.h>
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -48,12 +42,7 @@ GDATA(_kernel)
|
|||
* }
|
||||
*
|
||||
*/
|
||||
|
||||
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_int_exit)
|
||||
|
||||
/* z_arm_int_exit falls through to z_arm_exc_exit (they are aliases of each
|
||||
* other)
|
||||
*/
|
||||
FUNC_ALIAS(z_arm_exc_exit, z_arm_int_exit, void);
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -63,36 +52,15 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_int_exit)
|
|||
* See z_arm_int_exit().
|
||||
*
|
||||
*/
|
||||
|
||||
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_exc_exit)
|
||||
|
||||
/**
 * @brief Kernel housekeeping when exiting an exception or interrupt handler
 *
 * If preemption is enabled and the cached next-ready thread is not the
 * currently running thread, a context switch is required: the PendSV
 * exception is pended so the switch itself happens at the lowest exception
 * priority, on return from handler mode. Optionally verifies the stack
 * sentinel before returning.
 *
 * Placed in the .text._HandlerModeExit section; z_arm_int_exit is an alias
 * of this function (see FUNC_ALIAS earlier in this file).
 */
Z_GENERIC_SECTION(.text._HandlerModeExit) void z_arm_exc_exit(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
	/* Context switch required? Pend the PendSV exception if so; it will
	 * perform the switch on exception return.
	 */
	if (_kernel.ready_q.cache != _kernel.cpus->current) {
		SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
	}
#endif /* CONFIG_PREEMPT_ENABLED */

#ifdef CONFIG_STACK_SENTINEL
	/* Check the current thread's stack sentinel for corruption. */
	z_check_stack_sentinel();
#endif /* CONFIG_STACK_SENTINEL */
}
|
|
@ -1,152 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2014 Wind River Systems, Inc.
|
||||
* Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @brief ARM Cortex-M wrapper for ISRs with parameter
|
||||
*
|
||||
* Wrapper installed in vector table for handling dynamic interrupts that accept
|
||||
* a parameter.
|
||||
*/
|
||||
/*
|
||||
 * Tell armclang that stack alignment is ensured.
|
||||
*/
|
||||
.eabi_attribute Tag_ABI_align_preserved, 1
|
||||
|
||||
#include <zephyr/toolchain.h>
|
||||
#include <zephyr/linker/sections.h>
|
||||
#include <offsets_short.h>
|
||||
#include <zephyr/arch/cpu.h>
|
||||
#include <zephyr/sw_isr_table.h>
|
||||
|
||||
|
||||
_ASM_FILE_PROLOGUE
|
||||
|
||||
GDATA(_sw_isr_table)
|
||||
|
||||
GTEXT(_isr_wrapper)
|
||||
GTEXT(z_arm_int_exit)
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Wrapper around ISRs when inserted in software ISR table
|
||||
*
|
||||
* When inserted in the vector table, _isr_wrapper() demuxes the ISR table
|
||||
* using the running interrupt number as the index, and invokes the registered
|
||||
* ISR with its corresponding argument. When returning from the ISR, it
|
||||
* determines if a context switch needs to happen (see documentation for
|
||||
* z_arm_pendsv()) and pends the PendSV exception if so: the latter will
|
||||
* perform the context switch itself.
|
||||
*
|
||||
*/
|
||||
SECTION_FUNC(TEXT, _isr_wrapper)
|
||||
|
||||
push {r0,lr} /* r0, lr are now the first items on the stack */
|
||||
|
||||
#ifdef CONFIG_TRACING_ISR
|
||||
bl sys_trace_isr_enter
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
/*
|
||||
* All interrupts are disabled when handling idle wakeup. For tickless
|
||||
* idle, this ensures that the calculation and programming of the
|
||||
* device for the next timer deadline is not interrupted. For
|
||||
* non-tickless idle, this ensures that the clearing of the kernel idle
|
||||
* state is not interrupted. In each case, z_pm_save_idle_exit
|
||||
* is called with interrupts disabled.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Disable interrupts to prevent nesting while exiting idle state. This
|
||||
* is only necessary for the Cortex-M because it is the only ARM
|
||||
* architecture variant that automatically enables interrupts when
|
||||
* entering an ISR.
|
||||
*/
|
||||
cpsid i /* PRIMASK = 1 */
|
||||
|
||||
/* is this a wakeup from idle ? */
|
||||
ldr r2, =_kernel
|
||||
/* requested idle duration, in ticks */
|
||||
ldr r0, [r2, #_kernel_offset_to_idle]
|
||||
cmp r0, #0
|
||||
|
||||
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
|
||||
beq _idle_state_cleared
|
||||
movs.n r1, #0
|
||||
/* clear kernel idle state */
|
||||
str r1, [r2, #_kernel_offset_to_idle]
|
||||
bl z_pm_save_idle_exit
|
||||
_idle_state_cleared:
|
||||
|
||||
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
|
||||
ittt ne
|
||||
movne r1, #0
|
||||
/* clear kernel idle state */
|
||||
strne r1, [r2, #_kernel_offset_to_idle]
|
||||
blne z_pm_save_idle_exit
|
||||
#else
|
||||
#error Unknown ARM architecture
|
||||
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
|
||||
|
||||
cpsie i /* re-enable interrupts (PRIMASK = 0) */
|
||||
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
#if defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
|
||||
bl z_soc_irq_get_active
|
||||
#else
|
||||
mrs r0, IPSR /* get exception number */
|
||||
#endif /* CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
|
||||
|
||||
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
|
||||
ldr r1, =16
|
||||
subs r0, r1 /* get IRQ number */
|
||||
#if defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
|
||||
push {r0}
|
||||
#endif /* CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
|
||||
lsls r0, #3 /* table is 8-byte wide */
|
||||
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
|
||||
sub r0, r0, #16 /* get IRQ number */
|
||||
#if defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
|
||||
push {r0}
|
||||
#endif /* CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
|
||||
lsl r0, r0, #3 /* table is 8-byte wide */
|
||||
#else
|
||||
#error Unknown ARM architecture
|
||||
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
|
||||
|
||||
ldr r1, =_sw_isr_table
|
||||
add r1, r1, r0 /* table entry: ISRs must have their MSB set to stay
|
||||
* in thumb mode */
|
||||
|
||||
ldm r1!,{r0,r3} /* arg in r0, ISR in r3 */
|
||||
blx r3 /* call ISR */
|
||||
|
||||
#if defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
|
||||
pop {r0}
|
||||
bl z_soc_irq_eoi
|
||||
#endif /* CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
|
||||
|
||||
#ifdef CONFIG_TRACING_ISR
|
||||
bl sys_trace_isr_exit
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
|
||||
pop {r0, r3}
|
||||
mov lr, r3
|
||||
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
|
||||
pop {r0, lr}
|
||||
#else
|
||||
#error Unknown ARM architecture
|
||||
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
|
||||
|
||||
/* Use 'bx' instead of 'b' because 'bx' can jump further, and use
|
||||
* 'bx' instead of 'blx' because exception return is done in
|
||||
* z_arm_int_exit() */
|
||||
ldr r1, =z_arm_int_exit
|
||||
bx r1
|
88
arch/arm/core/cortex_m/isr_wrapper.c
Normal file
88
arch/arm/core/cortex_m/isr_wrapper.c
Normal file
|
@ -0,0 +1,88 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2014 Wind River Systems, Inc.
|
||||
* Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
|
||||
* Copyright (c) 2023 Arm Limited
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* @brief ARM Cortex-M wrapper for ISRs with parameter
|
||||
*
|
||||
* Wrapper installed in vector table for handling dynamic interrupts that accept
|
||||
* a parameter.
|
||||
*/
|
||||
#include <zephyr/kernel.h>
|
||||
#include <zephyr/irq.h>
|
||||
#include <zephyr/pm/pm.h>
|
||||
#include <cmsis_core.h>
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Wrapper around ISRs when inserted in software ISR table
|
||||
*
|
||||
* When inserted in the vector table, _isr_wrapper() demuxes the ISR table
|
||||
* using the running interrupt number as the index, and invokes the registered
|
||||
* ISR with its corresponding argument. When returning from the ISR, it
|
||||
* determines if a context switch needs to happen (see documentation for
|
||||
* z_arm_pendsv()) and pends the PendSV exception if so: the latter will
|
||||
* perform the context switch itself.
|
||||
*
|
||||
*/
|
||||
void _isr_wrapper(void)
|
||||
{
|
||||
#ifdef CONFIG_TRACING_ISR
|
||||
sys_trace_isr_enter();
|
||||
#endif /* CONFIG_TRACING_ISR */
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
/*
|
||||
* All interrupts are disabled when handling idle wakeup. For tickless
|
||||
* idle, this ensures that the calculation and programming of the
|
||||
* device for the next timer deadline is not interrupted. For
|
||||
* non-tickless idle, this ensures that the clearing of the kernel idle
|
||||
* state is not interrupted. In each case, z_pm_save_idle_exit
|
||||
* is called with interrupts disabled.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Disable interrupts to prevent nesting while exiting idle state. This
|
||||
* is only necessary for the Cortex-M because it is the only ARM
|
||||
* architecture variant that automatically enables interrupts when
|
||||
* entering an ISR.
|
||||
*/
|
||||
__disable_irq();
|
||||
|
||||
/* is this a wakeup from idle ? */
|
||||
/* requested idle duration, in ticks */
|
||||
if (_kernel.idle != 0) {
|
||||
/* clear kernel idle state */
|
||||
_kernel.idle = 0;
|
||||
z_pm_save_idle_exit();
|
||||
}
|
||||
/* re-enable interrupts */
|
||||
__enable_irq();
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
#if defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
|
||||
int32_t irq_number = z_soc_irq_get_active();
|
||||
#else
|
||||
/* _sw_isr_table does not map the expections, only the interrupts. */
|
||||
int32_t irq_number = __get_IPSR();
|
||||
#endif
|
||||
irq_number -= 16;
|
||||
|
||||
struct _isr_table_entry *entry = &_sw_isr_table[irq_number];
|
||||
(entry->isr)(entry->arg);
|
||||
|
||||
#if defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
|
||||
z_soc_irq_eoi(irq_number);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_TRACING_ISR
|
||||
sys_trace_isr_exit();
|
||||
#endif /* CONFIG_TRACING_ISR */
|
||||
|
||||
z_arm_exc_exit();
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue