arch: arm: aarch32: Rework non-Cortex-M context preservation

The current context preservation implementation saves the spsr and
lr_irq registers, which contain the cpsr and pc register values of the
interrupted context, in the thread callee-saved block. This prevents
interrupt nesting, because these values must be part of the exception
stack frame in order to preserve the context of a nested interrupt.

This commit reworks the AArch32 non-Cortex-M context preservation
implementation to save the spsr and lr_irq registers in the exception
stack frame, so that both the nested interrupt context and the
interrupted thread context are preserved.

Signed-off-by: Stephanos Ioannidis <root@stephanos.io>
Commit b63a028fbc by Stephanos Ioannidis, 2020-03-20 22:16:18 +09:00
(committed by Ioannis Glaropoulos)
7 changed files with 108 additions and 70 deletions
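For context: the rework leans on the AArch32 SRS ("Store Return State") and
RFE ("Return From Exception") instructions, which move the banked
lr_irq/spsr_irq pair to and from a chosen mode's stack. A minimal sketch of
the pattern (illustrative only; MODE_SYS is the Zephyr constant for the
System mode bits):

	/* exception entry: push return state onto the SYS-mode stack */
	sub lr, lr, #4		/* IRQ: adjust lr_irq to the interrupted instruction */
	srsdb #MODE_SYS!	/* push {lr_irq, spsr_irq} onto the SYS stack */

	/* exception exit: pop pc and cpsr atomically from the same stack */
	rfeia sp!		/* pc <- lr_irq, cpsr <- spsr_irq */

Because each exception entry pushes its own {lr_irq, spsr_irq} pair onto a
stack frame instead of overwriting a single per-thread slot, a nested
interrupt no longer clobbers the saved state of the interrupt it preempted.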

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2013-2014 Wind River Systems, Inc.
+ * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -73,11 +74,17 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_int_exit)
 SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_exc_exit)
 #ifdef CONFIG_PREEMPT_ENABLED
-	ldr r0, =_kernel
+	ldr r3, =_kernel
-	ldr r1, [r0, #_kernel_offset_to_current]
+#ifndef CONFIG_CPU_CORTEX_M
+	/* Do not context switch if exiting a nested interrupt */
+	ldr r0, [r3, #_kernel_offset_to_nested]
+	cmp r0, #1
+	bhi _EXIT_EXC
+#endif /* !CONFIG_CPU_CORTEX_M */
-	ldr r0, [r0, #_kernel_offset_to_ready_q_cache]
+	ldr r1, [r3, #_kernel_offset_to_current]
+	ldr r0, [r3, #_kernel_offset_to_ready_q_cache]
 	cmp r0, r1
 	beq _EXIT_EXC
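In C terms, the new preemption gate above behaves roughly as follows; this
is a sketch with simplified types, not the actual kernel source (`nested`,
`ready_q_cache` and `current` stand for the fields addressed through the
offset symbols in the assembly):

	#include <stdbool.h>
	#include <stdint.h>

	/* Sketch of the z_arm_exc_exit preemption gate (C rendering) */
	static bool should_context_switch(uint32_t nested,
					  const void *ready_q_cache,
					  const void *current)
	{
		/* Do not context switch if exiting a nested interrupt */
		if (nested > 1) {
			return false;
		}

		/* Switch only when a different thread sits at the head
		 * of the ready queue cache.
		 */
		return ready_q_cache != current;
	}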
@@ -113,23 +120,28 @@ _EXIT_EXC:
 #if defined(CONFIG_CPU_CORTEX_M)
 	bx lr
 #elif defined(CONFIG_CPU_CORTEX_R)
+	/* Disable nested interrupts while exiting */
+	cpsid i
+
+	/* Decrement interrupt nesting count */
+	ldr r2, =_kernel
+	ldr r0, [r2, #_kernel_offset_to_nested]
+	sub r0, r0, #1
+	str r0, [r2, #_kernel_offset_to_nested]
+
+	/* Restore previous stack pointer */
+	pop {r2, r3}
+	add sp, sp, r3
+
 	/*
-	 * Restore r0-r3, r12 and lr stored into the process stack by the mode
+	 * Restore r0-r3, r12 and lr_irq stored into the process stack by the mode
 	 * entry function. These registers are saved by _isr_wrapper for IRQ mode
 	 * and z_arm_svc for SVC mode.
 	 *
 	 * r0-r3 are either the values from the thread before it was switched out
 	 * or they are the args to _new_thread for a new thread.
 	 */
-	push {r4-r6}
-	mrs r6, cpsr
 	cps #MODE_SYS
-	ldmia sp!, {r0-r5}
-	msr cpsr_c, r6
-	mov r12, r4
-	mov lr, r5
-	pop {r4-r6}
-	movs pc, lr
+	pop {r0-r3, r12, lr}
+	rfeia sp!
 #endif
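The pop/rfeia pair above consumes the frame that _isr_wrapper (next file)
builds on the interrupted thread's SYS-mode stack with "srsdb #MODE_SYS!"
followed by "push {r0-r3, r12, lr}". In C terms the frame looks roughly
like this (a sketch; the struct and field names are made up for clarity,
not taken from the Zephyr sources):

	#include <stdint.h>

	typedef uint32_t u32_t;	/* Zephyr's legacy fixed-width alias */

	/* Illustrative layout of the exception stack frame, lowest
	 * address (top of stack) first.
	 */
	struct exc_stack_frame {
		u32_t r0, r1, r2, r3;	/* caller-saved GPRs, pushed last */
		u32_t r12;
		u32_t lr;		/* banked lr of the interrupted thread */
		u32_t lr_irq;		/* return address, pushed by srsdb */
		u32_t spsr_irq;		/* saved cpsr, consumed by rfeia */
	};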

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2013-2014 Wind River Systems, Inc.
+ * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -45,20 +46,39 @@ SECTION_FUNC(TEXT, _isr_wrapper)
 	push {r0,lr}		/* r0, lr are now the first items on the stack */
 #elif defined(CONFIG_CPU_CORTEX_R)
 	/*
-	 * Save away r0-r3 from previous context to the process stack since
-	 * they are clobbered here. Also, save away lr since we may swap
-	 * processes and return to a different thread.
+	 * Save away r0-r3, r12 and lr_irq for the previous context to the
+	 * process stack since they are clobbered here. Also, save away lr
+	 * and spsr_irq since we may swap processes and return to a different
+	 * thread.
 	 */
-	push {r4, r5}
-	mov r4, r12
-	sub r5, lr, #4
+	sub lr, lr, #4
+	srsdb #MODE_SYS!
 	cps #MODE_SYS
-	stmdb sp!, {r0-r5}
-	cps #MODE_IRQ
+	push {r0-r3, r12, lr}
-	pop {r4, r5}
-#endif
+
+	/*
+	 * Use SVC mode stack for predictable interrupt behaviour; running ISRs
+	 * in the SYS/USR mode stack (i.e. interrupted thread stack) leaves the
+	 * ISR stack usage at the mercy of the interrupted thread and this can
+	 * be prone to stack overflows if any of the ISRs and/or preemptible
+	 * threads have high stack usage.
+	 *
+	 * When userspace is enabled, this also prevents leaking privileged
+	 * information to the user mode.
+	 */
+	cps #MODE_SVC
+
+	/* Align stack at double-word boundary */
+	and r3, sp, #4
+	sub sp, sp, r3
+	push {r2, r3}
+
+	/* Increment interrupt nesting count */
+	ldr r2, =_kernel
+	ldr r0, [r2, #_kernel_offset_to_nested]
+	add r0, r0, #1
+	str r0, [r2, #_kernel_offset_to_nested]
+#endif /* CONFIG_CPU_CORTEX_M */
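Two details of the new entry sequence above are worth spelling out: the
nesting-count increment pairs with the decrement in z_arm_exc_exit, and the
`and`/`sub` pair realigns the SVC stack to a double-word boundary. A sketch
of the alignment arithmetic in C (the function name is made up):

	#include <stdint.h>

	/* Mirrors "and r3, sp, #4; sub sp, sp, r3" above. If sp is only
	 * word-aligned, drop 4 bytes; the following "push {r2, r3}"
	 * (8 bytes) preserves the alignment, and the returned adjustment
	 * travels on the stack so that "pop {r2, r3}; add sp, sp, r3" in
	 * z_arm_exc_exit can restore the original sp on the way out.
	 */
	static inline uint32_t align_sp_dword(uint32_t *sp)
	{
		uint32_t adjust = *sp & 4U;	/* 4 if misaligned, else 0 */

		*sp -= adjust;			/* sp is now 8-byte aligned */
		return adjust;			/* ends up in r3 on the stack */
	}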
#ifdef CONFIG_EXECUTION_BENCHMARKING
bl read_timer_start_of_isr
@@ -78,12 +98,13 @@ SECTION_FUNC(TEXT, _isr_wrapper)
 	 * is called with interrupts disabled.
 	 */
-	/*
-	 * FIXME: Remove the Cortex-M conditional compilation checks for `cpsid i`
-	 *        and `cpsie i` after the Cortex-R port is updated to support
-	 *        interrupt nesting. For more details, refer to the issue #21758.
-	 */
 #if defined(CONFIG_CPU_CORTEX_M)
+	/*
+	 * Disable interrupts to prevent nesting while exiting idle state. This
+	 * is only necessary for the Cortex-M because it is the only ARM
+	 * architecture variant that automatically enables interrupts when
+	 * entering an ISR.
+	 */
 	cpsid i /* PRIMASK = 1 */
 #endif
@@ -146,6 +167,21 @@ _idle_state_cleared:
 #else
 #error Unknown ARM architecture
 #endif /* CONFIG_CPU_CORTEX_M */
+
+#if !defined(CONFIG_CPU_CORTEX_M)
+	/*
+	 * Enable interrupts to allow nesting.
+	 *
+	 * Note that interrupts are disabled up to this point on the ARM
+	 * architecture variants other than the Cortex-M. It is also important
+	 * to note that most interrupt controllers require that the nested
+	 * interrupts are handled after the active interrupt is acknowledged;
+	 * this is done through the `get_active` interrupt controller
+	 * interface function.
+	 */
+	cpsie i
+#endif /* !CONFIG_CPU_CORTEX_M */
+
 	ldr r1, =_sw_isr_table
 	add r1, r1, r0	/* table entry: ISRs must have their MSB set to stay
 			 * in thumb mode */
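The ordering constraint described in the comment above boils down to:
acknowledge first, then nest. A sketch of the resulting dispatch flow, with
hypothetical helper names standing in for the interrupt controller driver
interface and the CPS instructions:

	/* Hypothetical helpers; not the actual Zephyr API names */
	int  intc_get_active(void);	/* acknowledge + identify active IRQ */
	void intc_eoi(int irq);		/* signal end-of-interrupt */
	void irq_nesting_enable(void);	/* the "cpsie i" above */
	void irq_nesting_disable(void);	/* the "cpsid i" in z_arm_exc_exit */
	void run_isr(int irq);		/* _sw_isr_table[irq] dispatch */

	void dispatch_irq(void)
	{
		int irq = intc_get_active();	/* must happen before nesting */

		irq_nesting_enable();
		run_isr(irq);
		irq_nesting_disable();
		intc_eoi(irq);
	}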
@@ -186,7 +222,7 @@ _idle_state_cleared:
 	pop {r0, lr}
 #elif defined(CONFIG_ARMV7_R)
 	/*
-	 * r0,lr were saved on the process stack since a swap could
+	 * r0 and lr_irq were saved on the process stack since a swap could
 	 * happen. exc_exit will handle getting those values back
 	 * from the process stack to return to the correct location
 	 * so there is no need to do anything here.

@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2013-2014 Wind River Systems, Inc.
  * Copyright (c) 2017-2019 Nordic Semiconductor ASA.
+ * Copyright (c) 2020 Stephanos Ioannidis <root@stephanos.io>
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -110,8 +111,9 @@ out_fp_endif:
 #endif /* CONFIG_FP_SHARING */
 #elif defined(CONFIG_ARMV7_R)
 	/* Store rest of process context */
-	mrs r12, SPSR
-	stm r0, {r4-r12,sp,lr}^
+	cps #MODE_SYS
+	stm r0, {r4-r11, sp}
+	cps #MODE_SVC
 #else
 #error Unknown ARM architecture
 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
@@ -315,9 +317,10 @@ _thread_irq_disabled:
 	ldr r0, =_thread_offset_to_callee_saved
 	add r0, r2
-	/* restore r4-r12 for incoming thread, plus system sp and lr */
-	ldm r0, {r4-r12,sp,lr}^
-	msr SPSR_fsxc, r12
+	/* restore r4-r11 and sp for incoming thread */
+	cps #MODE_SYS
+	ldm r0, {r4-r11, sp}
+	cps #MODE_SVC
 #else
 #error Unknown ARM architecture
 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
@@ -585,15 +588,21 @@ SECTION_FUNC(TEXT, z_arm_svc)
 	 * Save r12 and the lr as we could be swapping in another process and
 	 * returning to a different location.
 	 */
-	push {r4, r5}
-	mov r4, r12
-	mov r5, lr
+	srsdb #MODE_SYS!
 	cps #MODE_SYS
-	stmdb sp!, {r0-r5}
+	push {r0-r3, r12, lr}
 	cps #MODE_SVC
-	pop {r4, r5}
+
+	/* Align stack at double-word boundary */
+	and r3, sp, #4
+	sub sp, sp, r3
+	push {r2, r3}
+
+	/* Increment interrupt nesting count */
+	ldr r2, =_kernel
+	ldr r0, [r2, #_kernel_offset_to_nested]
+	add r0, r0, #1
+	str r0, [r2, #_kernel_offset_to_nested]

 	/* Get SVC number */
 	mrs r0, spsr
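The "mrs r0, spsr" read above feeds the SVC-number extraction that follows
it. For reference, the canonical AArch32 pattern for recovering the
immediate from the trapping instruction looks like this (a sketch of the
technique, not necessarily Zephyr's exact sequence):

	mrs r0, spsr			/* caller's cpsr */
	tst r0, #T_BIT			/* was the caller in Thumb state? */
	beq 1f
	ldrh r1, [lr, #-2]		/* Thumb: "svc #imm8" encoding */
	and r1, r1, #0xff
	b 2f
1:
	ldr r1, [lr, #-4]		/* ARM: "svc #imm24" encoding */
	bic r1, r1, #0xff000000
2: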

@@ -125,19 +125,19 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	pInitCtx->basic.a2 = (u32_t)parameter1;
 	pInitCtx->basic.a3 = (u32_t)parameter2;
 	pInitCtx->basic.a4 = (u32_t)parameter3;
+#if defined(CONFIG_CPU_CORTEX_M)
 	pInitCtx->basic.xpsr =
 		0x01000000UL; /* clear all, thumb bit is 1, even if RO */
+#else
+	pInitCtx->basic.xpsr = A_BIT | MODE_SYS;
+#if defined(CONFIG_COMPILER_ISA_THUMB2)
+	pInitCtx->basic.xpsr |= T_BIT;
+#endif /* CONFIG_COMPILER_ISA_THUMB2 */
+#endif /* CONFIG_CPU_CORTEX_M */

 	thread->callee_saved.psp = (u32_t)pInitCtx;
 #if defined(CONFIG_CPU_CORTEX_R)
 	pInitCtx->basic.lr = (u32_t)pInitCtx->basic.pc;
-	thread->callee_saved.spsr = A_BIT | MODE_SYS;
-#if defined(CONFIG_COMPILER_ISA_THUMB2)
-	thread->callee_saved.spsr |= T_BIT;
-#endif
-	thread->callee_saved.lr = (u32_t)pInitCtx->basic.pc;
 #endif /* CONFIG_CPU_CORTEX_R */

 	thread->arch.basepri = 0;
 #if defined(CONFIG_USERSPACE) || defined(CONFIG_FP_SHARING)
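A new non-Cortex-M thread therefore starts out in System mode with
asynchronous aborts masked, optionally in Thumb state. For reference, the
constants combine into these architectural CPSR encodings (bit positions
per the ARM ARM; shown here for illustration only):

	/* Architectural CPSR bit assignments behind the xpsr composition */
	#define MODE_SYS 0x1fU		/* CPSR.M[4:0]: System mode */
	#define A_BIT    (1U << 8)	/* CPSR.A: asynchronous abort mask */
	#define T_BIT    (1U << 5)	/* CPSR.T: Thumb execution state */

	/* A_BIT | MODE_SYS         == 0x11f  (ARM state)   */
	/* A_BIT | MODE_SYS | T_BIT == 0x13f  (Thumb state) */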

@@ -65,10 +65,6 @@ GEN_OFFSET_SYM(_callee_saved_t, v6);
 GEN_OFFSET_SYM(_callee_saved_t, v7);
 GEN_OFFSET_SYM(_callee_saved_t, v8);
 GEN_OFFSET_SYM(_callee_saved_t, psp);
-#if defined(CONFIG_CPU_CORTEX_R)
-GEN_OFFSET_SYM(_callee_saved_t, spsr);
-GEN_OFFSET_SYM(_callee_saved_t, lr);
-#endif

 /* size of the entire preempt registers structure */

@@ -35,16 +35,7 @@ extern volatile irq_offload_routine_t offload_routine;
 /* Check the CPSR mode bits to see if we are in IRQ or FIQ mode */
 static ALWAYS_INLINE bool arch_is_in_isr(void)
 {
-	unsigned int status;
-
-	__asm__ volatile(
-		" mrs %0, cpsr"
-		: "=r" (status) : : "memory", "cc");
-	status &= MODE_MASK;
-
-	return (status == MODE_FIQ) ||
-		(status == MODE_IRQ) ||
-		(status == MODE_SVC);
+	return (_kernel.nested != 0);
 }

 /**
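The simplification works because every non-Cortex-M exception entry path in
this commit increments `_kernel.nested` and every exit decrements it, so a
non-zero count means execution is in handler context. The helper is what
the public `k_is_in_isr()` wraps; a typical guard built on it (the work
item and handler names are made up):

	#include <kernel.h>

	static struct k_work sensor_work;	/* hypothetical deferred work */
	static void process_sample(void);	/* hypothetical direct path */

	void sensor_trigger_handler(void)
	{
		if (k_is_in_isr()) {
			/* Handler context: defer, never block */
			k_work_submit(&sensor_work);
		} else {
			process_sample();
		}
	}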

@@ -31,13 +31,7 @@ struct _callee_saved {
 	u32_t v6;  /* r9  */
 	u32_t v7;  /* r10 */
 	u32_t v8;  /* r11 */
-#if defined(CONFIG_CPU_CORTEX_R)
-	u32_t spsr;/* r12 */
-	u32_t psp; /* r13 */
-	u32_t lr;  /* r14 */
-#else
 	u32_t psp; /* r13 */
-#endif
 };

 typedef struct _callee_saved _callee_saved_t;