arch: arm: Add code for swapping threads between secure and non-secure

This adds code to swap_helper.S to do special handling of LR when the
interrupt came from the Secure state. The LR value is stored to memory and
put back into LR when swapping back to the relevant thread.

Also, add special handling of FP state when switching from secure to
non-secure: since we don't know whether the original non-secure thread
(which called a secure service) was using the FP registers, we always
store them, just in case.

Signed-off-by: Øyvind Rønningstad <oyvind.ronningstad@nordicsemi.no>
Authored by Øyvind Rønningstad on 2020-06-24 14:31:33 +02:00; committed by Ioannis Glaropoulos
commit a2cfb8431d
7 changed files with 100 additions and 36 deletions
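The swap_helper.S hunks in this commit keep only the least significant byte of EXC_RETURN (strb) and rebuild the full value with a sign-extending load (ldrsb). The following host-side C sketch (not Zephyr code; the EXC_RETURN_FTYPE mask and the 0xFFFFFFBC example value are taken from the hunks below) illustrates why that round trip is lossless: for the EXC_RETURN values in play here, bits [31:8] are all ones and bit 7 of the low byte is set.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EXC_RETURN_FTYPE 0x00000010UL /* bit [4]: 1 = no FP context on the exception frame */

    int main(void)
    {
        uint32_t exc_return = 0xFFFFFFBCUL; /* e.g. non-secure thread on PSP, no FP context */

        /* What "strb lr, [...]" keeps: the least significant byte only. */
        uint8_t stored = (uint8_t)exc_return;

        /* What "ldrsb lr, [...]" does on the way back in: sign-extend that byte. */
        uint32_t restored = (uint32_t)(int32_t)(int8_t)stored;

        assert(restored == exc_return);
        printf("stored 0x%02X -> restored 0x%08lX, FType=%lu\n",
               (unsigned)stored, (unsigned long)restored,
               (unsigned long)((restored & EXC_RETURN_FTYPE) >> 4));
        return 0;
    }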

View file

@ -225,6 +225,14 @@ config ARM_NONSECURE_PREEMPTIBLE_SECURE_CALLS
threads may not be context-switched-out while doing a Secure
function call.
config ARM_STORE_EXC_RETURN
bool
default y if FPU_SHARING || ARM_NONSECURE_PREEMPTIBLE_SECURE_CALLS
help
Store the EXC_RETURN value when switching threads.
This is needed when switching between threads that differ in either
FPU usage or security domain.
choice
prompt "Floating point ABI"
default FP_HARDABI
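ARM_STORE_EXC_RETURN defaults to y whenever FPU_SHARING or ARM_NONSECURE_PREEMPTIBLE_SECURE_CALLS is enabled, because in both cases two runnable threads can legitimately need different EXC_RETURN values, so the PendSV handler cannot exit with one hard-coded constant. A small plain-C illustration (the values are the ones used elsewhere in this commit; the labels are illustrative only, not Zephyr API):

    #include <inttypes.h>
    #include <stdio.h>

    #define EXC_RETURN_FTYPE 0x10u /* bit [4]: 1 = thread had no FP context */

    int main(void)
    {
        uint32_t ns_thread     = 0xFFFFFFBCu;                     /* non-secure, no FP */
        uint32_t ns_thread_fp  = 0xFFFFFFBCu & ~EXC_RETURN_FTYPE; /* non-secure, FP in use */
        uint32_t secure_thread = 0xFFFFFFFDu;                     /* secure (or no TrustZone), no FP */

        printf("non-secure, no FP:   0x%08" PRIX32 "\n", ns_thread);
        printf("non-secure, with FP: 0x%08" PRIX32 "\n", ns_thread_fp);
        printf("secure, no FP:       0x%08" PRIX32 "\n", secure_thread);
        return 0;
    }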

View file

@ -71,6 +71,11 @@ SECTION_FUNC(TEXT, z_arm_pendsv)
ldr r1, =_kernel
ldr r2, [r1, #_kernel_offset_to_current]
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
/* Store LSB of LR (EXC_RETURN) to the thread's 'mode' word. */
strb lr, [r2, #_thread_offset_to_mode_exc_return]
#endif
/* addr of callee-saved regs in thread in r0 */
ldr r0, =_thread_offset_to_callee_saved
add r0, r2
@ -95,15 +100,9 @@ SECTION_FUNC(TEXT, z_arm_pendsv)
stmia r0, {v1-v8, ip}
#ifdef CONFIG_FPU_SHARING
/* Assess whether switched-out thread had been using the FP registers. */
ldr r0, =0x10 /* EXC_RETURN.F_Type Mask */
tst lr, r0 /* EXC_RETURN & EXC_RETURN.F_Type_Msk */
beq out_fp_active
/* FP context inactive: clear FP state */
ldr r0, [r2, #_thread_offset_to_mode]
bic r0, #0x4 /* _current->arch.mode &= ~(CONTROL_FPCA_Msk) */
b out_fp_endif
tst lr, #0x10 /* EXC_RETURN & EXC_RETURN.F_Type_Msk */
bne out_fp_endif
out_fp_active:
/* FP context active: set FP state and store callee-saved registers.
* Note: if Lazy FP stacking is enabled, storing the callee-saved
* registers will automatically trigger FP state preservation in
@ -111,11 +110,8 @@ out_fp_active:
*/
add r0, r2, #_thread_offset_to_preempt_float
vstmia r0, {s16-s31}
ldr r0, [r2, #_thread_offset_to_mode]
orrs r0, r0, #0x4 /* _current->arch.mode |= CONTROL_FPCA_Msk */
out_fp_endif:
str r0, [r2, #_thread_offset_to_mode]
/* At this point FPCCR.LSPACT is guaranteed to be cleared,
* regardless of whether the thread has an active FP context.
*/
@ -201,6 +197,11 @@ out_fp_endif:
str r0, [r4]
#endif
#endif
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
/* Restore EXC_RETURN value. */
ldrsb lr, [r2, #_thread_offset_to_mode_exc_return]
#endif
/* Restore previous interrupt disable state (irq_lock key)
@ -286,9 +287,8 @@ _thread_irq_disabled:
#ifdef CONFIG_FPU_SHARING
/* Assess whether switched-in thread had been using the FP registers. */
ldr r0, [r2, #_thread_offset_to_mode]
tst r0, #0x04 /* thread.arch.mode & CONTROL.FPCA Msk */
bne in_fp_active
tst lr, #0x10 /* EXC_RETURN & EXC_RETURN.F_Type_Msk */
beq in_fp_active
/* FP context inactive for swapped-in thread:
* - reset FPSCR to 0
* - set EXC_RETURN.F_Type (prevents FP frame un-stacking when returning
@ -296,7 +296,6 @@ _thread_irq_disabled:
*/
movs.n r3, #0
vmsr fpscr, r3
orrs lr, lr, #0x10 /* EXC_RETURN & EXC_RETURN.F_Type_Msk */
b in_fp_endif
in_fp_active:
@ -305,7 +304,6 @@ in_fp_active:
* - FPSCR and caller-saved registers will be restored automatically
* - restore callee-saved FP registers
*/
bic lr, #0x10 /* EXC_RETURN | (~EXC_RETURN.F_Type_Msk) */
add r0, r2, #_thread_offset_to_preempt_float
vldmia r0, {s16-s31}
in_fp_endif:
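Read as C, the switch-out/switch-in handling above amounts to the sketch below. Names and types are hypothetical stand-ins (thread_ctx, save_fp_callee_regs(), ...); the authoritative logic is the assembly, and details such as lazy FP stacking, FPSCR reset and the CONFIG_* guards are omitted.

    #include <stdint.h>

    #define EXC_RETURN_FTYPE 0x10u

    struct thread_ctx {
        uint8_t  mode_exc_return;   /* low byte of EXC_RETURN, captured at switch-out */
        uint32_t preempt_float[16]; /* callee-saved FP registers s16-s31 */
    };

    static uint32_t fpu_s16_s31[16]; /* stand-in for the hardware FP register bank */

    static void save_fp_callee_regs(uint32_t *dst)          /* stands in for vstmia */
    {
        for (int i = 0; i < 16; i++) { dst[i] = fpu_s16_s31[i]; }
    }

    static void restore_fp_callee_regs(const uint32_t *src) /* stands in for vldmia */
    {
        for (int i = 0; i < 16; i++) { fpu_s16_s31[i] = src[i]; }
    }

    static uint32_t pendsv_switch(struct thread_ctx *out, struct thread_ctx *in,
                                  uint32_t exc_return)
    {
        /* Switch-out: remember how the outgoing thread was entered (strb). */
        out->mode_exc_return = (uint8_t)exc_return;
        if ((exc_return & EXC_RETURN_FTYPE) == 0u) {
            /* FType clear: the outgoing thread had an active FP context. */
            save_fp_callee_regs(out->preempt_float);
        }

        /* Switch-in: rebuild EXC_RETURN from the stored byte (ldrsb). */
        uint32_t next = (uint32_t)(int32_t)(int8_t)in->mode_exc_return;
        if ((next & EXC_RETURN_FTYPE) == 0u) {
            restore_fp_callee_regs(in->preempt_float);
        }
        return next; /* value the handler puts back into LR before exception return */
    }

    int main(void)
    {
        struct thread_ctx a = { 0 };
        struct thread_ctx b = { .mode_exc_return = 0xBC }; /* DEFAULT_EXC_RETURN, non-secure */

        uint32_t lr = pendsv_switch(&a, &b, 0xFFFFFFACu);  /* outgoing thread was using FP */
        return (lr == 0xFFFFFFBCu) ? 0 : 1;
    }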

View file

@ -23,6 +23,20 @@
#define FP_GUARD_EXTRA_SIZE 0
#endif
#ifndef EXC_RETURN_FTYPE
/* bit [4] allocate stack for floating-point context: 0=done 1=skipped */
#define EXC_RETURN_FTYPE (0x00000010UL)
#endif
/* Default last octet of EXC_RETURN, for threads that have not run yet.
* The full EXC_RETURN value will be e.g. 0xFFFFFFBC.
*/
#if defined(CONFIG_ARM_NONSECURE_FIRMWARE)
#define DEFAULT_EXC_RETURN 0xBC
#else
#define DEFAULT_EXC_RETURN 0xFD
#endif
#if !defined(CONFIG_MULTITHREADING) && defined(CONFIG_CPU_CORTEX_M)
extern K_THREAD_STACK_DEFINE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
#endif
@ -101,8 +115,11 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
thread->callee_saved.psp = (uint32_t)iframe;
thread->arch.basepri = 0;
#if defined(CONFIG_USERSPACE) || defined(CONFIG_FPU_SHARING)
#if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE)
thread->arch.mode = 0;
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
thread->arch.mode_exc_return = DEFAULT_EXC_RETURN;
#endif
#if FP_GUARD_EXTRA_SIZE > 0
if ((thread->base.user_options & K_FP_REGS) != 0) {
thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
@ -177,7 +194,7 @@ static inline void z_arm_thread_stack_info_adjust(struct k_thread *thread,
uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread)
{
if (((thread->base.user_options & K_FP_REGS) != 0) ||
((thread->arch.mode & CONTROL_FPCA_Msk) != 0)) {
((thread->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0)) {
/* The thread has been pre-tagged (at creation or later) with
* K_FP_REGS, i.e. it is expected to be using the FPU registers
* (if not already). Activate lazy stacking and program a large
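The two DEFAULT_EXC_RETURN octets added above decode as follows under the bit assignments listed in the thread.h comment later in this commit (ES, SPSel, Mode, FType, DCRS, S). A host-side sketch, for illustration only; note that both defaults have FType set, meaning a freshly created thread is assumed to have no FP context, which is what the updated tests assert.

    #include <stdint.h>
    #include <stdio.h>

    static void decode(uint8_t b)
    {
        printf("0x%02X: ES=%u SPSel=%u Mode=%u FType=%u DCRS=%u S=%u\n",
               (unsigned)b,
               (b >> 0) & 1u,  /* Exception Secure */
               (b >> 2) & 1u,  /* 1: return to PSP */
               (b >> 3) & 1u,  /* 1: return to Thread mode */
               (b >> 4) & 1u,  /* 1: no FP context on the frame */
               (b >> 5) & 1u,  /* default callee-register stacking */
               (b >> 6) & 1u); /* Secure stack in use */
    }

    int main(void)
    {
        decode(0xBC); /* CONFIG_ARM_NONSECURE_FIRMWARE: non-secure thread */
        decode(0xFD); /* otherwise: secure firmware, or no TrustZone at all */
        return 0;
    }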

View file

@ -34,10 +34,13 @@ GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
#if defined(CONFIG_USERSPACE) || defined(CONFIG_FPU_SHARING)
GEN_OFFSET_SYM(_thread_arch_t, mode);
#endif
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
GEN_OFFSET_SYM(_thread_arch_t, mode_exc_return);
#endif
#if defined(CONFIG_USERSPACE)
GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
#endif
#endif
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
GEN_OFFSET_SYM(_thread_arch_t, preempt_float);
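The new GEN_OFFSET_SYM entry exports the byte offset of mode_exc_return inside struct _thread_arch as an assembler-visible constant, which is what lets swap_helper.S address the field with strb/ldrsb. A stand-in illustration of what the generated constant corresponds to (fake struct and host offsetof; the real layout is the one in thread.h):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for struct _thread_arch; the field order is illustrative. */
    struct fake_thread_arch {
        uint32_t basepri;
        uint32_t swap_return_value;
        union {
            uint32_t mode;
            struct {
                uint8_t  mode_bits;
                uint8_t  mode_exc_return;
                uint16_t mode_reserved2;
            };
        };
    };

    int main(void)
    {
        /* Roughly what ___thread_arch_t_mode_exc_return_OFFSET resolves to here. */
        printf("mode_exc_return offset: %zu\n",
               offsetof(struct fake_thread_arch, mode_exc_return));
        return 0;
    }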

View file

@ -29,17 +29,24 @@
#if defined(CONFIG_USERSPACE) || defined(CONFIG_FPU_SHARING)
#define _thread_offset_to_mode \
(___thread_t_arch_OFFSET + ___thread_arch_t_mode_OFFSET)
#endif
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
#define _thread_offset_to_mode_exc_return \
(___thread_t_arch_OFFSET + ___thread_arch_t_mode_exc_return_OFFSET)
#endif
#ifdef CONFIG_USERSPACE
#define _thread_offset_to_priv_stack_start \
(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET)
#endif
#endif
#if defined(CONFIG_THREAD_STACK_INFO)
#define _thread_offset_to_stack_info_start \
(___thread_stack_info_t_start_OFFSET + ___thread_t_stack_info_OFFSET)
#endif
/* end - threads */
#endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_OFFSETS_SHORT_ARCH_H_ */
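_thread_offset_to_mode_exc_return is simply the sum of two generated constants: the offset of the arch member within the thread struct, plus the offset of mode_exc_return within struct _thread_arch. A sketch of that arithmetic with stand-in types (not Zephyr's real layout):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in types; only the nesting matters for the arithmetic. */
    struct fake_arch {
        uint32_t basepri;
        uint8_t  mode_bits;
        uint8_t  mode_exc_return;
        uint16_t mode_reserved2;
    };

    struct fake_thread {
        uint32_t callee_saved[10];
        struct fake_arch arch;
    };

    int main(void)
    {
        size_t composed = offsetof(struct fake_thread, arch)           /* ___thread_t_arch_OFFSET */
                        + offsetof(struct fake_arch, mode_exc_return); /* ..._mode_exc_return_OFFSET */

        struct fake_thread t;
        size_t direct = (size_t)((char *)&t.arch.mode_exc_return - (char *)&t);

        assert(composed == direct);
        return 0;
    }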

View file

@ -74,25 +74,51 @@ struct _thread_arch {
struct _preempt_float preempt_float;
#endif
#if defined(CONFIG_USERSPACE) || defined(CONFIG_FPU_SHARING)
#if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE)
/*
* Status variable holding several thread status flags
* as follows:
*
* +--------------bit-3----------bit-2--------bit-1---+----bit-0------+
* byte 0
* +-bits 4-7-----bit-3----------bit-2--------bit-1---+----bit-0------+
* :          |             |              |          |               |
* : reserved |<Guard FLOAT>| <FP context> | reserved |  <priv mode>  |
* :   bits   |             | CONTROL.FPCA |          | CONTROL.nPRIV |
* : reserved |<Guard FLOAT>|   reserved   | reserved |  <priv mode>  |
* :   bits   |             |              |          | CONTROL.nPRIV |
* +------------------------------------------------------------------+
*
* byte 1
* +----------------------------bits 8-15-----------------------------+
* :             Least significant byte of EXC_RETURN                 |
* : bit 15 | bit 14 | bit 13 | bit 12 | bit 11 | bit 10 | bit 9 | bit 8 |
* :  Res   |   S    |  DCRS  | FType  |  Mode  | SPSel  |  Res  |  ES   |
* +------------------------------------------------------------------+
*
* Bit 0: thread's current privileged mode (Supervisor or User mode)
* Mirrors CONTROL.nPRIV flag.
* Bit 2: indicating whether the thread has an active FP context.
* Bit 2: Deprecated in favor of FType. Note: FType = !CONTROL.FPCA.
* indicating whether the thread has an active FP context.
* Mirrors CONTROL.FPCA flag.
* Bit 3: indicating whether the thread is applying the long (FLOAT)
* or the default MPU stack guard size.
*
* Bits 8-15: Least significant octet of the EXC_RETURN value when a
* thread is switched-out. The value is copied from LR when
* entering the PendSV handler. When the thread is
* switched in again, the value is restored to LR before
* exiting the PendSV handler.
*/
uint32_t mode;
union {
uint32_t mode;
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
__packed struct {
uint8_t mode_bits;
uint8_t mode_exc_return;
uint16_t mode_reserved2;
};
#endif
};
#if defined(CONFIG_USERSPACE)
uint32_t priv_stack_start;
#endif
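The union above relies on mode_exc_return aliasing byte 1 of mode, so that the word-wide accesses to mode and the byte-wide strb/ldrsb in PendSV touch the same storage. A stand-alone copy of the layout, checked on a little-endian host to match the byte order assumed here (the assert would fail on a big-endian host):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Stand-alone copy of the layout, for illustration only. */
    union mode_word {
        uint32_t mode;
        struct {
            uint8_t  mode_bits;       /* bits 0-7: nPRIV / MPU guard flags */
            uint8_t  mode_exc_return; /* bits 8-15: low byte of EXC_RETURN */
            uint16_t mode_reserved2;
        };
    };

    int main(void)
    {
        union mode_word w;

        memset(&w, 0, sizeof(w));
        w.mode_exc_return = 0xBC; /* DEFAULT_EXC_RETURN for non-secure firmware */

        /* On a little-endian target this lands in bits 8-15 of the 32-bit word. */
        assert((w.mode & 0x0000FF00u) == 0x0000BC00u);
        assert(sizeof(union mode_word) == sizeof(uint32_t));
        return 0;
    }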

View file

@ -25,6 +25,11 @@
#define BASEPRI_MODIFIED_2 0x40
#define SWAP_RETVAL 0x1234
#ifndef EXC_RETURN_FTYPE
/* bit [4] allocate stack for floating-point context: 0=done 1=skipped */
#define EXC_RETURN_FTYPE (0x00000010UL)
#endif
extern void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
static struct k_thread alt_thread;
@ -254,9 +259,9 @@ static void alt_thread_entry(void)
__get_CONTROL());
/* Verify that the _current_ (alt) thread is
* initialized with mode.FPCA cleared
* initialized with EXC_RETURN.Ftype set
*/
zassert_true((_current->arch.mode & CONTROL_FPCA_Msk) == 0,
zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
"Alt thread FPCA flag not clear at initialization\n");
#if defined(CONFIG_MPU_STACK_GUARD)
/* Alt thread is created with K_FP_REGS set, so we
@ -276,8 +281,8 @@ static void alt_thread_entry(void)
zassert_true(__get_FPSCR() == 0,
"(Alt thread) FPSCR is not cleared at initialization: 0x%x\n", __get_FPSCR());
zassert_true((p_ztest_thread->arch.mode & CONTROL_FPCA_Msk) != 0,
"ztest thread mode FPCA flag not updated at swap-out: 0x%0x\n",
zassert_true((p_ztest_thread->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0,
"ztest thread mode Ftype flag not updated at swap-out: 0x%0x\n",
p_ztest_thread->arch.mode);
/* Verify that the main test thread (ztest) has stored the FP
@ -447,8 +452,8 @@ void test_arm_thread_swap(void)
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* The main test thread is not (yet) actively using the FP registers */
zassert_true((_current->arch.mode & CONTROL_FPCA_Msk) == 0,
"Thread FPCA flag not clear at initialization 0x%0x\n",
zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
"Thread Ftype flag not set at initialization 0x%0x\n",
_current->arch.mode);
/* Verify that the main test thread is initialized with FPCA cleared. */
@ -476,8 +481,8 @@ void test_arm_thread_swap(void)
/* The main test thread is using the FP registers, but the .mode
* flag is not updated until the next context switch.
*/
zassert_true((_current->arch.mode & CONTROL_FPCA_Msk) == 0,
"Thread FPCA flag not clear at initialization\n");
zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
"Thread Ftype flag not set at initialization\n");
#if defined(CONFIG_MPU_STACK_GUARD)
zassert_true((_current->arch.mode &
Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0,
@ -700,8 +705,8 @@ void test_arm_thread_swap(void)
/* The main test thread is using the FP registers, and the .mode
* flag and MPU GUARD flag are now updated.
*/
zassert_true((_current->arch.mode & CONTROL_FPCA_Msk) != 0,
"Thread FPCA flag not set after main returned back\n");
zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0,
"Thread Ftype flag not cleared after main returned back\n");
#if defined(CONFIG_MPU_STACK_GUARD)
zassert_true((_current->arch.mode &
Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0,
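The rewritten assertions all follow from one relation: EXC_RETURN.FType is the inverse of CONTROL.FPCA, so every check that used to expect FPCA clear now expects FType set, and vice versa. A plain-C restatement (local mask definitions mirroring the values used in this commit; no ztest):

    #include <assert.h>
    #include <stdint.h>

    #define EXC_RETURN_FTYPE 0x10u /* bit [4] of EXC_RETURN */
    #define CONTROL_FPCA     0x04u /* bit [2] of CONTROL, as used in the old checks */

    /* FType = !FPCA */
    static uint8_t ftype_from_control(uint32_t control)
    {
        return (control & CONTROL_FPCA) ? 0u : EXC_RETURN_FTYPE;
    }

    int main(void)
    {
        /* Thread not using the FPU: FPCA clear, so FType set (old and new checks agree). */
        assert((ftype_from_control(0x0u) & EXC_RETURN_FTYPE) != 0u);

        /* Thread with an active FP context: FPCA set, so FType clear. */
        assert((ftype_from_control(CONTROL_FPCA) & EXC_RETURN_FTYPE) == 0u);
        return 0;
    }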