From a2cfb8431da6084ea34fdf0e264f4743a0c98cf8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=98yvind=20R=C3=B8nningstad?= Date: Wed, 24 Jun 2020 14:31:33 +0200 Subject: [PATCH] arch: arm: Add code for swapping threads between secure and non-secure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds code to swap_helper.S which does special handling of LR when the interrupt came from secure. The LR value is stored to memory, and put back into LR when swapping back to the relevant thread. Also, add special handling of FP state when switching from secure to non-secure, since we don't know whether the original non-secure thread (which called a secure service) was using FP registers, so we always store them, just in case. Signed-off-by: Øyvind Rønningstad --- arch/arm/core/aarch32/Kconfig | 8 ++++ arch/arm/core/aarch32/swap_helper.S | 30 +++++++-------- arch/arm/core/aarch32/thread.c | 21 +++++++++- arch/arm/core/offsets/offsets_aarch32.c | 5 ++- arch/arm/include/aarch32/offsets_short_arch.h | 9 ++++- include/arch/arm/aarch32/thread.h | 38 ++++++++++++++++--- .../arm/arm_thread_swap/src/arm_thread_arch.c | 25 +++++++----- 7 files changed, 100 insertions(+), 36 deletions(-) diff --git a/arch/arm/core/aarch32/Kconfig b/arch/arm/core/aarch32/Kconfig index 272364fef60..cd88caf9e0b 100644 --- a/arch/arm/core/aarch32/Kconfig +++ b/arch/arm/core/aarch32/Kconfig @@ -225,6 +225,14 @@ config ARM_NONSECURE_PREEMPTIBLE_SECURE_CALLS threads many not be context-switched-out while doing a Secure function call. +config ARM_STORE_EXC_RETURN + bool + default y if FPU_SHARING || ARM_NONSECURE_PREEMPTIBLE_SECURE_CALLS + help + Store the EXC_RETURN value when switching threads. + This is needed when switching between threads that differ in either + FPU usage or security domain. 
+ choice prompt "Floating point ABI" default FP_HARDABI diff --git a/arch/arm/core/aarch32/swap_helper.S b/arch/arm/core/aarch32/swap_helper.S index eb0684c0f58..a3cc7a8dd60 100644 --- a/arch/arm/core/aarch32/swap_helper.S +++ b/arch/arm/core/aarch32/swap_helper.S @@ -71,6 +71,11 @@ SECTION_FUNC(TEXT, z_arm_pendsv) ldr r1, =_kernel ldr r2, [r1, #_kernel_offset_to_current] +#if defined(CONFIG_ARM_STORE_EXC_RETURN) + /* Store LSB of LR (EXC_RETURN) to the thread's 'mode' word. */ + strb lr, [r2, #_thread_offset_to_mode_exc_return] +#endif + /* addr of callee-saved regs in thread in r0 */ ldr r0, =_thread_offset_to_callee_saved add r0, r2 @@ -95,15 +100,9 @@ SECTION_FUNC(TEXT, z_arm_pendsv) stmia r0, {v1-v8, ip} #ifdef CONFIG_FPU_SHARING /* Assess whether switched-out thread had been using the FP registers. */ - ldr r0, =0x10 /* EXC_RETURN.F_Type Mask */ - tst lr, r0 /* EXC_RETURN & EXC_RETURN.F_Type_Msk */ - beq out_fp_active - /* FP context inactive: clear FP state */ - ldr r0, [r2, #_thread_offset_to_mode] - bic r0, #0x4 /* _current->arch.mode &= ~(CONTROL_FPCA_Msk) */ - b out_fp_endif + tst lr, #0x10 /* EXC_RETURN & EXC_RETURN.F_Type_Msk */ + bne out_fp_endif -out_fp_active: /* FP context active: set FP state and store callee-saved registers. * Note: if Lazy FP stacking is enabled, storing the callee-saved * registers will automatically trigger FP state preservation in @@ -111,11 +110,8 @@ out_fp_active: */ add r0, r2, #_thread_offset_to_preempt_float vstmia r0, {s16-s31} - ldr r0, [r2, #_thread_offset_to_mode] - orrs r0, r0, #0x4 /* _current->arch.mode |= CONTROL_FPCA_Msk */ out_fp_endif: - str r0, [r2, #_thread_offset_to_mode] /* At this point FPCCR.LSPACT is guaranteed to be cleared, * regardless of whether the thread has an active FP context. */ @@ -201,6 +197,11 @@ out_fp_endif: str r0, [r4] #endif +#endif + +#if defined(CONFIG_ARM_STORE_EXC_RETURN) + /* Restore EXC_RETURN value. 
*/ + ldrsb lr, [r2, #_thread_offset_to_mode_exc_return] #endif /* Restore previous interrupt disable state (irq_lock key) @@ -286,9 +287,8 @@ _thread_irq_disabled: #ifdef CONFIG_FPU_SHARING /* Assess whether switched-in thread had been using the FP registers. */ - ldr r0, [r2, #_thread_offset_to_mode] - tst r0, #0x04 /* thread.arch.mode & CONTROL.FPCA Msk */ - bne in_fp_active + tst lr, #0x10 /* EXC_RETURN & EXC_RETURN.F_Type_Msk */ + beq in_fp_active /* FP context inactive for swapped-in thread: * - reset FPSCR to 0 * - set EXC_RETURN.F_Type (prevents FP frame un-stacking when returning @@ -296,7 +296,6 @@ _thread_irq_disabled: */ movs.n r3, #0 vmsr fpscr, r3 - orrs lr, lr, #0x10 /* EXC_RETURN & EXC_RETURN.F_Type_Msk */ b in_fp_endif in_fp_active: @@ -305,7 +304,6 @@ in_fp_active: * - FPSCR and caller-saved registers will be restored automatically * - restore callee-saved FP registers */ - bic lr, #0x10 /* EXC_RETURN | (~EXC_RETURN.F_Type_Msk) */ add r0, r2, #_thread_offset_to_preempt_float vldmia r0, {s16-s31} in_fp_endif: diff --git a/arch/arm/core/aarch32/thread.c b/arch/arm/core/aarch32/thread.c index 6e864dfc146..f6d8bc04d87 100644 --- a/arch/arm/core/aarch32/thread.c +++ b/arch/arm/core/aarch32/thread.c @@ -23,6 +23,20 @@ #define FP_GUARD_EXTRA_SIZE 0 #endif +#ifndef EXC_RETURN_FTYPE +/* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_FTYPE (0x00000010UL) +#endif + +/* Default last octet of EXC_RETURN, for threads that have not run yet. + * The full EXC_RETURN value will be e.g. 0xFFFFFFBC. 
+ */
+#if defined(CONFIG_ARM_NONSECURE_FIRMWARE)
+#define DEFAULT_EXC_RETURN 0xBC
+#else
+#define DEFAULT_EXC_RETURN 0xFD
+#endif
+
 #if !defined(CONFIG_MULTITHREADING) && defined(CONFIG_CPU_CORTEX_M)
 extern K_THREAD_STACK_DEFINE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
 #endif
@@ -101,8 +115,11 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	thread->callee_saved.psp = (uint32_t)iframe;
 	thread->arch.basepri = 0;
 
-#if defined(CONFIG_USERSPACE) || defined(CONFIG_FPU_SHARING)
+#if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE)
 	thread->arch.mode = 0;
+#if defined(CONFIG_ARM_STORE_EXC_RETURN)
+	thread->arch.mode_exc_return = DEFAULT_EXC_RETURN;
+#endif
 #if FP_GUARD_EXTRA_SIZE > 0
 	if ((thread->base.user_options & K_FP_REGS) != 0) {
 		thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
@@ -177,7 +194,7 @@ static inline void z_arm_thread_stack_info_adjust(struct k_thread *thread,
 uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread)
 {
 	if (((thread->base.user_options & K_FP_REGS) != 0) ||
-		((thread->arch.mode & CONTROL_FPCA_Msk) != 0)) {
+		((thread->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0)) {
 		/* The thread has been pre-tagged (at creation or later) with
 		 * K_FP_REGS, i.e. it is expected to be using the FPU registers
 		 * (if not already).
Activate lazy stacking and program a large diff --git a/arch/arm/core/offsets/offsets_aarch32.c b/arch/arm/core/offsets/offsets_aarch32.c index 3a0cbb69353..ff5d61fbcc8 100644 --- a/arch/arm/core/offsets/offsets_aarch32.c +++ b/arch/arm/core/offsets/offsets_aarch32.c @@ -34,10 +34,13 @@ GEN_OFFSET_SYM(_thread_arch_t, swap_return_value); #if defined(CONFIG_USERSPACE) || defined(CONFIG_FPU_SHARING) GEN_OFFSET_SYM(_thread_arch_t, mode); +#endif +#if defined(CONFIG_ARM_STORE_EXC_RETURN) +GEN_OFFSET_SYM(_thread_arch_t, mode_exc_return); +#endif #if defined(CONFIG_USERSPACE) GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start); #endif -#endif #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) GEN_OFFSET_SYM(_thread_arch_t, preempt_float); diff --git a/arch/arm/include/aarch32/offsets_short_arch.h b/arch/arm/include/aarch32/offsets_short_arch.h index cf473c659c9..f5b3b619fbe 100644 --- a/arch/arm/include/aarch32/offsets_short_arch.h +++ b/arch/arm/include/aarch32/offsets_short_arch.h @@ -29,17 +29,24 @@ #if defined(CONFIG_USERSPACE) || defined(CONFIG_FPU_SHARING) #define _thread_offset_to_mode \ (___thread_t_arch_OFFSET + ___thread_arch_t_mode_OFFSET) +#endif + +#if defined(CONFIG_ARM_STORE_EXC_RETURN) +#define _thread_offset_to_mode_exc_return \ + (___thread_t_arch_OFFSET + ___thread_arch_t_mode_exc_return_OFFSET) +#endif #ifdef CONFIG_USERSPACE #define _thread_offset_to_priv_stack_start \ (___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET) #endif -#endif #if defined(CONFIG_THREAD_STACK_INFO) #define _thread_offset_to_stack_info_start \ (___thread_stack_info_t_start_OFFSET + ___thread_t_stack_info_OFFSET) #endif + + /* end - threads */ #endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH32_OFFSETS_SHORT_ARCH_H_ */ diff --git a/include/arch/arm/aarch32/thread.h b/include/arch/arm/aarch32/thread.h index 6f808102bae..4a4938c7e03 100644 --- a/include/arch/arm/aarch32/thread.h +++ b/include/arch/arm/aarch32/thread.h @@ -74,25 +74,51 @@ struct _thread_arch { struct 
_preempt_float preempt_float; #endif -#if defined(CONFIG_USERSPACE) || defined(CONFIG_FPU_SHARING) +#if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE) /* * Status variable holding several thread status flags * as follows: * - * +--------------bit-3----------bit-2--------bit-1---+----bit-0------+ + * byte 0 + * +-bits 4-7-----bit-3----------bit-2--------bit-1---+----bit-0------+ * : | | | | | - * : reserved || | reserved | | - * : bits | | CONTROL.FPCA | | CONTROL.nPRIV | + * : reserved || reserved | reserved | | + * : bits | | | | CONTROL.nPRIV | + * +------------------------------------------------------------------+ + * + * byte 1 + * +----------------------------bits 8-15-----------------------------+ + * : Least significant byte of EXC_RETURN | + * : bit 15| bit 14| bit 13 | bit 12| bit 11 | bit 10 | bit 9 | bit 8 | + * : Res | S | DCRS | FType | Mode | SPSel | Res | ES | * +------------------------------------------------------------------+ * * Bit 0: thread's current privileged mode (Supervisor or User mode) * Mirrors CONTROL.nPRIV flag. - * Bit 2: indicating whether the thread has an active FP context. + * Bit 2: Deprecated in favor of FType. Note: FType = !CONTROL.FPCA. + * indicating whether the thread has an active FP context. * Mirrors CONTROL.FPCA flag. * Bit 3: indicating whether the thread is applying the long (FLOAT) * or the default MPU stack guard size. + * + * Bits 8-15: Least significant octet of the EXC_RETURN value when a + * thread is switched-out. The value is copied from LR when + * entering the PendSV handler. When the thread is + * switched in again, the value is restored to LR before + * exiting the PendSV handler. 
*/ - uint32_t mode; + union { + uint32_t mode; + +#if defined(CONFIG_ARM_STORE_EXC_RETURN) + __packed struct { + uint8_t mode_bits; + uint8_t mode_exc_return; + uint16_t mode_reserved2; + }; +#endif + }; + #if defined(CONFIG_USERSPACE) uint32_t priv_stack_start; #endif diff --git a/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c b/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c index b7a1c4dee09..58eecac8000 100644 --- a/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c +++ b/tests/arch/arm/arm_thread_swap/src/arm_thread_arch.c @@ -25,6 +25,11 @@ #define BASEPRI_MODIFIED_2 0x40 #define SWAP_RETVAL 0x1234 +#ifndef EXC_RETURN_FTYPE +/* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_FTYPE (0x00000010UL) +#endif + extern void z_move_thread_to_end_of_prio_q(struct k_thread *thread); static struct k_thread alt_thread; @@ -254,9 +259,9 @@ static void alt_thread_entry(void) __get_CONTROL()); /* Verify that the _current_ (alt) thread is - * initialized with mode.FPCA cleared + * initialized with EXC_RETURN.Ftype set */ - zassert_true((_current->arch.mode & CONTROL_FPCA_Msk) == 0, + zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, "Alt thread FPCA flag not clear at initialization\n"); #if defined(CONFIG_MPU_STACK_GUARD) /* Alt thread is created with K_FP_REGS set, so we @@ -276,8 +281,8 @@ static void alt_thread_entry(void) zassert_true(__get_FPSCR() == 0, "(Alt thread) FPSCR is not cleared at initialization: 0x%x\n", __get_FPSCR()); - zassert_true((p_ztest_thread->arch.mode & CONTROL_FPCA_Msk) != 0, - "ztest thread mode FPCA flag not updated at swap-out: 0x%0x\n", + zassert_true((p_ztest_thread->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0, + "ztest thread mode Ftype flag not updated at swap-out: 0x%0x\n", p_ztest_thread->arch.mode); /* Verify that the main test thread (ztest) has stored the FP @@ -447,8 +452,8 @@ void test_arm_thread_swap(void) #if defined(CONFIG_FPU) && 
defined(CONFIG_FPU_SHARING) /* The main test thread is not (yet) actively using the FP registers */ - zassert_true((_current->arch.mode & CONTROL_FPCA_Msk) == 0, - "Thread FPCA flag not clear at initialization 0x%0x\n", + zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, + "Thread Ftype flag not set at initialization 0x%0x\n", _current->arch.mode); /* Verify that the main test thread is initialized with FPCA cleared. */ @@ -476,8 +481,8 @@ void test_arm_thread_swap(void) /* The main test thread is using the FP registers, but the .mode * flag is not updated until the next context switch. */ - zassert_true((_current->arch.mode & CONTROL_FPCA_Msk) == 0, - "Thread FPCA flag not clear at initialization\n"); + zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0, + "Thread Ftype flag not set at initialization\n"); #if defined(CONFIG_MPU_STACK_GUARD) zassert_true((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0, @@ -700,8 +705,8 @@ void test_arm_thread_swap(void) /* The main test thread is using the FP registers, and the .mode * flag and MPU GUARD flag are now updated. */ - zassert_true((_current->arch.mode & CONTROL_FPCA_Msk) != 0, - "Thread FPCA flag not set after main returned back\n"); + zassert_true((_current->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0, + "Thread Ftype flag not cleared after main returned back\n"); #if defined(CONFIG_MPU_STACK_GUARD) zassert_true((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0,