arch: arm: cortex_r: Do not use user stack in svc/isr modes

The user thread cannot be trusted, so do not use the stack pointer it
passes in.  Use the thread's privileged stack when in privileged modes to
make sure a user thread cannot trick the svc/isr handlers into writing
to memory it should not.

Signed-off-by: Bradley Bolen <bbolen@lexmark.com>
Commit: 65dcab81d0
Author: Bradley Bolen, 2021-05-23 17:23:26 -04:00
Committed by: Christopher Friedt
8 changed files with 183 additions and 68 deletions

View file

@ -26,6 +26,42 @@ GTEXT(z_arm_int_exit)
GTEXT(z_arm_pendsv)
GDATA(_kernel)
/*
 * Exception-exit helper for CONFIG_USERSPACE on Cortex-R.
 *
 * Entered in SYS mode (see the call sites: "cps #MODE_SYS; pop {...}"
 * immediately precede this macro) with the RFE return record
 * {return PC, SPSR} on top of the current sys/usr stack.  Exits in SVC
 * mode with that record copied onto the SVC stack, so the caller's
 * "rfeia sp!" never dereferences the untrusted user stack pointer.
 * r0/r1 are used as scratch but preserved via the SVC stack.
 */
.macro userspace_exc_exit
#if defined(CONFIG_USERSPACE)
cps #MODE_SVC
/* Reserve 8 bytes on the SVC stack for the {PC, SPSR} RFE record */
sub sp, #8
/* Spill scratch registers; restored at system_thread_exit\@ below */
push {r0-r1}
/*
 * Copy return state from sys/usr state onto the svc stack.
 * We have to put $sp_usr back into $sp since we switched to
 * the privileged stack on exception entry. The return state
 * is on the privileged stack so it needs to be copied to the
 * svc stack since we cannot trust the usr stack.
 */
cps #MODE_SYS
/* r0 = return PC, r1 = return SPSR (RFE record layout) */
pop {r0-r1}
cps #MODE_SVC
/* Fill the reserved record: [sp+8] = PC, [sp+12] = SPSR
 * (becomes [sp+0]/[sp+4] once the r0-r1 spill below is popped)
 */
str r0, [sp, #8]
str r1, [sp, #12]
/* Only switch the stacks if returning to a user thread */
and r1, #MODE_MASK
cmp r1, #MODE_USR
bne system_thread_exit\@
/* Restore user stack pointer */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
cps #MODE_SYS
ldr sp, [r0, #_thread_offset_to_sp_usr] /* sp_usr */
cps #MODE_SVC
system_thread_exit\@:
/* Unspill r0/r1; sp now points at the {PC, SPSR} record for rfeia */
pop {r0-r1}
#endif
.endm
/**
* @brief Kernel housekeeping when exiting interrupt handler installed directly
* in the vector table
@ -98,6 +134,7 @@ __EXIT_INT:
*/
cps #MODE_SYS
pop {r0-r3, r12, lr}
userspace_exc_exit
rfeia sp!
/**
@ -157,6 +194,7 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_exc_exit)
/* Return to the switched thread */
cps #MODE_SYS
pop {r0-r3, r12, lr}
userspace_exc_exit
rfeia sp!
__EXIT_EXC:

View file

@ -45,6 +45,35 @@ SECTION_FUNC(TEXT, _isr_wrapper)
#if defined(CONFIG_CPU_CORTEX_M)
push {r0,lr} /* r0, lr are now the first items on the stack */
#elif defined(CONFIG_CPU_CORTEX_R)
#if defined(CONFIG_USERSPACE)
/* See comment below about svc stack usage */
cps #MODE_SVC
/* Spill r0 to the SVC stack so it survives the checks below */
push {r0}
/* Determine if interrupted thread was in user context */
cps #MODE_IRQ
mrs r0, spsr
and r0, #MODE_MASK
cmp r0, #MODE_USR
bne isr_system_thread
/* r0 = _kernel.current (the interrupted user thread) */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
/* Save away user stack pointer */
cps #MODE_SYS
str sp, [r0, #_thread_offset_to_sp_usr] /* sp_usr */
/* Switch to privileged stack */
ldr sp, [r0, #_thread_offset_to_priv_stack_end] /* priv stack end */
isr_system_thread:
/* Unspill r0 and return to IRQ mode to continue normal ISR entry */
cps #MODE_SVC
pop {r0}
cps #MODE_IRQ
#endif
/*
* Save away r0-r3, r12 and lr_irq for the previous context to the
* process stack since they are clobbered here. Also, save away lr

View file

@ -615,6 +615,29 @@ valid_syscall_id:
* @return N/A
*/
SECTION_FUNC(TEXT, z_arm_svc)
#if defined(CONFIG_USERSPACE)
/* Determine if incoming thread was in user context */
push {r0}
mrs r0, spsr
and r0, #MODE_MASK
cmp r0, #MODE_USR
bne svc_system_thread
/* r0 = _kernel.current (the thread that executed the SVC) */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
/* Save away user stack pointer */
cps #MODE_SYS
str sp, [r0, #_thread_offset_to_sp_usr] /* sp_usr */
/* Switch to privileged stack */
ldr sp, [r0, #_thread_offset_to_priv_stack_end] /* priv stack end */
cps #MODE_SVC
svc_system_thread:
/* r0 restored; user threads now run on their privileged stack */
pop {r0}
#endif
/*
* Switch to system mode to store r0-r3 to the process stack pointer.
* Save r12 and the lr as we could be swapping in another process and
@ -758,22 +781,9 @@ valid_syscall_id:
add sp,sp,r3 /* un-do stack pointer alignment to double-word boundary */
/*
* We need to store the spsr_svc onto the user stack before going off to
* the system call dispatcher. This is needed since the system call may
* call _swap() which will invoke another SVC, overwriting this register.
*/
mrs r0, spsr
/* Switch to system mode */
cps #MODE_SYS
/*
* save spsr_svc to the user stack to be restored after the system call
* completes
*/
push {r0}
/*
* Restore the nested level. The thread that is doing the system call may
* be put to sleep, as in the case of waiting in k_msgq_get() with
@ -788,27 +798,26 @@ valid_syscall_id:
/*
* restore r0-r3 from stack since we've used them above during demux
*/
ldr r0, [sp, #4]
ldr r1, [sp, #8]
ldr r2, [sp, #12]
ldr r3, [sp, #16]
ldr r0, [sp, #0]
ldr r1, [sp, #4]
ldr r2, [sp, #8]
ldr r3, [sp, #12]
/*
* grab return address from USER/SYSTEM stack frame
* (just past the SVC opcode)
*/
ldr r8, [sp, #24]
ldr r8, [sp, #20]
/*
* User stack left with:
*
* sp+0: spsr_svc
* sp+4: r0
* sp+8: r1
* sp+12: r2
* sp+16: r3
* sp+20: r12
* sp+24: LR_svc (address of opcode just following SVC opcode )
* sp: r0
* sp+4: r1
* sp+8: r2
* sp+12: r3
* sp+16: r12
* sp+20: LR_svc (address of opcode just following SVC opcode )
*/
/* branch to _arm_do_syscall. We will not return here. */

View file

@ -267,6 +267,11 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */
#if defined(CONFIG_CPU_CORTEX_R)
/*
 * Cache the end address of the privileged stack (start + configured
 * size).  NOTE(review): the Cortex-R exception/SVC entry assembly
 * appears to load this value directly into sp when switching a user
 * thread onto its privileged stack — confirm against swap_helper.S.
 */
_current->arch.priv_stack_end =
_current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
#endif
z_arm_userspace_enter(user_entry, p1, p2, p3,
(uint32_t)_current->stack_info.start,
_current->stack_info.size -

View file

@ -59,11 +59,18 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
add r0, r0, r1
/* Restore p1 from ip */
mov r1, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_CORTEX_R)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
ldr r0, [r0, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
add r0, r0, ip
#elif defined(CONFIG_CPU_CORTEX_R)
ldr r0, [r0, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
add r0, r0, ip
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
str r0, [ip, #_thread_offset_to_priv_stack_end] /* priv stack end */
#endif
/* store current stack pointer to ip
@ -345,7 +352,6 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
pop {r0, r1}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_CORTEX_R)
/* setup privileged stack */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
@ -356,12 +362,16 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
subs ip, #8
str sp, [ip, #0]
str lr, [ip, #4]
#elif defined(CONFIG_CPU_CORTEX_R)
/* Store current LR at the beginning of the priv stack */
push {lr}
#endif
/* switch to privileged stack */
#if defined(CONFIG_CPU_CORTEX_R)
mov sp, ip
#else
#if !defined(CONFIG_CPU_CORTEX_R)
/*
* switch to privileged stack
* The stack switch happens on exception entry for Cortex-R
*/
msr PSP, ip
#endif
@ -444,7 +454,11 @@ dispatch_syscall:
/* BAD SYSCALL path */
/* fixup stack frame on the privileged stack, adding ssf */
mov ip, sp
#if defined(CONFIG_CPU_CORTEX_R)
push {r4,r5,ip}
#else
push {r4,r5,ip,lr}
#endif
b dispatch_syscall
valid_syscall:
@ -478,12 +492,16 @@ dispatch_syscall:
* for same reasoning as above: we now disable external interrupts.
*/
cpsid i
#endif
/* restore LR */
ldr lr, [sp,#12]
#else
/* restore LR */
ldr lr, [sp,#16]
#endif
#endif
#if defined(CONFIG_BUILTIN_STACK_GUARD)
/*
@ -528,14 +546,11 @@ dispatch_syscall:
mov r0, ip
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) \
|| defined(CONFIG_CPU_CORTEX_R)
/* set stack back to unprivileged stack */
ldr ip, [sp,#12]
#endif
#if defined(CONFIG_CPU_CORTEX_R)
mov sp, ip
#else
#if !defined(CONFIG_CPU_CORTEX_R)
msr PSP, ip
#endif
@ -583,31 +598,6 @@ dispatch_syscall:
isb
pop {r0, r1}
#if defined(CONFIG_CPU_CORTEX_R)
/*
* Re-load items from user stack that were save in swap_helper.S. We
* don't need the values of R0-R3, so just adjust the stack pointer. We
* do need the old r12 and lr values.
*/
pop {r1} /* r1 = spsr_svc */
add sp, sp, #(4*4)
ldmia sp!, {r2-r3}
cps #MODE_SVC
/*
* Restore lr_svc stored into the SVC mode stack by the mode entry
* function. This ensures that the return address of the interrupted
* context is preserved in case of interrupt nesting.
*/
pop {lr}
/* restore the spsr_svc register */
msr spsr_fsxc,r1
mov r12, r2
mov lr, r3
#endif
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* Zero out volatile (caller-saved) registers so as to not leak state from
* kernel mode. The C calling convention for the syscall handler will
@ -653,6 +643,39 @@ dispatch_syscall:
#endif
#if defined(CONFIG_CPU_CORTEX_R)
/*
* The stack contains (from top)
* spsr lr lr_svc r12 r3 r2 r1 r0 lr sp r5 r4
* Unwind everything except the return state that will be used for rfeia.
*/
add sp, sp, #(8*4)
ldmia sp!, {r12,lr}
pop {r2, r3}
cps #MODE_SVC
/*
* Restore lr_svc stored into the SVC mode stack by the mode entry
* function. This ensures that the return address of the interrupted
* context is preserved in case of interrupt nesting.
*/
pop {lr}
/*
* Move the return state from the privileged stack to the service
* stack. We need to put the user stack back in $sp, but we cannot
* trust the user stack. Therefore, put the return state on the svc
* stack and return from there.
*/
push {r2, r3}
/* Restore user stack pointer */
ldr r1, =_kernel
ldr r1, [r1, #_kernel_offset_to_current]
cps #MODE_SYS
ldr sp, [r1, #_thread_offset_to_sp_usr] /* sp_usr */
cps #MODE_SVC
/* Zero out volatile (caller-saved) registers so as to not leak state from
* kernel mode. The C calling convention for the syscall handler will
* restore the others to original values.
@ -661,12 +684,7 @@ dispatch_syscall:
mov r2, #0
mov r3, #0
/*
* return from SVC state to user state. SRSDB was used to save state
* in swap_helper.S. Change to sys mode so that we can recover those
* those values from the user stack
*/
cps #MODE_SYS
/* return from SVC state to user state. */
rfeia sp!
#else
bx ip

View file

@ -40,6 +40,10 @@ GEN_OFFSET_SYM(_thread_arch_t, mode_exc_return);
#endif
#if defined(CONFIG_USERSPACE)
GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
#if defined(CONFIG_CPU_CORTEX_R)
/* Emit assembly-visible offset symbols for the Cortex-R stack
 * bookkeeping fields (privileged stack end, saved user sp).
 */
GEN_OFFSET_SYM(_thread_arch_t, priv_stack_end);
GEN_OFFSET_SYM(_thread_arch_t, sp_usr);
#endif
#endif
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)

View file

@ -39,6 +39,14 @@
#ifdef CONFIG_USERSPACE
#define _thread_offset_to_priv_stack_start \
(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET)
#if defined(CONFIG_CPU_CORTEX_R)
/* Byte offsets, relative to the thread struct base, of the Cortex-R
 * arch fields; used by assembly as ldr/str [rN, #offset] operands.
 */
#define _thread_offset_to_priv_stack_end \
(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_end_OFFSET)
#define _thread_offset_to_sp_usr \
(___thread_t_arch_OFFSET + ___thread_arch_t_sp_usr_OFFSET)
#endif
#endif
#if defined(CONFIG_THREAD_STACK_INFO)

View file

@ -121,6 +121,10 @@ struct _thread_arch {
#if defined(CONFIG_USERSPACE)
uint32_t priv_stack_start;
#if defined(CONFIG_CPU_CORTEX_R)
/* End of the privileged stack (priv_stack_start +
 * CONFIG_PRIVILEGED_STACK_SIZE); sp value used when entering
 * privileged modes from a user thread.
 */
uint32_t priv_stack_end;
/* Saved user-mode stack pointer (sp_usr) while the thread runs in a
 * privileged mode; restored on exception/syscall exit.
 */
uint32_t sp_usr;
#endif
#endif
#endif
};