arch: arm: userspace: adapt assembly code for Cortex-M Baseline
In this commit we implement the assembly functions in userspace.S:

- z_arm_userspace_enter()
- z_arm_do_syscall()
- z_arch_user_string_nlen()

for the ARMv6-M and ARMv8-M Baseline architectures. We "inline" the
Baseline implementation alongside the Mainline (ARMv7-M) implementation,
i.e. we rework only what is required to build for Baseline Cortex-M.

Signed-off-by: Ioannis Glaropoulos <Ioannis.Glaropoulos@nordicsemi.no>
parent 2d6bb624d6
commit cfe1b1de1a

1 changed file with 209 additions and 0 deletions
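The Baseline rework below is driven by a handful of Thumb-1 encoding limits: LDR (literal) and most ALU operations can only name the low registers r0-r7, there is no ORR with an immediate, and LDR (immediate) encodes only small word offsets. The following sketch is illustrative only and not part of the patch; the register choices are arbitrary. It contrasts the three substitutions that recur throughout the hunks:

/*
 * Illustrative sketch, not part of the patch. Three independent
 * fragments, each showing a Baseline substitution next to its
 * Mainline form.
 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
    /* (a) LDR (literal) only writes r0-r7, so a branch target is
     * staged through a low register before it can reach IP;
     * MOV (register) may still access the high registers.
     */
    ldr r0, =z_thread_entry
    mov ip, r0

    /* (b) There is no ORR with an immediate, and ORRS takes only low
     * registers, so the constant is materialized first.
     */
    movs r2, #1
    orrs r1, r1, r2

    /* (c) LDR (immediate) encodes only word offsets 0-124, so a large
     * struct offset is loaded as a literal and used in the
     * register-offset form.
     */
    ldr r1, =_thread_offset_to_priv_stack_start
    ldr r0, [r0, r1]
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
    ldr ip, =z_thread_entry    /* (a) wide encoding reaches IP directly */
    orrs r1, r1, #1            /* (b) immediate form exists */
    ldr r0, [r0, #_thread_offset_to_priv_stack_start]  /* (c) 12-bit offset */
#endif

IP (r12) survives these shuffles because MOV (register) is the one data-processing instruction that can still reach the high registers on Baseline.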
@@ -50,9 +50,20 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
     /* prepare to set stack to privileged stack */
     ldr r0, =_kernel
     ldr r0, [r0, #_kernel_offset_to_current]
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    /* move p1 to ip */
+    mov ip, r1
+    ldr r1, =_thread_offset_to_priv_stack_start
+    ldr r0, [r0, r1] /* priv stack ptr */
+    ldr r1, =CONFIG_PRIVILEGED_STACK_SIZE
+    add r0, r0, r1
+    /* Restore p1 from ip */
+    mov r1, ip
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     ldr r0, [r0, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
     ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
     add r0, r0, ip
+#endif
 
     /* store current stack pointer to ip
      * the current stack pointer is needed to retrieve
@@ -73,7 +84,12 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
 
     /* push args to stack */
     push {r1,r2,r3,lr}
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    mov r1, ip
+    push {r0,r1}
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     push {r0,ip}
+#endif
 
     /* Re-program dynamic memory map.
      *
@@ -93,6 +109,16 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
     ldr r0, [r0, #_kernel_offset_to_current]
     bl z_arm_configure_dynamic_mpu_regions
 
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    pop {r0,r3}
+
+    /* load up stack info from user stack */
+    ldr r0, [r3]
+    ldr r3, [r3, #4]
+    mov ip, r3
+
+    push {r0,r3}
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     pop {r0,ip}
 
     /* load up stack info from user stack */
@@ -100,6 +126,7 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
     ldr ip, [ip, #4]
 
     push {r0,ip}
+#endif
 
     /* clear the user stack area to clean out privileged data */
     /* from right past the guard right up to the end */
@@ -111,7 +138,12 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
 #endif
     bl memset
 
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    pop {r0, r1}
+    mov ip, r1
+#elif (defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE))
     pop {r0,ip}
+#endif
 
     /* r0 contains user stack start, ip contains user stack size */
     add r0, r0, ip /* calculate top of stack */
@@ -121,8 +153,17 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
     mov r3, #0
     msr PSPLIM, r3
 #endif
 
+    /* pop remaining arguments from stack before switching stacks */
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    /* Use r4 to pop lr, then restore r4 */
+    mov ip, r4
+    pop {r1,r2,r3,r4}
+    mov lr, r4
+    mov r4, ip
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     pop {r1,r2,r3,lr}
+#endif
 
     /* set stack to user stack */
     msr PSP, r0
@@ -145,6 +186,21 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
 #endif /* CONFIG_EXECUTION_BENCHMARKING */
 
     /* change processor mode to unprivileged */
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    push {r0, r1, r2, r3}
+    ldr r0, =_kernel
+    ldr r0, [r0, #_kernel_offset_to_current]
+    ldr r1, =_thread_offset_to_mode
+    ldr r1, [r0, r1]
+    movs r2, #1
+    orrs r1, r1, r2
+    mrs r3, CONTROL
+    orrs r3, r3, r2
+    mov ip, r3
+    /* Store (unprivileged) mode in thread's mode state variable */
+    ldr r2, =_thread_offset_to_mode
+    str r1, [r0, r2]
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     push {r0, r1}
     ldr r0, =_kernel
     ldr r0, [r0, #_kernel_offset_to_current]
@@ -154,6 +210,7 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
     orrs ip, ip, #1
     /* Store (unprivileged) mode in thread's mode state variable */
     str r1, [r0, #_thread_offset_to_mode]
+#endif
     dsb
     msr CONTROL, ip
 
@@ -162,10 +219,21 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
      * instructions with the previous privilege.
      */
     isb
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    pop {r0, r1, r2, r3}
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     pop {r0, r1}
+#endif
 
     /* jump to z_thread_entry entry */
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    push {r0, r1}
+    ldr r0, =z_thread_entry
+    mov ip, r0
+    pop {r0, r1}
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     ldr ip, =z_thread_entry
+#endif
     bx ip
 
 /**
@@ -197,6 +265,30 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
     msr PSPLIM, ip
 #endif
 
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    /* save current stack pointer (user stack) */
+    mov ip, sp
+    /* temporarily push to user stack */
+    push {r0,r1}
+    /* setup privileged stack */
+    ldr r0, =_kernel
+    ldr r0, [r0, #_kernel_offset_to_current]
+    adds r0, r0, #_thread_offset_to_priv_stack_start
+    ldr r0, [r0] /* priv stack ptr */
+    ldr r1, =CONFIG_PRIVILEGED_STACK_SIZE
+    add r0, r1
+
+    /* Store current SP and LR at the beginning of the priv stack */
+    subs r0, #8
+    mov r1, ip
+    str r1, [r0, #0]
+    mov r1, lr
+    str r1, [r0, #4]
+    mov ip, r0
+    /* Restore user stack and original r0, r1 */
+    pop {r0, r1}
+
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     /* setup privileged stack */
     ldr ip, =_kernel
     ldr ip, [ip, #_kernel_offset_to_current]
@@ -207,6 +299,7 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
     subs ip, #8
     str sp, [ip, #0]
     str lr, [ip, #4]
+#endif
 
     /* switch to privileged stack */
     msr PSP, ip
@@ -224,6 +317,54 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
      * r6 contains call_id
      * r8 contains original LR
      */
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    /* save r0, r1 to ip, lr */
+    mov ip, r0
+    mov lr, r1
+    ldr r0, =K_SYSCALL_BAD
+    cmp r6, r0
+    bne valid_syscall
+
+    /* BAD SYSCALL path */
+    /* fixup stack frame on the privileged stack, adding ssf */
+    mov r1, sp
+    push {r4,r5}
+    /* ssf is present in r1 (sp) */
+    push {r1,lr}
+    /* restore r0, r1 */
+    mov r0, ip
+    mov r1, lr
+    b dispatch_syscall
+valid_syscall:
+    /* push args to complete stack frame */
+    push {r4,r5}
+
+dispatch_syscall:
+    /* original r0 is saved in ip */
+    ldr r0, =_k_syscall_table
+    lsls r6, #2
+    add r0, r6
+    ldr r0, [r0] /* load table address */
+    /* swap ip and r0, restore r1 from lr */
+    mov r1, ip
+    mov ip, r0
+    mov r0, r1
+    mov r1, lr
+    /* execute function from dispatch table */
+    blx ip
+
+    /* restore LR
+     * r0 holds the return value and needs to be preserved
+     */
+    mov ip, r0
+    mov r0, sp
+    adds r0, #12
+    ldr r0, [r0]
+    mov lr, r0
+    /* Restore r0 */
+    mov r0, ip
+
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     ldr ip, =K_SYSCALL_BAD
     cmp r6, ip
     bne valid_syscall
@@ -248,6 +389,7 @@ dispatch_syscall:
 
     /* restore LR */
     ldr lr, [sp,#12]
+#endif
 
 #if defined(CONFIG_BUILTIN_STACK_GUARD)
     /* clear stack limit (stack protection not required in user mode) */
@@ -255,11 +397,39 @@ dispatch_syscall:
     msr PSPLIM, r3
 #endif
 
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    /* set stack back to unprivileged stack */
+    mov ip, r0
+    mov r0, sp
+    ldr r0, [r0,#8]
+    msr PSP, r0
+    /* Restore r0 */
+    mov r0, ip
+
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     /* set stack back to unprivileged stack */
     ldr ip, [sp,#8]
     msr PSP, ip
+#endif
 
     push {r0, r1}
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    push {r2, r3}
+    ldr r0, =_kernel
+    ldr r0, [r0, #_kernel_offset_to_current]
+    ldr r2, =_thread_offset_to_mode
+    ldr r1, [r0, r2]
+    movs r3, #1
+    orrs r1, r1, r3
+    /* Store (unprivileged) mode in thread's mode state variable */
+    str r1, [r0, r2]
+    dsb
+    /* drop privileges by setting bit 0 in CONTROL */
+    mrs r2, CONTROL
+    orrs r2, r2, r3
+    msr CONTROL, r2
+    pop {r2, r3}
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     ldr r0, =_kernel
     ldr r0, [r0, #_kernel_offset_to_current]
     ldr r1, [r0, #_thread_offset_to_mode]
@@ -271,6 +441,7 @@ dispatch_syscall:
     mrs ip, CONTROL
     orrs ip, ip, #1
     msr CONTROL, ip
+#endif
 
     /* ISB is not strictly necessary here (stack pointer is not being
      * touched), but it's recommended to avoid executing pre-fetched
@@ -279,6 +450,33 @@ dispatch_syscall:
     isb
     pop {r0, r1}
 
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    /* Zero out volatile (caller-saved) registers so as to not leak state from
+     * kernel mode. The C calling convention for the syscall handler will
+     * restore the others to original values.
+     */
+    movs r2, #0
+    movs r3, #0
+
+    /*
+     * return back to original function that called SVC, add 1 to force thumb
+     * mode
+     */
+
+    /* Save return value temporarily to ip */
+    mov ip, r0
+
+    mov r0, r8
+    movs r1, #1
+    orrs r0, r0, r1
+
+    /* swap ip, r0 */
+    mov r1, ip
+    mov ip, r0
+    mov r0, r1
+    movs r1, #0
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
+
     /* Zero out volatile (caller-saved) registers so as to not leak state from
      * kernel mode. The C calling convention for the syscall handler will
      * restore the others to original values.
@@ -293,6 +491,8 @@ dispatch_syscall:
      */
     mov ip, r8
     orrs ip, ip, #1
+
+#endif
     bx ip
 
 
@@ -303,7 +503,11 @@ SECTION_FUNC(TEXT, z_arch_user_string_nlen)
     push {r0, r1, r2, r4, r5, lr}
 
     /* sp+4 is error value, init to -1 */
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    ldr r3, =-1
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     mov.w r3, #-1
+#endif
     str r3, [sp, #4]
 
     /* Perform string length calculation */
@@ -315,7 +519,12 @@ z_arch_user_string_nlen_fault_start:
     ldrb r5, [r0, r3]
 
 z_arch_user_string_nlen_fault_end:
+#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
+    cmp r5, #0
+    beq strlen_done
+#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
     cbz r5, strlen_done
+#endif
     cmp r3, r1
     beq.n strlen_done
 
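One more Baseline idiom worth isolating from z_arm_userspace_enter above: the Thumb-1 PUSH/POP register lists cover only r0-r7, plus LR on PUSH and PC on POP, so LR cannot be popped directly. The annotated copy below restates the patch's workaround, which bounces the stacked value through r4 while parking the caller's r4 in IP:

    mov ip, r4          /* park r4 in IP (MOV may access high registers) */
    pop {r1,r2,r3,r4}   /* r4 receives the value that was stacked for LR */
    mov lr, r4          /* MOV (register) may write LR                   */
    mov r4, ip          /* restore the caller's r4                       */

A similar gap explains the string-length hunk: ARMv6-M has no CBZ/CBNZ, so the Mainline "cbz r5, strlen_done" is expanded into "cmp r5, #0" followed by "beq strlen_done".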