arch: arm: userspace: z_arm_userspace_enter: reduce push/pop overhead

ARM user space requires ARM_MPU. We can, therefore, remove the
unnecessary #ifdef CONFIG_ARM_MPU blocks in userspace.S. In addition,
we do minor refactoring in z_arm_userspace_enter(), z_arm_pendsv(),
and z_arm_svc(), aiming at reducing the push/pop overhead as much
as possible.

Signed-off-by: Ioannis Glaropoulos <Ioannis.Glaropoulos@nordicsemi.no>
Authored by Ioannis Glaropoulos on 2019-10-11 08:40:39 +02:00; committed by Andrew Boie
commit 237033c61b
2 changed files with 11 additions and 14 deletions
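
For orientation, a simplified sketch of the z_arm_userspace_enter() flow that the userspace.S hunks below produce; the CONFIG_BUILTIN_STACK_GUARD and CONFIG_INIT_STACKS conditionals, the memset argument setup, and the surrounding entry code are omitted, and the register-role comments are inferred from the diff rather than copied from the file:

    /* save caller state once, in two groups */
    push {r1,r2,r3,lr}                      /* restored only just before the stack switch */
    push {r0,ip}                            /* restored as soon as the stack info is needed */

    ldr r0, =_kernel
    ldr r0, [r0, #_kernel_offset_to_current]
    bl z_arm_configure_dynamic_mpu_regions  /* the call may clobber r0-r3, ip, lr */

    pop {r0,ip}
    ldr r0, [ip]                            /* user stack start */
    ldr ip, [ip, #4]                        /* user stack size */
    push {r0,ip}                            /* keep the pair alive across memset */
    /* ... memset() clears the user stack area ... */
    pop {r0,ip}
    add r0, r0, ip                          /* top of user stack */

    mov r3, #0                              /* r3 holds no live value yet, so no push {r3}/pop {r3} */
    msr PSPLIM, r3                          /* CONFIG_BUILTIN_STACK_GUARD only */

    /* pop remaining arguments from stack before switching stacks */
    pop {r1,r2,r3,lr}
    msr PSP, r0                             /* set stack to user stack */

The old sequence instead wrapped both the MPU reconfiguration and the memset() call in full push {r0,r1,r2,r3,ip,lr}/pop {r0,r1,r2,r3,ip,lr} pairs and bracketed the PSPLIM clear with push {r3}/pop {r3}; splitting the save into two groups lets each register group be saved once and restored exactly where it is next needed.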

View file

@@ -249,8 +249,7 @@ in_fp_endif:
 #if defined (CONFIG_ARM_MPU)
     /* Re-program dynamic memory map */
     push {r2,lr}
-    ldr r0, =_kernel
-    ldr r0, [r0, #_kernel_offset_to_current]
+    mov r0, r2 /* _current thread */
     bl z_arm_configure_dynamic_mpu_regions
     pop {r2,lr}
 #endif
@@ -501,7 +500,6 @@ _do_syscall:
     /* Bad syscalls treated as valid syscalls with ID K_SYSCALL_BAD. */
 valid_syscall_id:
-    push {r0, r1}
     ldr r0, =_kernel
     ldr r0, [r0, #_kernel_offset_to_current]
     ldr r1, [r0, #_thread_offset_to_mode]
@@ -518,7 +516,6 @@ valid_syscall_id:
      * instructions with the previous privilege.
      */
     isb
-    pop {r0, r1}
     /* return from SVC to the modified LR - z_arm_do_syscall */
     bx lr

View file

@@ -62,7 +62,10 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
     msr PSPLIM, r0
 #endif
-#if defined (CONFIG_ARM_MPU)
+    /* push args to stack */
+    push {r1,r2,r3,lr}
+    push {r0,ip}
     /* Re-program dynamic memory map.
      *
      * Important note:
@@ -77,20 +80,17 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
      * stack, since we do not control how much stack is actually left, when
      * user invokes z_arm_userspace_enter().
      */
-    push {r0,r1,r2,r3,ip,lr}
     ldr r0, =_kernel
     ldr r0, [r0, #_kernel_offset_to_current]
     bl z_arm_configure_dynamic_mpu_regions
-    pop {r0,r1,r2,r3,ip,lr}
-#endif
+    pop {r0,ip}
     /* load up stack info from user stack */
     ldr r0, [ip]
     ldr ip, [ip, #4]
     push {r0,ip}
-    /* push args to stack */
-    push {r0,r1,r2,r3,ip,lr}
     /* clear the user stack area to clean out privileged data */
     /* from right past the guard right up to the end */
@@ -102,18 +102,18 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
 #endif
     bl memset
-    pop {r0,r1,r2,r3,ip,lr}
+    pop {r0,ip}
     /* r0 contains user stack start, ip contains user stack size */
     add r0, r0, ip /* calculate top of stack */
 #if defined(CONFIG_BUILTIN_STACK_GUARD)
     /* clear stack limit (stack protection not required in user mode) */
-    push {r3}
     mov r3, #0
     msr PSPLIM, r3
-    pop {r3}
 #endif
+    /* pop remaining arguments from stack before switching stacks */
+    pop {r1,r2,r3,lr}
     /* set stack to user stack */
     msr PSP, r0