arm: userspace: Rework system call arguments

This patch changes the ARM system calls to use registers for passing
of arguments.  This removes the possibility of stack issues when
callers do not adhere to the AAPCS.

Fixes #6802

Signed-off-by: Andy Gross <andy.gross@linaro.org>
This commit is contained in:
Andy Gross 2018-04-13 16:03:24 -05:00 committed by Andrew Boie
commit 09a8810b33
3 changed files with 81 additions and 107 deletions

View file

@ -368,25 +368,30 @@ _oops:
*
* On SVC exception, the stack looks like the following:
* r0 - r1 - r2 - r3 - r12 - LR - PC - PSR
* r5 - r6 - call id - saved LR
*
* Registers look like:
* r0 - arg1
* r1 - arg2
* r2 - arg3
* r3 - arg4
* r4 - arg5
* r5 - arg6
* r6 - call_id
* r7 - saved link register
*/
_do_syscall:
ldr r1, [r0, #24] /* grab address of PC from stack frame */
str r1, [r0, #44] /* store address to use for LR after syscall */
ldr r7, [r0, #24] /* grab address of PC from stack frame */
ldr r1, =_arm_do_syscall
str r1, [r0, #24] /* overwrite the LR to point to _arm_do_syscall */
/* validate syscall limit, only set priv mode if valid */
ldr ip, =_SYSCALL_LIMIT
ldr r1, [r0, #40]
cmp r1, ip
cmp r6, ip
blt valid_syscall_id
/* bad syscall id. Set arg0 to bad id and set call_id to SYSCALL_BAD */
str r1, [r0, #0]
ldr r1, =_SYSCALL_BAD
str r1, [r0, #40]
str r6, [r0, #0]
ldr r6, =_SYSCALL_BAD
valid_syscall_id:
/* set mode to privileged, r2 still contains value from CONTROL */

View file

@ -118,62 +118,54 @@ SECTION_FUNC(TEXT,_arm_userspace_enter)
*/
SECTION_FUNC(TEXT, _arm_do_syscall)
/*
* r0-r3 are values from pre-SVC from stack frame stored during SVC
* 16 bytes of storage reside on the stack:
* arg5, arg6, call_id, and LR from SVC frame
* r0-r5 contain arguments
* r6 contains call_id
* r7 contains original LR
*/
push {r4,r5,r6,lr}
ldr ip, =_k_syscall_table
ldr r4, [sp, #24] /* load call_id from stack */
lsl r4, #2
add ip, r4
ldr ip, [ip] /* load table address */
ldr r5, =_SYSCALL_BAD
lsl r5, #2 /* shift to match the shift we did on the call_id */
cmp r4, r5
ldr ip, =_SYSCALL_BAD
cmp r6, ip
bne valid_syscall
/* BAD SYSCALL path */
/* fixup stack frame on unprivileged stack, adding ssf */
/* pop registers and lr as this is a one way jump */
mov r4, sp
str r4, [sp, #24]
pop {r4,r5,r6,lr}
mov ip, sp
push {r4,r5,ip,lr}
b dispatch_syscall
valid_syscall:
/* setup privileged stack */
ldr r4, =_kernel
ldr r4, [r4, #_kernel_offset_to_current]
ldr r5, [r4, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
push {r6}
ldr r6, =_kernel
ldr r6, [r6, #_kernel_offset_to_current]
ldr ip, [r6, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
ldr r6, =CONFIG_PRIVILEGED_STACK_SIZE
add r5, r6
/* setup privileged stack frame */
/* 16 bytes: arg5, arg6, ssf, 4 bytes padding */
sub r5, #16
ldr r6, [sp, #16]
str r6, [r5, #0]
ldr r6, [sp, #20]
str r6, [r5, #4]
mov r6, sp
str r6, [r5, #8] /* store ssf of unprivileged stack */
ldr r6, =0
str r6, [r5, #12] /* store zeroed padding */
add ip, r6
pop {r6}
subs ip, #8
str sp, [ip, #0]
str lr, [ip, #4]
/* switch to privileged stack */
msr PSP, r5
msr PSP, ip
/* push args to complete stack frame */
push {r4,r5}
dispatch_syscall:
ldr ip, =_k_syscall_table
lsl r6, #2
add ip, r6
ldr ip, [ip] /* load table address */
/* execute function from dispatch table */
blx ip
/* restore LR */
ldr lr, [sp,#12]
/* set stack back to unprivileged stack */
ldr ip, [sp,#8]
msr PSP, ip
pop {r4,r5,r6,lr}
/* drop privileges by setting bit 0 in CONTROL */
mrs ip, CONTROL
orrs ip, ip, #1
@ -189,6 +181,6 @@ dispatch_syscall:
* return back to original function that called SVC, add 1 to force thumb
* mode
*/
ldr ip, [sp, #12]
mov ip, r7
orrs ip, ip, #1
bx ip

View file

@ -360,19 +360,16 @@ static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r4 __asm__("r4") = arg5;
register u32_t r5 __asm__("r5") = arg6;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("sub sp, #16\n"
"str %[a5], [sp, #0]\n"
"str %[a6], [sp, #4]\n"
"str %[cid], [sp, #8]\n"
"svc %[svid]\n"
"add sp, #16\n"
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [cid] "r" (call_id),
[svid] "i" (_SVC_CALL_SYSTEM_CALL),
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
[a5] "r" (arg5), [a6] "r" (arg6)
: "ip", "memory");
"r" (r4), "r" (r5), "r" (r6)
: "r7", "memory");
return ret;
}
@ -384,18 +381,15 @@ static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r4 __asm__("r4") = arg5;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("sub sp, #16\n"
"str %[a5], [sp, #0]\n"
"str %[cid], [sp, #8]\n"
"svc %[svid]\n"
"add sp, #16\n"
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [cid] "r" (call_id),
[svid] "i" (_SVC_CALL_SYSTEM_CALL),
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
[a5] "r" (arg5)
: "ip", "memory");
"r" (r4), "r" (r6)
: "r7", "memory");
return ret;
}
@ -407,16 +401,14 @@ static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r3 __asm__("r3") = arg4;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("sub sp, #16\n"
"str %[cid], [sp,#8]\n"
"svc %[svid]\n"
"add sp, #16\n"
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [cid] "r" (call_id),
[svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3)
: "ip", "memory");
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r6)
: "r7", "memory");
return ret;
}
@ -427,16 +419,13 @@ static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r2 __asm__("r2") = arg3;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile("sub sp, #16\n"
"str %[cid], [sp,#8]\n"
"svc %[svid]\n"
"add sp, #16\n"
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [cid] "r" (call_id),
[svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2)
: "r3", "ip", "memory");
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r6)
: "r7", "memory");
return ret;
}
@ -445,17 +434,13 @@ static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r1 __asm__("r1") = arg2;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile(
"sub sp, #16\n"
"str %[cid], [sp,#8]\n"
"svc %[svid]\n"
"add sp, #16\n"
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [cid] "r" (call_id),
[svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1)
: "r2", "r3", "ip", "memory");
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r6)
: "r7", "memory");
return ret;
}
@ -463,34 +448,26 @@ static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
{
register u32_t ret __asm__("r0") = arg1;
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile(
"sub sp, #16\n"
"str %[cid], [sp,#8]\n"
"svc %[svid]\n"
"add sp, #16\n"
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [cid] "r" (call_id),
[svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret)
: "r1", "r2", "r3", "ip", "memory");
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r6)
: "r7", "memory");
return ret;
}
static inline u32_t _arch_syscall_invoke0(u32_t call_id)
{
register u32_t ret __asm__("r0");
register u32_t r6 __asm__("r6") = call_id;
__asm__ volatile(
"sub sp, #16\n"
"str %[cid], [sp,#8]\n"
"svc %[svid]\n"
"add sp, #16\n"
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [cid] "r" (call_id),
[svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret)
: "r1", "r2", "r3", "ip", "memory");
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r6)
: "r7", "memory");
return ret;
}