aarch64: userspace: Implement syscalls

This patch adds the code managing the syscalls. The privileged stack
is setup before jumping into the real syscall.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
This commit is contained in:
Carlo Caione 2020-11-26 10:28:07 +01:00 committed by Anas Nashif
commit dacd176991
8 changed files with 211 additions and 1 deletions

View file

@ -64,6 +64,9 @@ config AARCH64_IMAGE_HEADER
This option enables standard ARM64 boot image header used by Linux
and understood by loaders such as u-boot on Xen xl tool.
config PRIVILEGED_STACK_SIZE
default 4096
if CPU_CORTEX_A

config ARMV8_A_NS

View file

@ -111,6 +111,11 @@ SECTION_FUNC(TEXT, z_arm64_sync_exc)
cmp x1, #_SVC_CALL_RUNTIME_EXCEPT
beq oops
#ifdef CONFIG_USERSPACE
cmp x1, #_SVC_CALL_SYSTEM_CALL
beq z_arm64_do_syscall
#endif
#ifdef CONFIG_IRQ_OFFLOAD
cmp x1, #_SVC_CALL_IRQ_OFFLOAD
beq offload

View file

@ -54,6 +54,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
}
pInitCtx->tpidrro_el0 = 0x0;
thread->arch.priv_stack_start = 0;
#else
pInitCtx->elr = (uint64_t)z_thread_entry;
#endif
@ -82,6 +83,9 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
z_arch_esf_t *pInitCtx;
uintptr_t stack_ptr;
/* Setup the private stack */
_current->arch.priv_stack_start = (uint64_t)(_current->stack_obj);
/* Reset the stack pointer to the base discarding any old context */
stack_ptr = Z_STACK_PTR_ALIGN(_current->stack_info.start +
_current->stack_info.size -

View file

@ -85,6 +85,61 @@ abv_fail:
mov x0, #-1
ret
/*
 * System call entry point.
 *
 * Dispatched from the synchronous exception handler when the SVC
 * immediate is _SVC_CALL_SYSTEM_CALL. On entry SP points at the ESF
 * saved by the exception vectors; the syscall itself runs on the
 * thread's privileged stack, and the path returns to the caller
 * through z_arm64_exit_exc.
 */
GTEXT(z_arm64_do_syscall)
SECTION_FUNC(TEXT, z_arm64_do_syscall)
/* Recover the syscall parameters (x0..x5) from the ESF */
ldp x0, x1, [sp, ___esf_t_x0_x1_OFFSET]
ldp x2, x3, [sp, ___esf_t_x2_x3_OFFSET]
ldp x4, x5, [sp, ___esf_t_x4_x5_OFFSET]
/* Recover the syscall ID (x8, the low register of the x8/x9 ESF pair) */
ldr x8, [sp, ___esf_t_x8_x9_OFFSET]
/* Check whether the ID is valid: out-of-range IDs are redirected to
 * the K_SYSCALL_BAD handler instead of indexing past the table
 */
ldr x9, =K_SYSCALL_LIMIT
cmp x8, x9
blo valid_syscall_id
ldr x8, =K_SYSCALL_BAD
valid_syscall_id:
/* Fetch the handler address: _k_syscall_table[ID] (8-byte entries) */
ldr x9, =_k_syscall_table
ldr x9, [x9, x8, lsl #3]
/* Recover the privileged stack top for _current (x8 is free to reuse
 * as scratch here, the ID has already been consumed)
 */
#ifdef CONFIG_SMP
get_cpu x10, x8
ldr x10, [x10, #___cpu_t_current_OFFSET]
#else
ldr x10, =_kernel
ldr x10, [x10, #_kernel_offset_to_current]
#endif
ldr x10, [x10, #_thread_offset_to_priv_stack_start]
add x10, x10, #CONFIG_PRIVILEGED_STACK_SIZE
/* Save the original SP (pointing at the ESF) on the privileged stack,
 * using a 16-byte push to keep the AArch64-mandated SP alignment
 */
mov x11, sp
mov sp, x10
str x11, [sp, #-16]!
/* Jump into the syscall with IRQs re-enabled for its duration */
msr daifclr, #(DAIFSET_IRQ_BIT)
blr x9
msr daifset, #(DAIFSET_IRQ_BIT)
/* Restore the original SP containing the ESF */
ldr x11, [sp], #16
mov sp, x11
/* Save the return value (x0) into the ESF so it reaches the caller */
str x0, [sp, ___esf_t_x0_x1_OFFSET]
/* Return from exception */
b z_arm64_exit_exc
/*
 * Routine to jump into userspace
 *

View file

@ -29,6 +29,10 @@
#include <kernel_arch_data.h>
#include <kernel_offsets.h>
#ifdef CONFIG_USERSPACE
GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
#endif
GEN_NAMED_OFFSET_SYM(_callee_saved_t, x19, x19_x20);
GEN_NAMED_OFFSET_SYM(_callee_saved_t, x21, x21_x22);
GEN_NAMED_OFFSET_SYM(_callee_saved_t, x23, x23_x24);

View file

@ -9,4 +9,9 @@
#include <offsets.h>
#ifdef CONFIG_USERSPACE
#define _thread_offset_to_priv_stack_start \
(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET)
#endif
#endif /* ZEPHYR_ARCH_ARM_INCLUDE_AARCH64_OFFSETS_SHORT_ARCH_H_ */

View file

@ -19,6 +19,7 @@
#define _SVC_CALL_CONTEXT_SWITCH 0
#define _SVC_CALL_IRQ_OFFLOAD 1
#define _SVC_CALL_RUNTIME_EXCEPT 2
#define _SVC_CALL_SYSTEM_CALL 3
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
@ -31,6 +32,137 @@
extern "C" {
#endif
/*
 * Syscall invocation macros. AArch64-specific machine constraints used to
 * ensure args land in the proper registers.
 */
/*
 * Invoke a system call with six arguments.
 *
 * Arguments are pinned via register variables to x0-x5 and the syscall
 * ID to x8, matching the registers z_arm64_do_syscall recovers from the
 * ESF. x0 doubles as the return-value register, so `ret` is listed both
 * as an output and (carrying arg1) as an input of the asm. The "memory"
 * clobber is needed because the kernel side may read/write memory the
 * pointer arguments reference.
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5, uintptr_t arg6,
uintptr_t call_id)
{
register uint64_t ret __asm__("x0") = arg1;
register uint64_t r1 __asm__("x1") = arg2;
register uint64_t r2 __asm__("x2") = arg3;
register uint64_t r3 __asm__("x3") = arg4;
register uint64_t r4 __asm__("x4") = arg5;
register uint64_t r5 __asm__("x5") = arg6;
register uint64_t r8 __asm__("x8") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r4), "r" (r5), "r" (r8)
: "memory");
return ret;
}
/*
 * Invoke a system call with five arguments (x0-x4), ID in x8.
 * See arch_syscall_invoke6 for the register-pinning rationale.
 */
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t arg5,
uintptr_t call_id)
{
register uint64_t ret __asm__("x0") = arg1;
register uint64_t r1 __asm__("x1") = arg2;
register uint64_t r2 __asm__("x2") = arg3;
register uint64_t r3 __asm__("x3") = arg4;
register uint64_t r4 __asm__("x4") = arg5;
register uint64_t r8 __asm__("x8") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r4), "r" (r8)
: "memory");
return ret;
}
/*
 * Invoke a system call with four arguments (x0-x3), ID in x8.
 * See arch_syscall_invoke6 for the register-pinning rationale.
 */
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3, uintptr_t arg4,
uintptr_t call_id)
{
register uint64_t ret __asm__("x0") = arg1;
register uint64_t r1 __asm__("x1") = arg2;
register uint64_t r2 __asm__("x2") = arg3;
register uint64_t r3 __asm__("x3") = arg4;
register uint64_t r8 __asm__("x8") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r3),
"r" (r8)
: "memory");
return ret;
}
/*
 * Invoke a system call with three arguments (x0-x2), ID in x8.
 * See arch_syscall_invoke6 for the register-pinning rationale.
 */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
uintptr_t arg3,
uintptr_t call_id)
{
register uint64_t ret __asm__("x0") = arg1;
register uint64_t r1 __asm__("x1") = arg2;
register uint64_t r2 __asm__("x2") = arg3;
register uint64_t r8 __asm__("x8") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r2), "r" (r8)
: "memory");
return ret;
}
/*
 * Invoke a system call with two arguments (x0, x1), ID in x8.
 * See arch_syscall_invoke6 for the register-pinning rationale.
 */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
uintptr_t call_id)
{
register uint64_t ret __asm__("x0") = arg1;
register uint64_t r1 __asm__("x1") = arg2;
register uint64_t r8 __asm__("x8") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r1), "r" (r8)
: "memory");
return ret;
}
/*
 * Invoke a system call with one argument (x0), ID in x8.
 * See arch_syscall_invoke6 for the register-pinning rationale.
 */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
uintptr_t call_id)
{
register uint64_t ret __asm__("x0") = arg1;
register uint64_t r8 __asm__("x8") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r8)
: "memory");
return ret;
}
/*
 * Invoke a system call with no arguments; ID in x8.
 *
 * `ret` is deliberately left uninitialized: x0 is output-only here and
 * receives the return value written back by the syscall path.
 */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
register uint64_t ret __asm__("x0");
register uint64_t r8 __asm__("x8") = call_id;
__asm__ volatile("svc %[svid]\n"
: "=r"(ret)
: [svid] "i" (_SVC_CALL_SYSTEM_CALL),
"r" (ret), "r" (r8)
: "memory");
return ret;
}
static inline bool arch_is_user_context(void)
{
uint64_t tpidrro_el0;

View file

@ -40,7 +40,9 @@ struct _callee_saved {
typedef struct _callee_saved _callee_saved_t;

struct _thread_arch {
#ifdef CONFIG_USERSPACE
uint64_t priv_stack_start;
#endif
};

typedef struct _thread_arch _thread_arch_t;