From 6cf0d000e81ea015024604d1f563485512560c7c Mon Sep 17 00:00:00 2001
From: Carlo Caione
Date: Wed, 25 Nov 2020 14:44:21 +0100
Subject: [PATCH] aarch64: userspace: Introduce skeleton code for user-threads

Introduce the first pieces needed to schedule user threads by defining
two different code paths for kernel and user threads.

Signed-off-by: Carlo Caione
---
 arch/arm/core/aarch64/CMakeLists.txt        |  1 +
 arch/arm/core/aarch64/thread.c              | 58 +++++++++++++++++++--
 arch/arm/core/aarch64/userspace.S           | 27 ++++++++++
 arch/arm/include/aarch64/kernel_arch_func.h |  1 +
 4 files changed, 82 insertions(+), 5 deletions(-)
 create mode 100644 arch/arm/core/aarch64/userspace.S

diff --git a/arch/arm/core/aarch64/CMakeLists.txt b/arch/arm/core/aarch64/CMakeLists.txt
index acfa9c86488..c96f1e7b88c 100644
--- a/arch/arm/core/aarch64/CMakeLists.txt
+++ b/arch/arm/core/aarch64/CMakeLists.txt
@@ -20,6 +20,7 @@ zephyr_library_sources(
   vector_table.S
 )
 
+zephyr_library_sources_ifdef(CONFIG_USERSPACE userspace.S)
 zephyr_library_sources_ifdef(CONFIG_GEN_SW_ISR_TABLE isr_wrapper.S)
 zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)
 zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE ../common/tls.c)
diff --git a/arch/arm/core/aarch64/thread.c b/arch/arm/core/aarch64/thread.c
index 111e132a2b3..c9977d0b497 100644
--- a/arch/arm/core/aarch64/thread.c
+++ b/arch/arm/core/aarch64/thread.c
@@ -16,11 +16,13 @@
 #include
 #include
 
-/*
- * An initial context, to be "restored" by z_arm64_context_switch(), is put at
- * the other end of the stack, and thus reusable by the stack when not needed
- * anymore.
- */
+#ifdef CONFIG_USERSPACE
+static bool is_user(struct k_thread *thread)
+{
+        return (thread->base.user_options & K_USER) != 0;
+}
+#endif
+
 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
                      char *stack_ptr, k_thread_entry_t entry,
                      void *p1, void *p2, void *p3)
@@ -40,7 +42,19 @@
          * parameters already in place in x1(arg1), x2(arg2), x3(arg3).
          * - SPSR_ELn: to enable IRQs (we are masking FIQs).
          */
+#ifdef CONFIG_USERSPACE
+        /*
+         * If the new thread is a user thread we jump into
+         * arch_user_mode_enter() when still in EL1.
+         */
+        if (is_user(thread)) {
+                pInitCtx->elr = (uint64_t)arch_user_mode_enter;
+        } else {
+                pInitCtx->elr = (uint64_t)z_thread_entry;
+        }
+#else
         pInitCtx->elr = (uint64_t)z_thread_entry;
+#endif
         pInitCtx->spsr = SPSR_MODE_EL1T | DAIF_FIQ_BIT;
 
         /*
@@ -58,3 +72,37 @@
 
         return z_get_next_switch_handle(*old_thread);
 }
+
+#ifdef CONFIG_USERSPACE
+FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
+                                        void *p1, void *p2, void *p3)
+{
+        z_arch_esf_t *pInitCtx;
+        uintptr_t stack_ptr;
+
+        /* Reset the stack pointer to the base discarding any old context */
+        stack_ptr = Z_STACK_PTR_ALIGN(_current->stack_info.start +
+                                      _current->stack_info.size -
+                                      _current->stack_info.delta);
+
+        /*
+         * Reconstruct the ESF from scratch to leverage the z_arm64_exit_exc()
+         * macro that will simulate a return from exception to move from EL1t
+         * to EL0t. On return we will be in userspace.
+         */
+        pInitCtx = Z_STACK_PTR_TO_FRAME(struct __esf, stack_ptr);
+
+        pInitCtx->spsr = DAIF_FIQ_BIT | SPSR_MODE_EL0T;
+        pInitCtx->elr = (uint64_t)z_thread_entry;
+
+        pInitCtx->x0 = (uint64_t)user_entry;
+        pInitCtx->x1 = (uint64_t)p1;
+        pInitCtx->x2 = (uint64_t)p2;
+        pInitCtx->x3 = (uint64_t)p3;
+
+        /* All the needed information is already in the ESF */
+        z_arm64_userspace_enter(pInitCtx);
+
+        CODE_UNREACHABLE;
+}
+#endif
diff --git a/arch/arm/core/aarch64/userspace.S b/arch/arm/core/aarch64/userspace.S
new file mode 100644
index 00000000000..4b3371393e0
--- /dev/null
+++ b/arch/arm/core/aarch64/userspace.S
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2020 Carlo Caione
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include "macro_priv.inc"
+
+_ASM_FILE_PROLOGUE
+
+/*
+ * Routine to jump into userspace
+ *
+ * We leverage z_arm64_exit_exc() to pop out the entry function and parameters
+ * from ESF and fake a return from exception to move from EL1 to EL0. The fake
+ * ESF is built in arch_user_mode_enter() before jumping here.
+ */
+
+GTEXT(z_arm64_userspace_enter)
+SECTION_FUNC(TEXT, z_arm64_userspace_enter)
+        mov     sp, x0
+        b       z_arm64_exit_exc
diff --git a/arch/arm/include/aarch64/kernel_arch_func.h b/arch/arm/include/aarch64/kernel_arch_func.h
index 430a0d8a417..d80b51df0b3 100644
--- a/arch/arm/include/aarch64/kernel_arch_func.h
+++ b/arch/arm/include/aarch64/kernel_arch_func.h
@@ -40,6 +40,7 @@ static inline void arch_switch(void *switch_to, void **switched_from)
 }
 
 extern void z_arm64_fatal_error(z_arch_esf_t *esf, unsigned int reason);
+extern void z_arm64_userspace_enter(z_arch_esf_t *esf);
 
 #endif /* _ASMLANGUAGE */
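
Note (not part of the patch): the split between the two code paths above is driven
entirely by the K_USER option at thread creation time. Below is a minimal usage
sketch of that selection, assuming CONFIG_USERSPACE=y on an AArch64 target; the
stack size, priority, entry function and object names are arbitrary. A thread
created with K_USER gets its initial ELR pointed at arch_user_mode_enter(), which
rebuilds the ESF and drops to EL0 through z_arm64_userspace_enter() and
z_arm64_exit_exc(); a thread created without K_USER starts directly in
z_thread_entry at EL1. Since this patch only introduces the entry paths, the
sketch illustrates how they are selected rather than a complete userspace sample.

#include <zephyr.h>

#define DEMO_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(user_stack, DEMO_STACK_SIZE);
K_THREAD_STACK_DEFINE(kernel_stack, DEMO_STACK_SIZE);

static struct k_thread user_thread;
static struct k_thread kernel_thread;

static void demo_entry(void *p1, void *p2, void *p3)
{
        /* Reached through z_thread_entry: at EL0 for the user thread,
         * at EL1 for the kernel thread.
         */
}

void main(void)
{
        /* K_USER: arch_new_thread() points ELR at arch_user_mode_enter(),
         * so the thread moves to EL0 before demo_entry() runs.
         */
        k_thread_create(&user_thread, user_stack,
                        K_THREAD_STACK_SIZEOF(user_stack),
                        demo_entry, NULL, NULL, NULL,
                        7, K_USER, K_NO_WAIT);

        /* No K_USER: ELR is set to z_thread_entry and the thread
         * keeps running at EL1.
         */
        k_thread_create(&kernel_thread, kernel_stack,
                        K_THREAD_STACK_SIZEOF(kernel_stack),
                        demo_entry, NULL, NULL, NULL,
                        7, 0, K_NO_WAIT);
}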