aarch64: userspace: Introduce skeleton code for user-threads

Introduce the first pieces needed to schedule user threads by defining
two different code paths for kernel and user threads.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
Author:    Carlo Caione <ccaione@baylibre.com>
Date:      2020-11-25 14:44:21 +01:00
Committer: Anas Nashif
Commit:    6cf0d000e8

4 changed files with 82 additions and 5 deletions
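For context before the diffs: a thread only takes the new user path if it was created with the K_USER option, which is what the is_user() helper below checks. A minimal sketch using the stock Zephyr thread API (my_thread, my_stack and user_entry are illustrative names, not part of this commit):

#include <zephyr.h>

#define MY_STACK_SIZE 1024
#define MY_PRIORITY   5

K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
static struct k_thread my_thread;

static void user_entry(void *p1, void *p2, void *p3)
{
        /* Runs at EL0 once the exception return below completes */
}

void spawn_user_thread(void)
{
        /*
         * K_USER is recorded in thread->base.user_options, so is_user()
         * returns true and arch_new_thread() points ELR at
         * arch_user_mode_enter() instead of z_thread_entry().
         */
        k_thread_create(&my_thread, my_stack, MY_STACK_SIZE,
                        user_entry, NULL, NULL, NULL,
                        MY_PRIORITY, K_USER, K_NO_WAIT);
}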

File: CMakeLists.txt

@@ -20,6 +20,7 @@ zephyr_library_sources(
   vector_table.S
 )
 
+zephyr_library_sources_ifdef(CONFIG_USERSPACE userspace.S)
 zephyr_library_sources_ifdef(CONFIG_GEN_SW_ISR_TABLE isr_wrapper.S)
 zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)
 zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE ../common/tls.c)

File: thread.c

@@ -16,11 +16,13 @@
 #include <wait_q.h>
 #include <arch/cpu.h>
 
-/*
- * An initial context, to be "restored" by z_arm64_context_switch(), is put at
- * the other end of the stack, and thus reusable by the stack when not needed
- * anymore.
- */
+#ifdef CONFIG_USERSPACE
+static bool is_user(struct k_thread *thread)
+{
+        return (thread->base.user_options & K_USER) != 0;
+}
+#endif
+
 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
                      char *stack_ptr, k_thread_entry_t entry,
                      void *p1, void *p2, void *p3)
@@ -40,7 +42,19 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
          *   parameters already in place in x1(arg1), x2(arg2), x3(arg3).
          * - SPSR_ELn: to enable IRQs (we are masking FIQs).
          */
+#ifdef CONFIG_USERSPACE
+        /*
+         * If the new thread is a user thread we jump into
+         * arch_user_mode_enter() when still in EL1.
+         */
+        if (is_user(thread)) {
+                pInitCtx->elr = (uint64_t)arch_user_mode_enter;
+        } else {
+                pInitCtx->elr = (uint64_t)z_thread_entry;
+        }
+#else
         pInitCtx->elr = (uint64_t)z_thread_entry;
+#endif
 
         pInitCtx->spsr = SPSR_MODE_EL1T | DAIF_FIQ_BIT;
 
         /*
@@ -58,3 +72,37 @@ void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
         return z_get_next_switch_handle(*old_thread);
 }
+
+#ifdef CONFIG_USERSPACE
+FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
+                                        void *p1, void *p2, void *p3)
+{
+        z_arch_esf_t *pInitCtx;
+        uintptr_t stack_ptr;
+
+        /* Reset the stack pointer to the base discarding any old context */
+        stack_ptr = Z_STACK_PTR_ALIGN(_current->stack_info.start +
+                                      _current->stack_info.size -
+                                      _current->stack_info.delta);
+
+        /*
+         * Reconstruct the ESF from scratch to leverage the z_arm64_exit_exc()
+         * macro that will simulate a return from exception to move from EL1t
+         * to EL0t. On return we will be in userspace.
+         */
+        pInitCtx = Z_STACK_PTR_TO_FRAME(struct __esf, stack_ptr);
+
+        pInitCtx->spsr = DAIF_FIQ_BIT | SPSR_MODE_EL0T;
+        pInitCtx->elr = (uint64_t)z_thread_entry;
+
+        pInitCtx->x0 = (uint64_t)user_entry;
+        pInitCtx->x1 = (uint64_t)p1;
+        pInitCtx->x2 = (uint64_t)p2;
+        pInitCtx->x3 = (uint64_t)p3;
+
+        /* All the needed information is already in the ESF */
+        z_arm64_userspace_enter(pInitCtx);
+
+        CODE_UNREACHABLE;
+}
+#endif
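For reference, the ESF fields arch_user_mode_enter() fills in, as an illustrative subset (hence the _sketch name): the real struct __esf in the aarch64 port saves the full caller-saved register set, and its actual layout and field order differ.

/* Illustrative subset only -- field names match the diff above */
struct __esf_sketch {
        uint64_t x0;   /* user entry function, first argument to z_thread_entry() */
        uint64_t x1;   /* p1 */
        uint64_t x2;   /* p2 */
        uint64_t x3;   /* p3 */
        uint64_t spsr; /* restored into SPSR_EL1: EL0t target mode, FIQs masked */
        uint64_t elr;  /* restored into ELR_EL1: where the exception return lands */
};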

File: userspace.S (new file)

@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2020 Carlo Caione <ccaione@baylibre.com>
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <toolchain.h>
+#include <linker/sections.h>
+#include <offsets_short.h>
+#include <arch/cpu.h>
+#include <syscall.h>
+
+#include "macro_priv.inc"
+
+_ASM_FILE_PROLOGUE
+
+/*
+ * Routine to jump into userspace
+ *
+ * We leverage z_arm64_exit_exc() to pop out the entry function and parameters
+ * from ESF and fake a return from exception to move from EL1 to EL0. The fake
+ * ESF is built in arch_user_mode_enter() before jumping here
+ */
+GTEXT(z_arm64_userspace_enter)
+SECTION_FUNC(TEXT, z_arm64_userspace_enter)
+        mov     sp, x0
+        b       z_arm64_exit_exc
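Conceptually, the z_arm64_exit_exc tail this relies on is an exception return; a simplified sketch, not the actual implementation (ESF_SPSR_OFFSET and ESF_ELR_OFFSET are hypothetical names):

        /* Simplified sketch of the "fake" EL1 -> EL0 exception return */
        ldr     x0, [sp, #ESF_SPSR_OFFSET]      /* hypothetical offset macro */
        ldr     x1, [sp, #ESF_ELR_OFFSET]       /* hypothetical offset macro */
        msr     spsr_el1, x0                    /* SPSR_MODE_EL0T | DAIF_FIQ_BIT */
        msr     elr_el1, x1                     /* z_thread_entry */
        /* ... restore x0-x18 and the rest of the ESF, then free it ... */
        eret                                    /* drop to EL0t at z_thread_entry */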

File: kernel_arch_func.h

@@ -40,6 +40,7 @@ static inline void arch_switch(void *switch_to, void **switched_from)
 }
 
 extern void z_arm64_fatal_error(z_arch_esf_t *esf, unsigned int reason);
+extern void z_arm64_userspace_enter(z_arch_esf_t *esf);
 
 #endif /* _ASMLANGUAGE */
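One detail worth spelling out: ELR points at z_thread_entry while x0-x3 carry the user entry point and its parameters. After the exception return those are the AAPCS64 argument registers, so the generic thread entry trampoline receives them as ordinary C arguments, roughly as follows (a simplified sketch of Zephyr's common z_thread_entry, not code from this commit):

FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
                                  void *p1, void *p2, void *p3)
{
        /* entry == user_entry passed to arch_user_mode_enter(), now at EL0 */
        entry(p1, p2, p3);

        /* The entry function returned: terminate the thread */
        k_thread_abort(k_current_get());

        CODE_UNREACHABLE;
}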