arm: userspace: Add ARM userspace infrastructure
This patch adds support for userspace on ARM architectures: arch-specific calls for transitioning threads to user mode, system calls, and associated handlers.

Signed-off-by: Andy Gross <andy.gross@linaro.org>
This commit is contained in:
parent 9ccdcb9be6
commit 1c047c9bef
14 changed files with 543 additions and 45 deletions
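Before reading the diff, here is a sketch of how this infrastructure is exercised from the API side: a thread created with the K_USER option is routed through _arch_user_mode_enter() (added below) and starts executing unprivileged. Names follow the Zephyr API of this era; the entry function, stack size, and priority are illustrative, not part of the patch.

	#include <kernel.h>

	#define USER_STACK_SIZE 1024

	K_THREAD_STACK_DEFINE(user_stack, USER_STACK_SIZE);
	static struct k_thread user_thread;

	/* Illustrative entry point: runs unprivileged, so any kernel service
	 * it uses goes through SVC #3 and _arm_do_syscall (see userspace.S).
	 */
	static void user_entry(void *p1, void *p2, void *p3)
	{
		k_sleep(100);
	}

	void spawn_user_thread(void)
	{
		k_thread_create(&user_thread, user_stack, USER_STACK_SIZE,
				user_entry, NULL, NULL, NULL,
				K_PRIO_PREEMPT(0), K_USER, K_NO_WAIT);
	}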
@@ -16,6 +16,7 @@ zephyr_sources_ifdef(CONFIG_GEN_SW_ISR_TABLE isr_wrapper.S)
 zephyr_sources_ifdef(CONFIG_CPLUSPLUS __aeabi_atexit.c)
 zephyr_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)
 zephyr_sources_ifdef(CONFIG_CPU_CORTEX_M0 irq_relay.S)
+zephyr_sources_ifdef(CONFIG_USERSPACE userspace.S)
 
 add_subdirectory_ifdef(CONFIG_CPU_CORTEX_M cortex_m)
 add_subdirectory_ifdef(CONFIG_CPU_HAS_MPU cortex_m/mpu)
@@ -23,7 +23,7 @@ config CPU_CORTEX_M
 	select HAS_FLASH_LOAD_OFFSET
 	select HAS_DTS
 	select ARCH_HAS_STACK_PROTECTION if ARM_CORE_MPU
-	select ARCH_HAS_USERSPACE if ARM_USERSPACE
+	select ARCH_HAS_USERSPACE if ARM_CORE_MPU
 	help
 	  This option signifies the use of a CPU of the Cortex-M family.
 
@@ -42,14 +42,6 @@ config ARM_STACK_PROTECTION
 	  This option enables MPU stack guard to cause a system fatal error
 	  if the bounds of the current process stack are overflowed.
 
-config ARM_USERSPACE
-	bool
-	default n
-	help
-	  This option enables APIs to drop a thread's privileges, supporting
-	  user-level threads that are protected from each other and from
-	  crashing the kernel.
-
 menu "Architecture Floating Point Options"
 	depends on CPU_HAS_FPU
 
@@ -23,10 +23,18 @@
  */
 void configure_mpu_stack_guard(struct k_thread *thread)
 {
+	u32_t guard_size = MPU_GUARD_ALIGN_AND_SIZE;
+#if defined(CONFIG_USERSPACE)
+	u32_t guard_start = thread->arch.priv_stack_start ?
+			    (u32_t)thread->arch.priv_stack_start :
+			    (u32_t)thread->stack_obj;
+#else
+	u32_t guard_start = thread->stack_info.start;
+#endif
+
 	arm_core_mpu_disable();
-	arm_core_mpu_configure(THREAD_STACK_GUARD_REGION,
-			       thread->stack_info.start - MPU_GUARD_ALIGN_AND_SIZE,
-			       thread->stack_info.size);
+	arm_core_mpu_configure(THREAD_STACK_GUARD_REGION, guard_start,
+			       guard_size);
 	arm_core_mpu_enable();
 }
 #endif
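A note on what this hunk changes, inferred from the code above: the guard no longer sits at a fixed offset below stack_info.start but tracks where privileged data actually lives.

	/* Illustrative summary (not part of the patch) of where the guard lands:
	 *   user thread with a privileged stack:    guard_start == thread->arch.priv_stack_start
	 *   thread without one (CONFIG_USERSPACE):  guard_start == (u32_t)thread->stack_obj
	 *   CONFIG_USERSPACE disabled:              guard_start == thread->stack_info.start
	 * In every case the guard spans MPU_GUARD_ALIGN_AND_SIZE bytes from
	 * guard_start.
	 */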
@@ -87,3 +87,14 @@ void _do_kernel_oops(const NANO_ESF *esf)
 {
 	_NanoFatalErrorHandler(esf->r0, esf);
 }
+
+FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr)
+{
+	u32_t *ssf_contents = ssf_ptr;
+	NANO_ESF oops_esf = { 0 };
+
+	oops_esf.pc = ssf_contents[3];
+
+	_do_kernel_oops(&oops_esf);
+	CODE_UNREACHABLE;
+}
@@ -30,6 +30,7 @@ GEN_OFFSET_SYM(_thread_arch_t, basepri);
 GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
 
 #ifdef CONFIG_USERSPACE
 GEN_OFFSET_SYM(_thread_arch_t, mode);
+GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
 #endif
 
@@ -23,6 +23,7 @@ GTEXT(__swap)
 GTEXT(__svc)
 GTEXT(__pendsv)
 GTEXT(_do_kernel_oops)
+GTEXT(_arm_do_syscall)
 GDATA(_k_neg_eagain)
 
 GDATA(_kernel)
@@ -176,12 +177,24 @@ _thread_irq_disabled:
 #endif /* CONFIG_MPU_STACK_GUARD */
 
+#ifdef CONFIG_USERSPACE
+	/* restore mode */
+	ldr r0, [r2, #_thread_offset_to_mode]
+	mrs r3, CONTROL
+	bic r3, #1
+	orr r3, r0
+	msr CONTROL, r3
+
+	/* r2 contains k_thread */
+	add r0, r2, #0
+	push {r2, lr}
+	blx configure_mpu_mem_domain
+	pop {r2, lr}
+#endif /* CONFIG_USERSPACE */
+
 	add r0, r2, #0
 	push {r2, lr}
 	blx configure_mpu_user_context
 	pop {r2, lr}
 #endif
 
 	/* load callee-saved + psp from thread */
 	add r0, r2, #_thread_offset_to_callee_saved
@@ -268,7 +281,6 @@ _oops:
  */
 
 SECTION_FUNC(TEXT, __svc)
 
 	tst lr, #0x4	/* did we come from thread mode ? */
 	ite eq	/* if zero (equal), came from handler mode */
 	mrseq r0, MSP	/* handler mode, stack frame is on MSP */
@@ -283,10 +295,26 @@ SECTION_FUNC(TEXT, __svc)
 	 * 0: context switch
 	 * 1: irq_offload (if configured)
 	 * 2: kernel panic or oops (software generated fatal exception)
+	 * 3: System call
-	 * Planned implementation of system calls for memory protection will
-	 * expand this case.
 	 */
 	ands r1, #0xff
+#if CONFIG_USERSPACE
+	mrs r2, CONTROL
+
+	cmp r1, #3
+	beq _do_syscall
+
+	/*
+	 * check that we are privileged before invoking other SVCs
+	 * oops if we are unprivileged
+	 */
+	tst r2, #0x1
+	bne _oops
+
+	cmp r1, #0
+#endif
 	beq _context_switch
 
 	cmp r1, #2
@@ -324,6 +352,46 @@ _oops:
 	blx _do_kernel_oops
 	pop {pc}
 
+#if CONFIG_USERSPACE
+	/*
+	 * System call will set up a jump to the _arm_do_syscall function
+	 * when the SVC returns via the bx lr.
+	 *
+	 * There is some trickery involved here because we have to preserve
+	 * the original LR value so that we can return back to the caller of
+	 * the SVC.
+	 *
+	 * On SVC exception, the stack looks like the following:
+	 * r0 - r1 - r2 - r3 - r12 - LR - PC - PSR
+	 * r5 - r6 - call id - saved LR
+	 *
+	 */
+_do_syscall:
+	ldr r1, [r0, #24]	/* grab address of PC from stack frame */
+	str r1, [r0, #44]	/* store address to use for LR after syscall */
+	ldr r1, =_arm_do_syscall
+	str r1, [r0, #24]	/* overwrite the PC to point to _arm_do_syscall */
+
+	/* validate syscall limit, only set priv mode if valid */
+	ldr ip, =_SYSCALL_LIMIT
+	ldr r1, [r0, #40]
+	cmp r1, ip
+	blt valid_syscall_id
+
+	/* bad syscall id. Set arg0 to bad id and set call_id to SYSCALL_BAD */
+	str r1, [r0, #0]
+	ldr r1, =_SYSCALL_BAD
+	str r1, [r0, #40]
+
+valid_syscall_id:
+	/* set mode to privileged, r2 still contains value from CONTROL */
+	bic r2, #1
+	msr CONTROL, r2
+
+	/* return from SVC to the modified PC - _arm_do_syscall */
+	bx lr
+#endif
+
 #else
 #error Unknown ARM architecture
 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
@@ -381,6 +449,13 @@ SECTION_FUNC(TEXT, __swap)
 	ldr r2, [r1, #_kernel_offset_to_current]
 	str r0, [r2, #_thread_offset_to_basepri]
 
+#ifdef CONFIG_USERSPACE
+	mrs r0, CONTROL
+	movs r3, #1
+	ands r0, r3
+	str r0, [r2, #_thread_offset_to_mode]
+#endif
+
 	/*
 	 * Set __swap()'s default return code to -EAGAIN. This eliminates the need
 	 * for the timeout code to set it itself.
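Context for the new save above: bit 0 of the Cortex-M CONTROL register is nPRIV (0 = privileged, 1 = unprivileged in thread mode). The swap-out path stores just that bit in thread->arch.mode; the swap-in path in __pendsv splices it back into CONTROL. A condensed C rendering of the pair, using CMSIS-style intrinsics purely for illustration:

	/* swap out: remember only the privilege bit (mirrors mrs/movs/ands/str) */
	thread->arch.mode = __get_CONTROL() & 1;

	/* swap in: clear nPRIV, then re-apply the saved bit (mirrors ldr/mrs/bic/orr/msr) */
	__set_CONTROL((__get_CONTROL() & ~1UL) | thread->arch.mode);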
@@ -19,6 +19,10 @@
 #include <string.h>
 #endif /* CONFIG_INIT_STACKS */
 
+#ifdef CONFIG_USERSPACE
+extern u8_t *_k_priv_stack_find(void *obj);
+#endif
+
 /**
  *
  * @brief Initialize a new thread from its stack space
@@ -58,16 +62,33 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 
 	_ASSERT_VALID_PRIO(priority, pEntry);
 
+#if CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
+	char *stackEnd = pStackMem + stackSize - MPU_GUARD_ALIGN_AND_SIZE;
+#else
 	char *stackEnd = pStackMem + stackSize;
+#endif
 	struct __esf *pInitCtx;
-	_new_thread_init(thread, pStackMem, stackSize, priority, options);
 
+	_new_thread_init(thread, pStackMem, stackEnd - pStackMem, priority,
+			 options);
+
 	/* carve the thread entry struct from the "base" of the stack */
 
 	pInitCtx = (struct __esf *)(STACK_ROUND_DOWN(stackEnd -
 					sizeof(struct __esf)));
 
-	pInitCtx->pc = ((u32_t)_thread_entry) & 0xfffffffe;
+#if CONFIG_USERSPACE
+	if (options & K_USER) {
+		pInitCtx->pc = (u32_t)_arch_user_mode_enter;
+	} else {
+		pInitCtx->pc = (u32_t)_thread_entry;
+	}
+#else
+	pInitCtx->pc = (u32_t)_thread_entry;
+#endif
+
+	/* force ARM mode by clearing LSB of address */
+	pInitCtx->pc &= 0xfffffffe;
 
 	pInitCtx->a1 = (u32_t)pEntry;
 	pInitCtx->a2 = (u32_t)parameter1;
 	pInitCtx->a3 = (u32_t)parameter2;
@@ -78,6 +99,12 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	thread->callee_saved.psp = (u32_t)pInitCtx;
 	thread->arch.basepri = 0;
 
+#if CONFIG_USERSPACE
+	thread->arch.mode = 0;
+	thread->arch.priv_stack_start = 0;
+	thread->arch.priv_stack_size = 0;
+#endif
+
 	/* swap_return_value can contain garbage */
 
 	/*
@@ -94,3 +121,23 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	thread_monitor_init(thread);
 #endif
 }
+
+#ifdef CONFIG_USERSPACE
+
+FUNC_NORETURN void _arch_user_mode_enter(k_thread_entry_t user_entry,
+					 void *p1, void *p2, void *p3)
+{
+
+	/* Set up privileged stack before entering user mode */
+	_current->arch.priv_stack_start =
+		(u32_t)_k_priv_stack_find(_current->stack_obj);
+	_current->arch.priv_stack_size =
+		(u32_t)CONFIG_PRIVILEGED_STACK_SIZE;
+
+	_arm_userspace_enter(user_entry, p1, p2, p3,
+			     (u32_t)_current->stack_info.start,
+			     _current->stack_info.size);
+	CODE_UNREACHABLE;
+}
+
+#endif
arch/arm/core/userspace.S (new file, 182 lines)

@@ -0,0 +1,182 @@
/*
 * Userspace and service handler hooks
 *
 * Copyright (c) 2017 Linaro Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 */

#include <offsets_short.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <kernel_structs.h>
#include <arch/cpu.h>
#include <syscall.h>

_ASM_FILE_PROLOGUE

GTEXT(_arm_userspace_enter)
GTEXT(_arm_do_syscall)
GDATA(_kernel)

/* Imports */
GTEXT(_k_syscall_table)

/**
 *
 * User space entry function
 *
 * This function is the entry point to user mode from privileged execution.
 * The conversion is one way, and threads which transition to user mode do
 * not transition back later, unless they are doing system calls.
 *
 */
SECTION_FUNC(TEXT,_arm_userspace_enter)
	/* move user_entry to lr */
	mov lr, r0

	/* set stack to privileged stack */
	ldr r0, =_kernel
	ldr r0, [r0, #_kernel_offset_to_current]
	ldr r0, [r0, #_thread_offset_to_priv_stack_start]	/* priv stack ptr */
	ldr ip, =CONFIG_PRIVILEGED_STACK_SIZE
	add r0, r0, ip

	mov ip, sp
	msr PSP, r0

	/* load up stack info from user stack */
	ldr r0, [ip]
	ldr ip, [ip, #4]

#ifdef CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
	/* Guard is taken out of size, so adjust beginning and size of stack */
	subs ip, #MPU_GUARD_ALIGN_AND_SIZE
#endif

	/* push args to stack */
	push {r0,r1,r2,r3,ip,lr}

	/* clear the user stack area to clean out privileged data */
	/* from right past the guard right up to the end */
	mov r2, ip
#ifdef CONFIG_INIT_STACKS
	ldr r1,=0xaaaaaaaa
#else
	eors.n r1, r1
#endif
	bl memset

	/* setup arguments to configure_mpu_mem_domain */
	ldr r0, =_kernel
	ldr r0, [r0, #_kernel_offset_to_current]
	bl configure_mpu_mem_domain

	/* setup arguments to configure_mpu_user_context */
	ldr r0, =_kernel
	ldr r0, [r0, #_kernel_offset_to_current]
	bl configure_mpu_user_context

	pop {r0,r1,r2,r3,ip,lr}

	/* r0 contains user stack start, ip contains user stack size */
	add r0, r0, ip	/* calculate top of stack */

	/* set stack to user stack */
	msr PSP, r0

	/* restore r0 */
	mov r0, lr

	/* change processor mode to unprivileged */
	mrs ip, CONTROL
	orrs ip, ip, #1
	msr CONTROL, ip

	/* jump to _thread_entry entry */
	ldr ip, =_thread_entry
	bx ip

/**
 *
 * Userspace system call function
 *
 * This function is used to do system calls from unprivileged code. This
 * function is responsible for the following:
 * 1) Fixing up bad syscalls
 * 2) Configuring privileged stack and loading up stack arguments
 * 3) Dispatching the system call
 * 4) Restoring stack and calling back to the caller of the SVC
 *
 */
SECTION_FUNC(TEXT, _arm_do_syscall)
	/*
	 * r0-r3 are values from the pre-SVC stack frame stored during SVC
	 * 16 bytes of storage reside on the stack:
	 * arg5, arg6, call_id, and LR from SVC frame
	 */
	push {r4,r5,r6,lr}

	ldr ip, =_k_syscall_table
	ldr r4, [sp, #24]	/* load call_id from stack */
	lsl r4, #2
	add ip, r4
	ldr ip, [ip]	/* load table address */
	ldr r5, =_SYSCALL_BAD
	lsl r5, #2	/* shift to match the shift we did on the call_id */
	cmp r4, r5
	bne valid_syscall

	/* BAD SYSCALL path */
	/* fixup stack frame on unprivileged stack, adding ssf */
	/* pop registers and lr as this is a one way jump */
	mov r4, sp
	str r4, [sp, #24]
	pop {r4,r5,r6,lr}
	b dispatch_syscall

valid_syscall:
	/* setup privileged stack */
	ldr r4, =_kernel
	ldr r4, [r4, #_kernel_offset_to_current]
	ldr r5, [r4, #_thread_offset_to_priv_stack_start]	/* priv stack ptr */
	ldr r6, =CONFIG_PRIVILEGED_STACK_SIZE
	add r5, r6

	/* setup privileged stack frame */
	/* 16 bytes: arg5, arg6, ssf, 4 bytes padding */
	sub r5, #16
	ldr r6, [sp, #16]
	str r6, [r5, #0]
	ldr r6, [sp, #20]
	str r6, [r5, #4]
	mov r6, sp
	str r6, [r5, #8]	/* store ssf of unprivileged stack */
	ldr r6, =0
	str r6, [r5, #12]	/* store zeroed padding */

	/* switch to privileged stack */
	msr PSP, r5
dispatch_syscall:
	/* execute function from dispatch table */
	blx ip

	/* set stack back to unprivileged stack */
	ldr ip, [sp,#8]
	msr PSP, ip

	pop {r4,r5,r6,lr}

	/* drop privileges by setting bit 0 in CONTROL */
	mrs ip, CONTROL
	orrs ip, ip, #1
	msr CONTROL, ip

	/*
	 * return back to original function that called SVC, add 1 to force thumb
	 * mode
	 */
	ldr ip, [sp, #12]
	orrs ip, ip, #1
	bx ip
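To make the str offsets above easier to follow, here is the 16-byte frame _arm_do_syscall builds at the top of the privileged stack before dispatching, written out as a C struct. The struct name is hypothetical; the layout is read directly from the code:

	struct priv_syscall_frame {	/* at PSP after "msr PSP, r5" */
		u32_t arg5;	/* copied from unprivileged sp + 16 */
		u32_t arg6;	/* copied from unprivileged sp + 20 */
		u32_t ssf;	/* saved unprivileged stack pointer */
		u32_t padding;	/* zeroed, preserves 8-byte stack alignment */
	};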
@@ -96,6 +96,12 @@ extern void k_cpu_atomic_idle(unsigned int key);
 
 extern void _IntLibInit(void);
 
+
+extern FUNC_NORETURN void _arm_userspace_enter(k_thread_entry_t user_entry,
+					       void *p1, void *p2, void *p3,
+					       u32_t stack_end,
+					       u32_t stack_start);
+
 #endif /* _ASMLANGUAGE */
 
 #ifdef __cplusplus
@@ -94,6 +94,12 @@ struct _thread_arch {
 	 */
 	struct _preempt_float preempt_float;
 #endif
 
+#ifdef CONFIG_USERSPACE
+	u32_t mode;
+	u32_t priv_stack_start;
+	u32_t priv_stack_size;
+#endif
+
 };
 
 typedef struct _thread_arch _thread_arch_t;
@@ -27,6 +27,9 @@
 	(___thread_t_arch_OFFSET + ___thread_arch_t_preempt_float_OFFSET)
 
 #ifdef CONFIG_USERSPACE
 #define _thread_offset_to_mode \
 	(___thread_t_arch_OFFSET + ___thread_arch_t_mode_OFFSET)
+
+#define _thread_offset_to_priv_stack_start \
+	(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET)
 #endif
 
@@ -103,17 +103,6 @@ extern "C" {
 #define MPU_GUARD_ALIGN_AND_SIZE 0
 #endif
 
-/**
- * @brief Define alignment of a stack buffer
- *
- * This is used for two different things:
- * 1) Used in checks for stack size to be a multiple of the stack buffer
- *    alignment
- * 2) Used to determine the alignment of a stack buffer
- *
- */
-#define STACK_ALIGN max(STACK_ALIGN_SIZE, MPU_GUARD_ALIGN_AND_SIZE)
-
 /**
  * @brief Declare a toplevel thread stack memory region
  *
@@ -134,9 +123,40 @@
  * @param sym Thread stack symbol name
  * @param size Size of the stack memory region
  */
+
+/**
+ * @brief Define alignment of a stack buffer
+ *
+ * This is used for two different things:
+ * 1) Used in checks for stack size to be a multiple of the stack buffer
+ *    alignment
+ * 2) Used to determine the alignment of a stack buffer
+ *
+ */
+#if defined(CONFIG_USERSPACE)
+#define STACK_ALIGN 32
+#else
+#define STACK_ALIGN max(STACK_ALIGN_SIZE, MPU_GUARD_ALIGN_AND_SIZE)
+#endif
+
+/**
+ * @brief Calculate power of two ceiling for a buffer size input
+ *
+ */
+#define POW2_CEIL(x) ((1 << (31 - __builtin_clz(x))) < x ? \
+		1 << (31 - __builtin_clz(x) + 1) : \
+		1 << (31 - __builtin_clz(x)))
+
+#if defined(CONFIG_USERSPACE) && \
+	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
+#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
+	struct _k_thread_stack_element __kernel_noinit \
+		__aligned(POW2_CEIL(size)) sym[POW2_CEIL(size)]
+#else
 #define _ARCH_THREAD_STACK_DEFINE(sym, size) \
 	struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \
 		sym[size+MPU_GUARD_ALIGN_AND_SIZE]
+#endif
 
 /**
  * @brief Declare a toplevel array of thread stack memory regions
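POW2_CEIL is easy to misread, so here is a minimal host-side check (not Zephyr code, values worked by hand from the macro) of what it evaluates to:

	#include <assert.h>

	#define POW2_CEIL(x) ((1 << (31 - __builtin_clz(x))) < x ? \
			1 << (31 - __builtin_clz(x) + 1) : \
			1 << (31 - __builtin_clz(x)))

	int main(void)
	{
		assert(POW2_CEIL(512)  == 512);		/* exact powers pass through */
		assert(POW2_CEIL(1000) == 1024);	/* rounded up to the next power */
		assert(POW2_CEIL(1025) == 2048);
		return 0;
	}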
@@ -151,9 +171,18 @@
  * @param nmemb Number of stacks to declare
  * @param size Size of the stack memory region
  */
+#if defined(CONFIG_USERSPACE) && \
+	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
+#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
+	struct _k_thread_stack_element __kernel_noinit \
+		__aligned(POW2_CEIL(size)) \
+		sym[nmemb][POW2_CEIL(size)]
+#else
 #define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
-	struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \
+	struct _k_thread_stack_element __kernel_noinit \
+		__aligned(STACK_ALIGN) \
 		sym[nmemb][size+MPU_GUARD_ALIGN_AND_SIZE]
+#endif
 
 /**
  * @brief Declare an embedded stack memory region
@@ -167,9 +196,16 @@
  * @param sym Thread stack symbol name
  * @param size Size of the stack memory region
  */
+#if defined(CONFIG_USERSPACE) && \
+	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
+#define _ARCH_THREAD_STACK_MEMBER(sym, size) \
+	struct _k_thread_stack_element __aligned(POW2_CEIL(size)) \
+		sym[POW2_CEIL(size)]
+#else
 #define _ARCH_THREAD_STACK_MEMBER(sym, size) \
 	struct _k_thread_stack_element __aligned(STACK_ALIGN) \
 		sym[size+MPU_GUARD_ALIGN_AND_SIZE]
+#endif
 
 /**
  * @brief Return the size in bytes of a stack memory region
@@ -178,9 +214,15 @@
  * since the underlying implementation may actually create something larger
  * (for instance a guard area).
  *
- * The value returned here is guaranteed to match the 'size' parameter
+ * The value returned here is NOT guaranteed to match the 'size' parameter
  * passed to K_THREAD_STACK_DEFINE and related macros.
  *
+ * In the case of CONFIG_USERSPACE=y and
+ * CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT, the size will be larger than
+ * the requested size.
+ *
+ * In all other configurations, the size will be correct.
+ *
  * @param sym Stack memory symbol
  * @return Size of the stack
  */
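A concrete instance of the caveat just added, assuming CONFIG_USERSPACE=y with power-of-two MPU alignment (sizes illustrative):

	K_THREAD_STACK_DEFINE(my_stack, 1000);
	/* The array is declared as sym[POW2_CEIL(1000)], i.e. 1024 elements,
	 * so the reported stack size is 1024 rather than the requested 1000.
	 */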
@@ -304,59 +346,172 @@
 typedef u32_t k_mem_partition_attr_t;
 #endif /* _ASMLANGUAGE */
 
-#ifdef CONFIG_ARM_USERSPACE
+#ifdef CONFIG_USERSPACE
 #ifndef _ASMLANGUAGE
-/* Syscall invocation macros. arm-specific machine constraints used to ensure
- * args land in the proper registers. Currently, they are all stub functions
- * just for enabling CONFIG_USERSPACE on arm w/o errors.
- */
-
 /* Syscall invocation macros. arm-specific machine constraints used to ensure
  * args land in the proper registers.
  */
 static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t arg5, u32_t arg6,
 					  u32_t call_id)
 {
-	return 0;
+	register u32_t ret __asm__("r0") = arg1;
+	register u32_t r1 __asm__("r1") = arg2;
+	register u32_t r2 __asm__("r2") = arg3;
+	register u32_t r3 __asm__("r3") = arg4;
+
+	__asm__ volatile("sub sp, #16\n"
+			 "str %[a5], [sp, #0]\n"
+			 "str %[a6], [sp, #4]\n"
+			 "str %[cid], [sp, #8]\n"
+			 "svc %[svid]\n"
+			 "add sp, #16\n"
+			 : "=r"(ret)
+			 : [cid] "r" (call_id),
+			   [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
+			   [a5] "r" (arg5), [a6] "r" (arg6)
+			 : "ip", "memory");
+
+	return ret;
 }
 
 static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t arg5, u32_t call_id)
 {
-	return 0;
+	register u32_t ret __asm__("r0") = arg1;
+	register u32_t r1 __asm__("r1") = arg2;
+	register u32_t r2 __asm__("r2") = arg3;
+	register u32_t r3 __asm__("r3") = arg4;
+
+	__asm__ volatile("sub sp, #16\n"
+			 "str %[a5], [sp, #0]\n"
+			 "str %[cid], [sp, #8]\n"
+			 "svc %[svid]\n"
+			 "add sp, #16\n"
+			 : "=r"(ret)
+			 : [cid] "r" (call_id),
+			   [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r2), "r" (r3),
+			   [a5] "r" (arg5)
+			 : "ip", "memory");
+
+	return ret;
 }
 
 static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t arg4, u32_t call_id)
 {
-	return 0;
+	register u32_t ret __asm__("r0") = arg1;
+	register u32_t r1 __asm__("r1") = arg2;
+	register u32_t r2 __asm__("r2") = arg3;
+	register u32_t r3 __asm__("r3") = arg4;
+
+	__asm__ volatile("sub sp, #16\n"
+			 "str %[cid], [sp,#8]\n"
+			 "svc %[svid]\n"
+			 "add sp, #16\n"
+			 : "=r"(ret)
+			 : [cid] "r" (call_id),
+			   [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r2), "r" (r3)
+			 : "ip", "memory");
+
+	return ret;
 }
 
 static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
 					  u32_t call_id)
 {
-	return 0;
+	register u32_t ret __asm__("r0") = arg1;
+	register u32_t r1 __asm__("r1") = arg2;
+	register u32_t r2 __asm__("r2") = arg3;
+
+	__asm__ volatile("sub sp, #16\n"
+			 "str %[cid], [sp,#8]\n"
+			 "svc %[svid]\n"
+			 "add sp, #16\n"
+			 : "=r"(ret)
+			 : [cid] "r" (call_id),
+			   [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1), "r" (r2)
+			 : "r3", "ip", "memory");
+
+	return ret;
 }
 
 static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
 {
-	return 0;
+	register u32_t ret __asm__("r0") = arg1;
+	register u32_t r1 __asm__("r1") = arg2;
+
+	__asm__ volatile(
+			 "sub sp, #16\n"
+			 "str %[cid], [sp,#8]\n"
+			 "svc %[svid]\n"
+			 "add sp, #16\n"
+			 : "=r"(ret)
+			 : [cid] "r" (call_id),
+			   [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret), "r" (r1)
+			 : "r2", "r3", "ip", "memory");
+
+	return ret;
 }
 
 static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
 {
-	return 0;
+	register u32_t ret __asm__("r0") = arg1;
+
+	__asm__ volatile(
+			 "sub sp, #16\n"
+			 "str %[cid], [sp,#8]\n"
+			 "svc %[svid]\n"
+			 "add sp, #16\n"
+			 : "=r"(ret)
+			 : [cid] "r" (call_id),
+			   [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret)
+			 : "r1", "r2", "r3", "ip", "memory");
+	return ret;
 }
 
 static inline u32_t _arch_syscall_invoke0(u32_t call_id)
 {
-	return 0;
+	register u32_t ret __asm__("r0");
+
+	__asm__ volatile(
+			 "sub sp, #16\n"
+			 "str %[cid], [sp,#8]\n"
+			 "svc %[svid]\n"
+			 "add sp, #16\n"
+			 : "=r"(ret)
+			 : [cid] "r" (call_id),
+			   [svid] "i" (_SVC_CALL_SYSTEM_CALL),
+			   "r" (ret)
+			 : "r1", "r2", "r3", "ip", "memory");
+
+	return ret;
 }
 
 static inline int _arch_is_user_context(void)
 {
-	return 0;
+	u32_t value;
+
+	/* check for handler mode */
+	__asm__ volatile("mrs %0, IPSR\n\t" : "=r"(value));
+	if (value) {
+		return 0;
+	}
+
+	/* if not handler mode, return mode information */
+	__asm__ volatile("mrs %0, CONTROL\n\t" : "=r"(value));
+	return value & 0x1;
 }
 
 #endif /* _ASMLANGUAGE */
-#endif /* CONFIG_ARM_USERSPACE */
+#endif /* CONFIG_USERSPACE */
 
 #ifdef __cplusplus
 }
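For orientation, this is roughly how a user-mode stub sits on top of these helpers. The stub and its call-id macro are hypothetical here; real stubs and ids are generated from syscall metadata at build time and dispatched through _k_syscall_table:

	/* Hypothetical hand-written stub for a one-argument syscall */
	static inline int user_sem_give(struct k_sem *sem)
	{
		/* K_SYSCALL_K_SEM_GIVE is a stand-in for a generated call id */
		return (int)_arch_syscall_invoke1((u32_t)sem, K_SYSCALL_K_SEM_GIVE);
	}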
@@ -33,6 +33,7 @@ extern void _SysFatalErrorHandler(unsigned int reason, const NANO_ESF *esf);
 
 #define _SVC_CALL_IRQ_OFFLOAD	1
 #define _SVC_CALL_RUNTIME_EXCEPT	2
+#define _SVC_CALL_SYSTEM_CALL	3
 
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 /* ARMv6 will hard-fault if SVC is called with interrupts locked. Just
@@ -319,7 +319,10 @@ _SYSCALL_HANDLER(k_thread_create,
 		 new_thread_p, stack_p, stack_size, entry, p1, more_args)
 {
 	int prio;
-	u32_t options, delay, guard_size, total_size;
+	u32_t options, delay;
+#ifndef CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
+	u32_t guard_size, total_size;
+#endif
 	struct _k_object *stack_object;
 	struct k_thread *new_thread = (struct k_thread *)new_thread_p;
 	volatile struct _syscall_10_args *margs =
@@ -334,18 +337,25 @@ _SYSCALL_HANDLER(k_thread_create,
 				  _OBJ_INIT_FALSE),
 		     "bad stack object");
 
+#ifndef CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
 	/* Verify that the stack size passed in is OK by computing the total
 	 * size and comparing it with the size value in the object metadata
+	 *
+	 * We skip this check for SoCs which utilize MPUs with power of two
+	 * alignment requirements as the guard is allocated out of the stack
+	 * size and not allocated in addition to the stack size
 	 */
 	guard_size = (u32_t)K_THREAD_STACK_BUFFER(stack) - (u32_t)stack;
 	_SYSCALL_VERIFY_MSG(!__builtin_uadd_overflow(guard_size, stack_size,
 						     &total_size),
 			    "stack size overflow (%u+%u)", stack_size,
 			    guard_size);
 
 	/* They really ought to be equal, make this more strict? */
 	_SYSCALL_VERIFY_MSG(total_size <= stack_object->data,
 			    "stack size %u is too big, max is %u",
 			    total_size, stack_object->data);
+#endif
 
 	/* Verify the struct containing args 6-10 */
 	_SYSCALL_MEMORY_READ(margs, sizeof(*margs));