diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 54b4427d826..5941a87ccfd 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -41,6 +41,7 @@ menu "ARCv2 Family Options" config CPU_ARCV2 bool select ARCH_HAS_STACK_PROTECTION + select ARCH_HAS_USERSPACE if ARC_CORE_MPU default y help This option signifies the use of a CPU of the ARCv2 family. diff --git a/arch/arc/core/CMakeLists.txt b/arch/arc/core/CMakeLists.txt index 6fd3c7159d8..1427e063218 100644 --- a/arch/arc/core/CMakeLists.txt +++ b/arch/arc/core/CMakeLists.txt @@ -22,3 +22,4 @@ zephyr_sources_ifdef(CONFIG_ARC_FIRQ fast_irq.S) zephyr_sources_if_kconfig(irq_offload.c) zephyr_sources_ifdef(CONFIG_ATOMIC_OPERATIONS_CUSTOM atomic.c) add_subdirectory_ifdef(CONFIG_CPU_HAS_MPU mpu) +zephyr_sources_ifdef(CONFIG_USERSPACE userspace.S) diff --git a/arch/arc/core/fast_irq.S b/arch/arc/core/fast_irq.S index 7ce27fe6e02..51f80c9617b 100644 --- a/arch/arc/core/fast_irq.S +++ b/arch/arc/core/fast_irq.S @@ -257,21 +257,13 @@ _firq_reschedule: */ _load_callee_saved_regs -#ifdef CONFIG_MPU_STACK_GUARD +#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE) push_s r2 mov r0, r2 - bl configure_mpu_stack_guard + bl configure_mpu_thread pop_s r2 #endif -#ifdef CONFIG_USERSPACE - push_s r2 - mov r0, r2 - bl configure_mpu_mem_domain - pop_s r2 -#endif - - ld_s r3, [r2, _thread_offset_to_relinquish_cause] breq r3, _CAUSE_RIRQ, _firq_return_from_rirq diff --git a/arch/arc/core/fatal.c b/arch/arc/core/fatal.c index 9ae2548277b..341616f1f61 100644 --- a/arch/arc/core/fatal.c +++ b/arch/arc/core/fatal.c @@ -84,3 +84,10 @@ FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason, for (;;) ; } + + +FUNC_NORETURN void _arch_syscall_oops(void *ssf_ptr) +{ + _SysFatalErrorHandler(_NANO_ERR_KERNEL_OOPS, ssf_ptr); + CODE_UNREACHABLE; +} diff --git a/arch/arc/core/fault_s.S b/arch/arc/core/fault_s.S index c6af71db13e..f04832fa221 100644 --- a/arch/arc/core/fault_s.S +++ b/arch/arc/core/fault_s.S @@ -47,7 +47,7 @@ 
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_tlb_miss_d) SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_prot_v) SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_privilege_v) SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_swi) -#ifndef CONFIG_IRQ_OFFLOAD +#if !defined(CONFIG_IRQ_OFFLOAD) && !defined(CONFIG_USERSPACE) SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap) #endif SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_extension) @@ -109,8 +109,38 @@ exc_nest_handle: #ifdef CONFIG_IRQ_OFFLOAD GTEXT(_irq_do_offload); +#endif +#if defined(CONFIG_IRQ_OFFLOAD) || defined(CONFIG_USERSPACE) SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap) +#ifdef CONFIG_USERSPACE + /* get the id of trap_s */ + lr ilink, [_ARC_V2_ECR] + and ilink, ilink, 0x3f + cmp ilink, 0x3 + bne _do_other_trap +/* do sys_call */ + mov ilink, _SYSCALL_LIMIT + cmp r6, ilink + blt valid_syscall_id + + mov r0, r6 + mov r6, _SYSCALL_BAD + +valid_syscall_id: + lr ilink, [_ARC_V2_ERET] + push ilink + lr ilink, [_ARC_V2_ERSTATUS] + push ilink + + bclr ilink, ilink, _ARC_V2_STATUS32_U_BIT + sr ilink, [_ARC_V2_ERSTATUS] + + mov ilink, _arc_do_syscall + sr ilink, [_ARC_V2_ERET] + + rtie + /* * Before invoking exception handler, the kernel switches to an exception * stack to save the faulting thread's registers. @@ -118,6 +148,8 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap) * a diagnostic message and halt. 
*/ +_do_other_trap: +#endif /* CONFIG_USERSPACE */ #ifdef CONFIG_ARC_STACK_CHECKING push_s r2 /* disable stack checking */ @@ -148,7 +180,9 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap) trap_nest_handle: push_s r0 +#ifdef CONFIG_IRQ_OFFLOAD jl _irq_do_offload +#endif pop sp @@ -200,5 +234,4 @@ _trap_check_for_swap: /* Assumption: r2 has current thread */ b _rirq_common_interrupt_swap - -#endif /* CONFIG_IRQ_OFFLOAD */ +#endif /* CONFIG_IRQ_OFFLOAD || CONFIG_USERSPACE */ diff --git a/arch/arc/core/isr_wrapper.S b/arch/arc/core/isr_wrapper.S index ad5ef5ac4c4..7899386c7db 100644 --- a/arch/arc/core/isr_wrapper.S +++ b/arch/arc/core/isr_wrapper.S @@ -229,6 +229,13 @@ From RIRQ: */ SECTION_FUNC(TEXT, _isr_wrapper) +#if CONFIG_USERSPACE +/* utilize the fact that Z bit is set if interrupt taken in U mode*/ + bnz _isr_from_privilege +/* get the correct stack pointer, don't touch _ARC_V2_USER_SP in the future */ + aex sp, [_ARC_V2_USER_SP] +_isr_from_privilege: +#endif #if CONFIG_ARC_FIRQ #if CONFIG_RGF_NUM_BANKS == 1 st r0,[saved_r0] diff --git a/arch/arc/core/mpu/arc_core_mpu.c b/arch/arc/core/mpu/arc_core_mpu.c index 16882a28063..f3b554a7c20 100644 --- a/arch/arc/core/mpu/arc_core_mpu.c +++ b/arch/arc/core/mpu/arc_core_mpu.c @@ -11,6 +11,27 @@ #include #include +/* + * @brief Configure MPU for the thread + * + * This function configures per thread memory map reprogramming the MPU. + * + * @param thread thread info data structure. 
+ */ +void configure_mpu_thread(struct k_thread *thread) +{ + arc_core_mpu_disable(); +#if defined(CONFIG_MPU_STACK_GUARD) + configure_mpu_stack_guard(thread); +#endif + +#if defined(CONFIG_USERSPACE) + configure_mpu_user_context(thread); + configure_mpu_mem_domain(thread); +#endif + arc_core_mpu_enable(); +} + #if defined(CONFIG_MPU_STACK_GUARD) /* * @brief Configure MPU stack guard @@ -22,15 +43,44 @@ */ void configure_mpu_stack_guard(struct k_thread *thread) { - arc_core_mpu_disable(); +#if defined(CONFIG_USERSPACE) + if (!thread->arch.priv_stack_start) { + /* the areas before and after the user stack of thread is + * kernel only. These area can be used as stack guard. + * ----------------------- + * | kernel only access | + * |---------------------| + * | user stack | + * |---------------------- + * | privilege stack | + * ----------------------- + */ + return; + } +#endif arc_core_mpu_configure(THREAD_STACK_GUARD_REGION, thread->stack_info.start - STACK_GUARD_SIZE, STACK_GUARD_SIZE); - arc_core_mpu_enable(); + } #endif #if defined(CONFIG_USERSPACE) +/* + * @brief Configure MPU user context + * + * This function configures the thread's user context. + * The functionality is meant to be used during context switch. + * + * @param thread thread info data structure. 
+ */ +void configure_mpu_user_context(struct k_thread *thread) +{ + SYS_LOG_DBG("configure user thread %p's context", thread); + arc_core_mpu_configure_user_context(thread); +} + + /* * @brief Configure MPU memory domain * @@ -42,9 +92,7 @@ void configure_mpu_stack_guard(struct k_thread *thread) void configure_mpu_mem_domain(struct k_thread *thread) { SYS_LOG_DBG("configure thread %p's domain", thread); - arc_core_mpu_disable(); arc_core_mpu_configure_mem_domain(thread->mem_domain_info.mem_domain); - arc_core_mpu_enable(); } int _arch_mem_domain_max_partitions_get(void) diff --git a/arch/arc/core/mpu/arc_mpu.c b/arch/arc/core/mpu/arc_mpu.c index 1f282d91ebc..c64dfd35413 100644 --- a/arch/arc/core/mpu/arc_mpu.c +++ b/arch/arc/core/mpu/arc_mpu.c @@ -11,6 +11,7 @@ #include #include #include +#include #include @@ -66,8 +67,12 @@ static inline u8_t _get_num_regions(void) static inline u32_t _get_region_attr_by_type(u32_t type) { switch (type) { + case THREAD_STACK_USER_REGION: + return REGION_RAM_ATTR; case THREAD_STACK_REGION: - return 0; + return AUX_MPU_RDP_KW | AUX_MPU_RDP_KR; + case THREAD_APP_DATA_REGION: + return REGION_RAM_ATTR; case THREAD_STACK_GUARD_REGION: /* no Write and Execute to guard region */ return AUX_MPU_RDP_UR | AUX_MPU_RDP_KR; @@ -161,8 +166,11 @@ static inline u32_t _get_region_index_by_type(u32_t type) */ switch (type) { #if CONFIG_ARC_MPU_VER == 2 + case THREAD_STACK_USER_REGION: + return _get_num_regions() - mpu_config.num_regions + - THREAD_STACK_REGION; case THREAD_STACK_REGION: - return _get_num_regions() - mpu_config.num_regions - type; + case THREAD_APP_DATA_REGION: case THREAD_STACK_GUARD_REGION: return _get_num_regions() - mpu_config.num_regions - type; case THREAD_DOMAIN_PARTITION_REGION: @@ -176,8 +184,10 @@ static inline u32_t _get_region_index_by_type(u32_t type) return _get_num_regions() - mpu_config.num_regions - type + 1; #endif #elif CONFIG_ARC_MPU_VER == 3 + case THREAD_STACK_USER_REGION: + return mpu_config.num_regions + 
THREAD_STACK_REGION - 1; case THREAD_STACK_REGION: - return mpu_config.num_regions + type - 1; + case THREAD_APP_DATA_REGION: case THREAD_STACK_GUARD_REGION: return mpu_config.num_regions + type - 1; case THREAD_DOMAIN_PARTITION_REGION: @@ -417,6 +427,37 @@ void arc_core_mpu_region(u32_t index, u32_t base, u32_t size, } #if defined(CONFIG_USERSPACE) +void arc_core_mpu_configure_user_context(struct k_thread *thread) +{ + u32_t base = (u32_t)thread->stack_obj; + u32_t size = thread->stack_info.size; + + /* for kernel threads, no need to configure user context */ + if (!thread->arch.priv_stack_start) { + return; + } + + arc_core_mpu_configure(THREAD_STACK_USER_REGION, base, size); + + /* configure app data portion */ +#ifdef CONFIG_APPLICATION_MEMORY +#if CONFIG_ARC_MPU_VER == 2 + base = (u32_t)&__app_ram_start; + size = (u32_t)&__app_ram_end - (u32_t)&__app_ram_start; + + /* set up app data region if exists, otherwise disable */ + if (size > 0) { + arc_core_mpu_configure(THREAD_APP_DATA_REGION, base, size); + } +#elif CONFIG_ARC_MPU_VER == 3 + /* + * ARC MPV v3 doesn't support MPU region overlap. 
+ * Application memory should be a static memory, defined in mpu_config + */ +#endif +#endif +} + /** * @brief configure MPU regions for the memory partitions of the memory domain * diff --git a/arch/arc/core/offsets/offsets.c b/arch/arc/core/offsets/offsets.c index fd2152431db..f9a57920689 100644 --- a/arch/arc/core/offsets/offsets.c +++ b/arch/arc/core/offsets/offsets.c @@ -79,6 +79,9 @@ GEN_OFFSET_SYM(_callee_saved_stack_t, r24); GEN_OFFSET_SYM(_callee_saved_stack_t, r25); GEN_OFFSET_SYM(_callee_saved_stack_t, r26); GEN_OFFSET_SYM(_callee_saved_stack_t, fp); +#ifdef CONFIG_USERSPACE +GEN_OFFSET_SYM(_callee_saved_stack_t, user_sp); +#endif GEN_OFFSET_SYM(_callee_saved_stack_t, r30); #ifdef CONFIG_FP_SHARING GEN_OFFSET_SYM(_callee_saved_stack_t, r58); diff --git a/arch/arc/core/regular_irq.S b/arch/arc/core/regular_irq.S index 643f34a1839..f8df077c215 100644 --- a/arch/arc/core/regular_irq.S +++ b/arch/arc/core/regular_irq.S @@ -161,17 +161,10 @@ _rirq_common_interrupt_swap: */ _load_callee_saved_regs -#ifdef CONFIG_MPU_STACK_GUARD +#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE) push_s r2 mov r0, r2 - bl configure_mpu_stack_guard - pop_s r2 -#endif - -#ifdef CONFIG_USERSPACE - push_s r2 - mov r0, r2 - bl configure_mpu_mem_domain + bl configure_mpu_thread pop_s r2 #endif diff --git a/arch/arc/core/swap.S b/arch/arc/core/swap.S index 2f9ac8e8a5b..700bb4783f4 100644 --- a/arch/arc/core/swap.S +++ b/arch/arc/core/swap.S @@ -110,17 +110,10 @@ SECTION_FUNC(TEXT, __swap) _load_callee_saved_regs -#ifdef CONFIG_MPU_STACK_GUARD +#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE) push_s r2 mov r0, r2 - bl configure_mpu_stack_guard - pop_s r2 -#endif - -#ifdef CONFIG_USERSPACE - push_s r2 - mov r0, r2 - bl configure_mpu_mem_domain + bl configure_mpu_thread pop_s r2 #endif diff --git a/arch/arc/core/thread.c b/arch/arc/core/thread.c index 42c763ca670..aacfcb779d6 100644 --- a/arch/arc/core/thread.c +++ b/arch/arc/core/thread.c @@ -64,14 +64,41 
@@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *stackEnd = pStackMem + stackSize; struct init_stack_frame *pInitCtx; - +#if CONFIG_USERSPACE + /* for kernel thread, the privilege stack is merged into thread stack */ + if (!(options & K_USER)) { + /* if MPU_STACK_GUARD is enabled, reserve the stack area + * |---------------------|----------------| + * | |(MPU STACK AREA)| + * | |----------------| + * | user stack | kernel thread | + * |---------------------| stack | + * | privilege stack | | + * ---------------------------------------- + */ +#ifdef CONFIG_MPU_STACK_GUARD + pStackMem += STACK_GUARD_SIZE; + stackSize = stackSize + CONFIG_PRIVILEGED_STACK_SIZE + - STACK_GUARD_SIZE; +#endif + stackEnd += CONFIG_PRIVILEGED_STACK_SIZE; + stackSize += CONFIG_PRIVILEGED_STACK_SIZE; + } +#endif _new_thread_init(thread, pStackMem, stackSize, priority, options); /* carve the thread entry struct from the "base" of the stack */ pInitCtx = (struct init_stack_frame *)(STACK_ROUND_DOWN(stackEnd) - sizeof(struct init_stack_frame)); - +#if CONFIG_USERSPACE + if (options & K_USER) { + pInitCtx->pc = ((u32_t)_arch_user_mode_enter); + } else { + pInitCtx->pc = ((u32_t)_thread_entry_wrapper); + } +#else pInitCtx->pc = ((u32_t)_thread_entry_wrapper); +#endif pInitCtx->r0 = (u32_t)pEntry; pInitCtx->r1 = (u32_t)parameter1; pInitCtx->r2 = (u32_t)parameter2; @@ -84,12 +111,24 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack, * value.
*/ #ifdef CONFIG_ARC_STACK_CHECKING - pInitCtx->status32 = _ARC_V2_STATUS32_SC | _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL); + pInitCtx->status32 = _ARC_V2_STATUS32_SC | + _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL); thread->arch.stack_base = (u32_t) stackEnd; #else pInitCtx->status32 = _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL); #endif +#if CONFIG_USERSPACE + if (options & K_USER) { + thread->arch.priv_stack_start = (u32_t) stackEnd; + thread->arch.priv_stack_size = + (u32_t)CONFIG_PRIVILEGED_STACK_SIZE; + } else { + thread->arch.priv_stack_start = 0; + thread->arch.priv_stack_size = 0; + } +#endif + #ifdef CONFIG_THREAD_MONITOR /* * In debug mode thread->entry give direct access to the thread entry @@ -113,3 +152,17 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack, thread_monitor_init(thread); } + + +#ifdef CONFIG_USERSPACE + +FUNC_NORETURN void _arch_user_mode_enter(k_thread_entry_t user_entry, + void *p1, void *p2, void *p3) +{ + _arc_userspace_enter(user_entry, p1, p2, p3, + (u32_t)_current->stack_obj, + _current->stack_info.size); + CODE_UNREACHABLE; +} + +#endif diff --git a/arch/arc/core/thread_entry_wrapper.S b/arch/arc/core/thread_entry_wrapper.S index 08769315b78..17026134392 100644 --- a/arch/arc/core/thread_entry_wrapper.S +++ b/arch/arc/core/thread_entry_wrapper.S @@ -15,7 +15,6 @@ #include GTEXT(_thread_entry_wrapper) -GTEXT(_thread_entry) /* * @brief Wrapper for _thread_entry diff --git a/arch/arc/core/userspace.S b/arch/arc/core/userspace.S new file mode 100644 index 00000000000..44f51a45b20 --- /dev/null +++ b/arch/arc/core/userspace.S @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2017 Synopsys. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include +#include + +GTEXT(_arc_userspace_enter) +GTEXT(_arc_do_syscall) +GTEXT(_arch_is_user_context) + + +/** + * + * User space entry function + * + * This function is the entry point to user mode from privileged execution. 
+ * The conversion is one way, and threads which transition to user mode do + * not transition back later, unless they are doing system calls. + * + */ +SECTION_FUNC(TEXT, _arc_userspace_enter) + /* + * In ARCv2, the U bit can only be set through exception return + */ + /* the end of user stack in r5 */ + add r5, r4, r5 + /* start of privilege stack */ + add r2, r5, CONFIG_PRIVILEGED_STACK_SIZE + sub r5, r5, 16 /* skip r0, r1, r2, r3 */ + +#ifdef CONFIG_INIT_STACKS + mov r0, 0xaaaaaaaa +#else + mov r0, 0x0 +#endif +_clear_user_stack: + st.ab r0, [r4, 4] + cmp r4, r5 + jlt _clear_user_stack + + lr r0, [_ARC_V2_STATUS32] + bset r0, r0, _ARC_V2_STATUS32_U_BIT + + mov r1, _thread_entry_wrapper + + /* fake exception return */ + kflag _ARC_V2_STATUS32_AE + + sr r0, [_ARC_V2_ERSTATUS] + sr r1, [_ARC_V2_ERET] + + /* when exception returns from kernel to user, sp and _ARC_V2_USER_SP + * will be switched + */ + sr r5, [_ARC_V2_USER_SP] + mov sp, r2 + + rtie + +/** + * + * Userspace system call function + * + * This function is used to do system calls from unprivileged code. 
This + * function is responsible for the following: + * 1) Dispatching the system call + * 2) Restoring stack and calling back to the caller of the system call + * + */ +SECTION_FUNC(TEXT, _arc_do_syscall) + /* r0-r5: arg1-arg6, r6 is call id */ + /* the call id is already checked in trap_s handler */ + push_s blink + push_s r0 + + mov r0, _k_syscall_table + ld.as blink, [r0, r6] + + pop_s r0 + + jl [blink] + + + pop_s blink + + + /* through fake exception return, go back to the caller */ + kflag _ARC_V2_STATUS32_AE + + /* the status and return addesss are saved in trap_s handler */ + pop r6 + sr r6, [_ARC_V2_ERSTATUS] + pop r6 + sr r6, [_ARC_V2_ERET] + rtie + +SECTION_FUNC(TEXT, _arch_is_user_context) + lr r0, [_ARC_V2_STATUS32] + bbit1 r0, 20, 1f + bset r1, r0, 20 + bclr r1, r1, 31 + kflag r1 + lr r1, [_ARC_V2_STATUS32] + bbit0 r1, 20, 2f + kflag r0 +1: + j_s.d [blink] + mov r0, 0 +2: + j_s.d [blink] + mov r0, 1 diff --git a/arch/arc/include/kernel_arch_data.h b/arch/arc/include/kernel_arch_data.h index 34cf9bb0fc6..de436d4fea6 100644 --- a/arch/arc/include/kernel_arch_data.h +++ b/arch/arc/include/kernel_arch_data.h @@ -129,6 +129,10 @@ struct _callee_saved_stack { u32_t r25; u32_t r26; u32_t fp; /* r27 */ + +#ifdef CONFIG_USERSPACE + u32_t user_sp; +#endif /* r28 is the stack pointer and saved separately */ /* r29 is ILINK and does not need to be saved */ u32_t r30; diff --git a/arch/arc/include/kernel_arch_func.h b/arch/arc/include/kernel_arch_func.h index 9a5808df7e6..760b9326a0f 100644 --- a/arch/arc/include/kernel_arch_func.h +++ b/arch/arc/include/kernel_arch_func.h @@ -78,6 +78,9 @@ static inline void _IntLibInit(void) /* nothing needed, here because the kernel requires it */ } +extern void _arc_userspace_enter(k_thread_entry_t user_entry, void *p1, + void *p2, void *p3, u32_t stack, u32_t size); + #endif /* _ASMLANGUAGE */ #ifdef __cplusplus diff --git a/arch/arc/include/kernel_arch_thread.h b/arch/arc/include/kernel_arch_thread.h index 
8a4ddca8837..1af17e04e55 100644 --- a/arch/arc/include/kernel_arch_thread.h +++ b/arch/arc/include/kernel_arch_thread.h @@ -63,6 +63,11 @@ struct _thread_arch { */ u32_t stack_base; #endif + +#ifdef CONFIG_USERSPACE + u32_t priv_stack_start; + u32_t priv_stack_size; +#endif }; typedef struct _thread_arch _thread_arch_t; diff --git a/arch/arc/include/swap_macros.h b/arch/arc/include/swap_macros.h index f1da57c07b9..8a9c75e67ac 100644 --- a/arch/arc/include/swap_macros.h +++ b/arch/arc/include/swap_macros.h @@ -41,6 +41,11 @@ extern "C" { st r25, [sp, ___callee_saved_stack_t_r25_OFFSET] st r26, [sp, ___callee_saved_stack_t_r26_OFFSET] st fp, [sp, ___callee_saved_stack_t_fp_OFFSET] + +#ifdef CONFIG_USERSPACE + lr r13, [_ARC_V2_USER_SP] + st r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET] +#endif st r30, [sp, ___callee_saved_stack_t_r30_OFFSET] #ifdef CONFIG_FP_SHARING @@ -93,6 +98,11 @@ extern "C" { sr r13, [_ARC_V2_FPU_DPFP2H] #endif +#endif + +#ifdef CONFIG_USERSPACE + ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET] + sr r13, [_ARC_V2_USER_SP] #endif ld_s r13, [sp, ___callee_saved_stack_t_r13_OFFSET] diff --git a/arch/arc/include/v2/irq.h b/arch/arc/include/v2/irq.h index 18da988cf66..7bdeeedb257 100644 --- a/arch/arc/include/v2/irq.h +++ b/arch/arc/include/v2/irq.h @@ -21,6 +21,7 @@ extern "C" { #define _ARC_V2_AUX_IRQ_CTRL_BLINK (1 << 9) #define _ARC_V2_AUX_IRQ_CTRL_LOOP_REGS (1 << 10) +#define _ARC_V2_AUX_IRQ_CTRL_U (1 << 11) #define _ARC_V2_AUX_IRQ_CTRL_LP (1 << 13) #define _ARC_V2_AUX_IRQ_CTRL_14_REGS 7 #define _ARC_V2_AUX_IRQ_CTRL_16_REGS 8 @@ -45,6 +46,9 @@ static ALWAYS_INLINE void _irq_setup(void) _ARC_V2_AUX_IRQ_CTRL_LOOP_REGS | /* save lp_xxx registers */ #ifdef CONFIG_CODE_DENSITY _ARC_V2_AUX_IRQ_CTRL_LP | /* save code density registers */ +#endif +#ifdef CONFIG_USERSPACE + _ARC_V2_AUX_IRQ_CTRL_U | /* save context into user stack */ #endif _ARC_V2_AUX_IRQ_CTRL_BLINK | /* save blink */ _ARC_V2_AUX_IRQ_CTRL_14_REGS /* save r0 -> r13 (caller-saved) 
*/ diff --git a/arch/arc/soc/em7d/CMakeLists.txt b/arch/arc/soc/em7d/CMakeLists.txt index 1ea2570a48b..0954e6599ff 100644 --- a/arch/arc/soc/em7d/CMakeLists.txt +++ b/arch/arc/soc/em7d/CMakeLists.txt @@ -12,5 +12,3 @@ zephyr_sources( soc.c soc_config.c ) - -zephyr_sources_ifdef(CONFIG_ARC_MPU_ENABLE arc_mpu_regions.c) diff --git a/arch/arc/soc/em7d/arc_mpu_regions.c b/arch/arc/soc/em7d/arc_mpu_regions.c deleted file mode 100644 index b8d23a125c6..00000000000 --- a/arch/arc/soc/em7d/arc_mpu_regions.c +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2017 Synopsys - * - * SPDX-License-Identifier: Apache-2.0 - */ - -#include -#include - -static struct arc_mpu_region mpu_regions[] = { -#if CONFIG_ICCM_SIZE > 0 - /* Region ICCM */ - MPU_REGION_ENTRY("ICCM", - CONFIG_ICCM_BASE_ADDRESS, - CONFIG_ICCM_SIZE * 1024, - REGION_FLASH_ATTR), -#endif -#if CONFIG_DCCM_SIZE > 0 - /* Region DCCM */ - MPU_REGION_ENTRY("DCCM", - CONFIG_DCCM_BASE_ADDRESS, - CONFIG_DCCM_SIZE * 1024, - REGION_RAM_ATTR), -#endif -#if CONFIG_SRAM_SIZE > 0 - /* Region DDR RAM */ - MPU_REGION_ENTRY("DDR RAM", - CONFIG_SRAM_BASE_ADDRESS, - CONFIG_SRAM_SIZE * 1024, - REGION_ALL_ATTR), -#endif - /* Region Peripheral */ - MPU_REGION_ENTRY("PERIPHERAL", - 0xF0000000, - 64 * 1024, - REGION_IO_ATTR), -}; - -struct arc_mpu_config mpu_config = { - .num_regions = ARRAY_SIZE(mpu_regions), - .mpu_regions = mpu_regions, -}; diff --git a/boards/arc/em_starterkit/CMakeLists.txt b/boards/arc/em_starterkit/CMakeLists.txt new file mode 100644 index 00000000000..7a9d4e7049d --- /dev/null +++ b/boards/arc/em_starterkit/CMakeLists.txt @@ -0,0 +1 @@ +zephyr_sources_ifdef(CONFIG_ARC_MPU_ENABLE arc_mpu_regions.c) \ No newline at end of file diff --git a/boards/arc/em_starterkit/arc_mpu_regions.c b/boards/arc/em_starterkit/arc_mpu_regions.c new file mode 100644 index 00000000000..65ff90ada6f --- /dev/null +++ b/boards/arc/em_starterkit/arc_mpu_regions.c @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2017 Synopsys + * + * 
SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +#ifdef CONFIG_USERSPACE +static struct arc_mpu_region mpu_regions[] = { +#if CONFIG_ARC_MPU_VER == 3 && defined(CONFIG_APPLICATION_MEMORY) + /* Region ICCM */ + MPU_REGION_ENTRY("IMAGE ROM", + _image_rom_start, + _image_rom_end, + REGION_FLASH_ATTR), + MPU_REGION_ENTRY("APP MEMORY", + __app_ram_start, + __app_ram_size, + REGION_RAM_ATTR), + MPU_REGION_ENTRY("KERNEL MEMORY", + __kernel_ram_start, + __kernel_ram_size, + AUX_MPU_RDP_KW | AUX_MPU_RDP_KR), + +#else +#if CONFIG_ICCM_SIZE > 0 + /* Region ICCM */ + MPU_REGION_ENTRY("ICCM", + CONFIG_ICCM_BASE_ADDRESS, + CONFIG_ICCM_SIZE * 1024, + REGION_FLASH_ATTR), +#endif +#if CONFIG_DCCM_SIZE > 0 + /* Region DCCM */ + MPU_REGION_ENTRY("DCCM", + CONFIG_DCCM_BASE_ADDRESS, + CONFIG_DCCM_SIZE * 1024, + AUX_MPU_RDP_KW | AUX_MPU_RDP_KR), +#endif +#if CONFIG_SRAM_SIZE > 0 + /* Region DDR RAM */ + MPU_REGION_ENTRY("DDR RAM", + CONFIG_SRAM_BASE_ADDRESS, + CONFIG_SRAM_SIZE * 1024, + AUX_MPU_RDP_KW | AUX_MPU_RDP_KR | + AUX_MPU_RDP_KE | AUX_MPU_RDP_UE), +#endif +#endif /* ARC_MPU_VER == 3 */ + /* Region Peripheral */ + MPU_REGION_ENTRY("PERIPHERAL", + 0xF0000000, + 64 * 1024, + AUX_MPU_RDP_KW | AUX_MPU_RDP_KR), +}; +#else +static struct arc_mpu_region mpu_regions[] = { +#if CONFIG_ICCM_SIZE > 0 + /* Region ICCM */ + MPU_REGION_ENTRY("ICCM", + CONFIG_ICCM_BASE_ADDRESS, + CONFIG_ICCM_SIZE * 1024, + REGION_FLASH_ATTR), +#endif +#if CONFIG_DCCM_SIZE > 0 + /* Region DCCM */ + MPU_REGION_ENTRY("DCCM", + CONFIG_DCCM_BASE_ADDRESS, + CONFIG_DCCM_SIZE * 1024, + REGION_RAM_ATTR), +#endif +#if CONFIG_SRAM_SIZE > 0 + /* Region DDR RAM */ + MPU_REGION_ENTRY("DDR RAM", + CONFIG_SRAM_BASE_ADDRESS, + CONFIG_SRAM_SIZE * 1024, + REGION_ALL_ATTR), +#endif + /* Region Peripheral */ + MPU_REGION_ENTRY("PERIPHERAL", + 0xF0000000, + 64 * 1024, + REGION_IO_ATTR), +}; +#endif + +struct arc_mpu_config mpu_config = { + .num_regions = ARRAY_SIZE(mpu_regions), + .mpu_regions = 
mpu_regions, +}; diff --git a/include/arch/arc/arch.h b/include/arch/arc/arch.h index c799aa75c02..f37aeef9b3f 100644 --- a/include/arch/arc/arch.h +++ b/include/arch/arc/arch.h @@ -39,7 +39,7 @@ extern "C" { #include #endif -#if defined(CONFIG_MPU_STACK_GUARD) +#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE) #if defined(CONFIG_ARC_CORE_MPU) #if CONFIG_ARC_MPU_VER == 2 /* @@ -62,22 +62,93 @@ extern "C" { #define STACK_GUARD_SIZE 0 #endif +#define STACK_SIZE_ALIGN(x) max(STACK_ALIGN, x) + + + +/** + * @brief Calculate power of two ceiling for a buffer size input + * + */ +#define POW2_CEIL(x) ((1 << (31 - __builtin_clz(x))) < x ? \ + 1 << (31 - __builtin_clz(x) + 1) : \ + 1 << (31 - __builtin_clz(x))) + +#if defined(CONFIG_USERSPACE) + +/* + * if user space is enabled, for user thread, no STACK_GUARD area; + * for kernel thread, the privilege stack can be used as STACK_GUARD + * area, and the privilege stack size must be greater than STACK_GUARD_SIZE + */ +#if defined(CONFIG_MPU_STACK_GUARD) && \ + CONFIG_PRIVILEGED_STACK_SIZE < STACK_GUARD_SIZE +#error "CONFIG_PRIVILEGED_STACK_SIZE must be larger than STACK_GUARD_SIZE" +#endif + + +#if CONFIG_ARC_MPU_VER == 2 + #define _ARCH_THREAD_STACK_DEFINE(sym, size) \ - struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \ - sym[size+STACK_GUARD_SIZE] + struct _k_thread_stack_element __kernel_noinit \ + __aligned(POW2_CEIL(STACK_SIZE_ALIGN(size))) \ + sym[POW2_CEIL(STACK_SIZE_ALIGN(size)) + \ + CONFIG_PRIVILEGED_STACK_SIZE] #define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \ - struct _k_thread_stack_element __noinit __aligned(STACK_ALIGN) \ - sym[nmemb][size+STACK_GUARD_SIZE] + struct _k_thread_stack_element __kernel_noinit \ + __aligned(POW2_CEIL(STACK_SIZE_ALIGN(size))) \ + sym[nmemb][POW2_CEIL(STACK_SIZE_ALIGN(size)) + \ + CONFIG_PRIVILEGED_STACK_SIZE] + +#define _ARCH_THREAD_STACK_MEMBER(sym, size) \ + struct _k_thread_stack_element \ + __aligned(POW2_CEIL(STACK_SIZE_ALIGN(size))) \ + 
sym[POW2_CEIL(size) + CONFIG_PRIVILEGED_STACK_SIZE] + +#elif CONFIG_ARC_MPU_VER == 3 + +#define _ARCH_THREAD_STACK_DEFINE(sym, size) \ + struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \ + sym[size + CONFIG_PRIVILEGED_STACK_SIZE] + +#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \ + struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \ + sym[nmemb][size + CONFIG_PRIVILEGED_STACK_SIZE] #define _ARCH_THREAD_STACK_MEMBER(sym, size) \ struct _k_thread_stack_element __aligned(STACK_ALIGN) \ - sym[size+STACK_GUARD_SIZE] + sym[size + CONFIG_PRIVILEGED_STACK_SIZE] + +#endif /* CONFIG_ARC_MPU_VER */ + +#define _ARCH_THREAD_STACK_SIZEOF(sym) \ + (sizeof(sym) - CONFIG_PRIVILEGED_STACK_SIZE) + +#define _ARCH_THREAD_STACK_BUFFER(sym) \ + ((char *)(sym)) + +#else /* CONFIG_USERSPACE */ + +#define _ARCH_THREAD_STACK_DEFINE(sym, size) \ + struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \ + sym[size + STACK_GUARD_SIZE] + +#define _ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \ + struct _k_thread_stack_element __kernel_noinit __aligned(STACK_ALIGN) \ + sym[nmemb][size + STACK_GUARD_SIZE] + +#define _ARCH_THREAD_STACK_MEMBER(sym, size) \ + struct _k_thread_stack_element __aligned(STACK_ALIGN) \ + sym[size + STACK_GUARD_SIZE] #define _ARCH_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - STACK_GUARD_SIZE) #define _ARCH_THREAD_STACK_BUFFER(sym) ((char *)(sym + STACK_GUARD_SIZE)) +#endif /* CONFIG_USERSPACE */ + + #ifdef CONFIG_USERSPACE #ifdef CONFIG_ARC_MPU #ifndef _ASMLANGUAGE @@ -158,46 +229,125 @@ static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3, u32_t arg4, u32_t arg5, u32_t arg6, u32_t call_id) { - return 0; + register u32_t ret __asm__("r0") = arg1; + register u32_t r1 __asm__("r1") = arg2; + register u32_t r2 __asm__("r2") = arg3; + register u32_t r3 __asm__("r3") = arg4; + register u32_t r4 __asm__("r4") = arg5; + register u32_t r5 __asm__("r5") = arg6; + register u32_t r6 
__asm__("r6") = call_id; + + __asm__ volatile( + "trap_s %[trap_s_id]\n" + : "=r"(ret) + : [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL), + "r" (ret), "r" (r1), "r" (r2), "r" (r3), + "r" (r4), "r" (r5), "r" (r6)); + + return ret; } static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3, u32_t arg4, u32_t arg5, u32_t call_id) { - return 0; + register u32_t ret __asm__("r0") = arg1; + register u32_t r1 __asm__("r1") = arg2; + register u32_t r2 __asm__("r2") = arg3; + register u32_t r3 __asm__("r3") = arg4; + register u32_t r4 __asm__("r4") = arg5; + register u32_t r6 __asm__("r6") = call_id; + + __asm__ volatile( + "trap_s %[trap_s_id]\n" + : "=r"(ret) + : [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL), + "r" (ret), "r" (r1), "r" (r2), "r" (r3), + "r" (r4), "r" (r6)); + + return ret; } static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3, u32_t arg4, u32_t call_id) { - return 0; + register u32_t ret __asm__("r0") = arg1; + register u32_t r1 __asm__("r1") = arg2; + register u32_t r2 __asm__("r2") = arg3; + register u32_t r3 __asm__("r3") = arg4; + register u32_t r6 __asm__("r6") = call_id; + + __asm__ volatile( + "trap_s %[trap_s_id]\n" + : "=r"(ret) + : [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL), + "r" (ret), "r" (r1), "r" (r2), "r" (r3), + "r" (r6)); + + return ret; } static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3, u32_t call_id) { - return 0; + register u32_t ret __asm__("r0") = arg1; + register u32_t r1 __asm__("r1") = arg2; + register u32_t r2 __asm__("r2") = arg3; + register u32_t r6 __asm__("r6") = call_id; + + __asm__ volatile( + "trap_s %[trap_s_id]\n" + : "=r"(ret) + : [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL), + "r" (ret), "r" (r1), "r" (r2), "r" (r6)); + + return ret; } static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id) { - return 0; + register u32_t ret __asm__("r0") = arg1; + register u32_t r1 __asm__("r1") = arg2; + register u32_t r6 __asm__("r6") = 
call_id; + + __asm__ volatile( + "trap_s %[trap_s_id]\n" + : "=r"(ret) + : [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL), + "r" (ret), "r" (r1), "r" (r6)); + + return ret; } static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id) { - return 0; + register u32_t ret __asm__("r0") = arg1; + register u32_t r6 __asm__("r6") = call_id; + + __asm__ volatile( + "trap_s %[trap_s_id]\n" + : "=r"(ret) + : [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL), + "r" (ret), "r" (r6)); + + return ret; } static inline u32_t _arch_syscall_invoke0(u32_t call_id) { - return 0; + register u32_t ret __asm__("r0"); + register u32_t r6 __asm__("r6") = call_id; + + __asm__ volatile( + "trap_s %[trap_s_id]\n" + : "=r"(ret) + : [trap_s_id] "i" (_TRAP_S_CALL_SYSTEM_CALL), + "r" (ret), "r" (r6)); + + return ret; } -static inline int _arch_is_user_context(void) -{ - return 0; -} +extern int _arch_is_user_context(void); + #endif /* _ASMLANGUAGE */ #endif /* CONFIG_USERSPACE */ #ifdef __cplusplus diff --git a/include/arch/arc/v2/aux_regs.h b/include/arch/arc/v2/aux_regs.h index 70f7f85c57a..010d270c299 100644 --- a/include/arch/arc/v2/aux_regs.h +++ b/include/arch/arc/v2/aux_regs.h @@ -25,6 +25,7 @@ extern "C" { #define _ARC_V2_SEC_STAT 0x09 #define _ARC_V2_STATUS32 0x00a #define _ARC_V2_STATUS32_P0 0x00b +#define _ARC_V2_USER_SP 0x00d #define _ARC_V2_AUX_IRQ_CTRL 0x00e #define _ARC_V2_IC_IVIC 0x010 #define _ARC_V2_IC_CTRL 0x011 @@ -102,7 +103,8 @@ extern "C" { #define _ARC_V2_STATUS32_AE_BIT 5 #define _ARC_V2_STATUS32_AE (1 << _ARC_V2_STATUS32_AE_BIT) #define _ARC_V2_STATUS32_DE (1 << 6) -#define _ARC_V2_STATUS32_U (1 << 7) +#define _ARC_V2_STATUS32_U_BIT 7 +#define _ARC_V2_STATUS32_U (1 << _ARC_V2_STATUS32_U_BIT) #define _ARC_V2_STATUS32_V (1 << 8) #define _ARC_V2_STATUS32_C (1 << 9) #define _ARC_V2_STATUS32_N (1 << 10) diff --git a/include/arch/arc/v2/error.h b/include/arch/arc/v2/error.h index 2dc517c1c8f..a05a0ad93bc 100644 --- a/include/arch/arc/v2/error.h +++ b/include/arch/arc/v2/error.h 
@@ -33,6 +33,11 @@ extern void _SysFatalErrorHandler(unsigned int cause, const NANO_ESF *esf); #define _NANO_ERR_KERNEL_OOPS (4) /* Kernel oops (fatal to thread) */ #define _NANO_ERR_KERNEL_PANIC (5) /* Kernel panic (fatal to system) */ + +#define _TRAP_S_SCALL_IRQ_OFFLOAD 1 +#define _TRAP_S_CALL_RUNTIME_EXCEPT 2 +#define _TRAP_S_CALL_SYSTEM_CALL 3 + #ifdef __cplusplus } #endif diff --git a/include/arch/arc/v2/linker.ld b/include/arch/arc/v2/linker.ld index 56ab03603e0..769d5063f2e 100644 --- a/include/arch/arc/v2/linker.ld +++ b/include/arch/arc/v2/linker.ld @@ -37,6 +37,16 @@ #endif #endif +#ifdef CONFIG_ARC_MPU_ENABLE + #if CONFIG_ARC_MPU_VER == 2 + #define MPU_ADDR_ALIGN . = ALIGN(2048); + #elif CONFIG_ARC_MPU_VER == 3 + #define MPU_ADDR_ALIGN . = ALIGN(32); + #endif +#else + #define MPU_ADDR_ALIGN +#endif + #if defined(CONFIG_XIP) #define _DATA_IN_ROM __data_rom_start #else @@ -108,7 +118,9 @@ SECTIONS { } GROUP_LINK_IN(ROMABLE_REGION) _image_rodata_end = .; + MPU_ADDR_ALIGN _image_rom_end = .; + _image_rom_size = _image_rom_end - _image_rom_start; GROUP_END(ROMABLE_REGION) @@ -117,6 +129,7 @@ SECTIONS { #ifdef CONFIG_APPLICATION_MEMORY SECTION_DATA_PROLOGUE(_APP_DATA_SECTION_NAME, (OPTIONAL),) { + MPU_ADDR_ALIGN __app_ram_start = .; __app_data_ram_start = .; _image_ram_start = .; @@ -142,12 +155,19 @@ SECTIONS { { APP_INPUT_SECTION(.noinit) APP_INPUT_SECTION(".noinit.*") +/* + * for MPU v2,the application memory section must be aligned to the size of + * section + */ + MPU_ADDR_ALIGN } GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION) __app_ram_end = .; + __app_ram_size = __app_ram_end - __app_ram_start; #endif /* CONFIG_APPLICATION_MEMORY */ SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),) { + MPU_ADDR_ALIGN /* * For performance, BSS section is assumed to be 4 byte aligned and * a multiple of 4 bytes @@ -204,6 +224,8 @@ SECTIONS { __data_ram_end = .; + MPU_ADDR_ALIGN + /* Define linker symbols */ _image_ram_end = .; _end = .; /* end of image */ diff --git 
a/include/arch/arc/v2/mpu/arc_core_mpu.h b/include/arch/arc/v2/mpu/arc_core_mpu.h index 9c67541eb07..89479e2717c 100644 --- a/include/arch/arc/v2/mpu/arc_core_mpu.h +++ b/include/arch/arc/v2/mpu/arc_core_mpu.h @@ -28,9 +28,11 @@ extern "C" { * be managed inside the MPU driver and not escalated. */ /* Thread Region Intent Type */ +#define THREAD_STACK_USER_REGION 0x0 #define THREAD_STACK_REGION 0x1 -#define THREAD_STACK_GUARD_REGION 0x2 -#define THREAD_DOMAIN_PARTITION_REGION 0x3 +#define THREAD_APP_DATA_REGION 0x2 +#define THREAD_STACK_GUARD_REGION 0x3 +#define THREAD_DOMAIN_PARTITION_REGION 0x4 #if defined(CONFIG_ARC_CORE_MPU) /* ARC Core MPU Driver API */ @@ -95,7 +97,7 @@ void configure_mpu_stack_guard(struct k_thread *thread); #endif #if defined(CONFIG_USERSPACE) - +void arc_core_mpu_configure_user_context(struct k_thread *thread); void arc_core_mpu_configure_mem_domain(struct k_mem_domain *mem_domain); void arc_core_mpu_mem_partition_remove(u32_t part_index); void arc_core_mpu_configure_mem_partition(u32_t part_index, @@ -112,8 +114,11 @@ int arc_core_mpu_buffer_validate(void *addr, size_t size, int write); * @param thread thread info data structure. */ void configure_mpu_mem_domain(struct k_thread *thread); + +void configure_mpu_user_context(struct k_thread *thread); #endif +void configure_mpu_thread(struct k_thread *thread); #ifdef __cplusplus }