arch: riscv: add memory protection support
The IRQ handler has had major changes to manage syscalls, rescheduling, and interrupts from user threads, as well as the stack guard.

Add userspace support:
- Use a global variable to know whether the current execution mode is user or machine. The location of this variable is read-only for all user threads and read/write for kernel threads.
- Shared memory is supported.
- Use dynamic allocation to optimize PMP slot usage. If the area size is a power of two, only one PMP slot is used; otherwise two are used.

Add stack guard support:
- Use the MPRV bit to apply PMP rules to machine-mode execution.
- The IRQ stack has a locked stack guard to avoid rewriting the PMP configuration registers on each interrupt, saving some cycles.
- The IRQ stack is used as a "temporary" stack at the beginning of the IRQ handler to save the current ESF. This avoids triggering a write fault on the thread stack while storing the ESF, which would re-enter the IRQ handler endlessly.
- A stack guard is also set up for the privileged stack of a user thread.

Thread:
- The PMP setup is specific to each thread. PMP setups are saved in each thread structure to improve reschedule performance.

Signed-off-by: Alexandre Mergnat <amergnat@baylibre.com>
Reviewed-by: Nicolas Royer <nroyer@baylibre.com>
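As background for the "one PMP slot if the area size is a power of two, otherwise two" point above, here is a minimal C sketch (illustration only, not part of the commit) of how a NAPOT-encoded PMP entry packs base and size into a single pmpaddr value, in the spirit of the TO_PMP_ADDR()/TO_PMP_NAPOT() helpers used in the diff below:

/* Illustration only: pmpaddr registers hold physical address bits [XLEN+1:2]. */
static inline unsigned long to_pmp_addr(unsigned long addr)
{
        return addr >> 2;
}

/*
 * NAPOT: for a power-of-two-sized, size-aligned region, the trailing one bits
 * of the encoded value describe the size, so a single slot covers the whole
 * area. An arbitrary start/end range needs an NA4 + TOR pair (two slots).
 */
static inline unsigned long to_pmp_napot(unsigned long base, unsigned long size)
{
        return to_pmp_addr(base | ((size / 2) - 1));
}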
This commit is contained in:
parent 18962e4ab8
commit 542a7fa25d
20 changed files with 1950 additions and 89 deletions
@ -117,10 +117,13 @@ menuconfig RISCV_PMP
|
|||
bool "RISC-V PMP Support"
|
||||
default n
|
||||
select THREAD_STACK_INFO
|
||||
select MEMORY_PROTECTION
|
||||
select MEMORY_PROTECTION if !BOARD_QEMU_RISCV32
|
||||
select ARCH_MEM_DOMAIN_SYNCHRONOUS_API if USERSPACE
|
||||
select PMP_POWER_OF_TWO_ALIGNMENT if USERSPACE
|
||||
help
|
||||
MCU implements Physical Memory Protection.
|
||||
Memory protection against read-only area writing
|
||||
is natively supported on real HW.
|
||||
|
||||
if RISCV_PMP
|
||||
source "arch/riscv/core/pmp/Kconfig"
|
||||
|
|
|
@ -17,3 +17,4 @@ zephyr_library_sources(
|
|||
|
||||
zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)
|
||||
zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE tls.c)
|
||||
zephyr_library_sources_ifdef(CONFIG_USERSPACE userspace.S)
|
||||
|
|
|
@ -7,9 +7,18 @@
|
|||
#include <kernel.h>
|
||||
#include <kernel_structs.h>
|
||||
#include <inttypes.h>
|
||||
#include <exc_handle.h>
|
||||
#include <logging/log.h>
|
||||
LOG_MODULE_DECLARE(os);
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
Z_EXC_DECLARE(z_riscv_user_string_nlen);
|
||||
|
||||
static const struct z_exc_handle exceptions[] = {
|
||||
Z_EXC_HANDLE(z_riscv_user_string_nlen),
|
||||
};
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
|
||||
const z_arch_esf_t *esf)
|
||||
{
|
||||
|
@ -52,8 +61,23 @@ static char *cause_str(ulong_t cause)
|
|||
}
|
||||
}
|
||||
|
||||
FUNC_NORETURN void _Fault(const z_arch_esf_t *esf)
|
||||
void _Fault(z_arch_esf_t *esf)
|
||||
{
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/*
|
||||
* Assess whether a PMP fault should be
|
||||
* treated as recoverable.
|
||||
*/
|
||||
for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
|
||||
uint32_t start = (uint32_t)exceptions[i].start;
|
||||
uint32_t end = (uint32_t)exceptions[i].end;
|
||||
|
||||
if (esf->mepc >= start && esf->mepc < end) {
|
||||
esf->mepc = (uint32_t)exceptions[i].fixup;
|
||||
return;
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
ulong_t mcause;
|
||||
|
||||
__asm__ volatile("csrr %0, mcause" : "=r" (mcause));
|
||||
|
@ -63,3 +87,30 @@ FUNC_NORETURN void _Fault(const z_arch_esf_t *esf)
|
|||
|
||||
z_riscv_fatal_error(K_ERR_CPU_EXCEPTION, esf);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
FUNC_NORETURN void arch_syscall_oops(void *ssf_ptr)
|
||||
{
|
||||
user_fault(K_ERR_KERNEL_OOPS);
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
||||
void z_impl_user_fault(unsigned int reason)
|
||||
{
|
||||
z_arch_esf_t *oops_esf = _current->syscall_frame;
|
||||
|
||||
if (((_current->base.user_options & K_USER) != 0) &&
|
||||
reason != K_ERR_STACK_CHK_FAIL) {
|
||||
reason = K_ERR_KERNEL_OOPS;
|
||||
}
|
||||
z_riscv_fatal_error(reason, oops_esf);
|
||||
}
|
||||
|
||||
static void z_vrfy_user_fault(unsigned int reason)
|
||||
{
|
||||
z_impl_user_fault(reason);
|
||||
}
|
||||
|
||||
#include <syscalls/user_fault_mrsh.c>
|
||||
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
/*
|
||||
* Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
|
||||
* Copyright (c) 2018 Foundries.io Ltd
|
||||
* Copyright (c) 2020 BayLibre, SAS
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
@ -11,6 +12,8 @@
|
|||
#include <arch/cpu.h>
|
||||
#include <sys/util.h>
|
||||
#include <kernel.h>
|
||||
#include <syscall.h>
|
||||
#include <arch/riscv/csr.h>
|
||||
|
||||
/* Convenience macros for loading/storing register states. */
|
||||
|
||||
|
@ -66,6 +69,156 @@
|
|||
fscsr x0, t2 ;\
|
||||
DO_FP_CALLEE_SAVED(RV_OP_LOADFPREG, reg)
|
||||
|
||||
#define COPY_ESF_FP_STATE(to_reg, from_reg, temp) \
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_fp_state_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_fp_state_OFFSET(to_reg) ;
|
||||
|
||||
#define COPY_ESF_FP(to_reg, from_reg, temp) \
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_ft0_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_ft0_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_ft1_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_ft1_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_ft2_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_ft2_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_ft3_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_ft3_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_ft4_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_ft4_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_ft5_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_ft5_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_ft6_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_ft6_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_ft7_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_ft7_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_ft8_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_ft8_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_ft9_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_ft9_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_ft10_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_ft10_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_ft11_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_ft11_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_fa0_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_fa0_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_fa1_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_fa1_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_fa2_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_fa2_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_fa3_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_fa3_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_fa4_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_fa4_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_fa5_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_fa5_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_fa6_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_fa6_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_fa7_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_fa7_OFFSET(to_reg) ;
|
||||
|
||||
#define COPY_ESF(to_reg, from_reg, temp) \
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_mepc_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_mepc_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_mstatus_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_mstatus_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_ra_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_ra_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_gp_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_gp_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_tp_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_tp_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_t0_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_t0_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_t1_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_t1_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_t2_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_t2_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_t3_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_t3_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_t4_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_t4_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_t5_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_t5_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_t6_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_t6_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_a0_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_a0_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_a1_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_a1_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_a2_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_a2_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_a3_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_a3_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_a4_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_a4_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_a5_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_a5_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_a6_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_a6_OFFSET(to_reg) ;\
|
||||
RV_OP_LOADREG temp, __z_arch_esf_t_a7_OFFSET(from_reg) ;\
|
||||
RV_OP_STOREREG temp, __z_arch_esf_t_a7_OFFSET(to_reg) ;
|
||||
|
||||
#define DO_CALLEE_SAVED(op, reg) \
|
||||
op s0, _thread_offset_to_s0(reg) ;\
|
||||
op s1, _thread_offset_to_s1(reg) ;\
|
||||
op s2, _thread_offset_to_s2(reg) ;\
|
||||
op s3, _thread_offset_to_s3(reg) ;\
|
||||
op s4, _thread_offset_to_s4(reg) ;\
|
||||
op s5, _thread_offset_to_s5(reg) ;\
|
||||
op s6, _thread_offset_to_s6(reg) ;\
|
||||
op s7, _thread_offset_to_s7(reg) ;\
|
||||
op s8, _thread_offset_to_s8(reg) ;\
|
||||
op s9, _thread_offset_to_s9(reg) ;\
|
||||
op s10, _thread_offset_to_s10(reg) ;\
|
||||
op s11, _thread_offset_to_s11(reg) ;
|
||||
|
||||
#define STORE_CALLEE_SAVED(reg) \
|
||||
DO_CALLEE_SAVED(RV_OP_STOREREG, reg)
|
||||
|
||||
#define LOAD_CALLER_SAVED(reg) \
|
||||
DO_CALLEE_SAVED(RV_OP_LOADREG, reg)
|
||||
|
||||
#define DO_CALLER_SAVED(op) \
|
||||
op ra, __z_arch_esf_t_ra_OFFSET(sp) ;\
|
||||
op gp, __z_arch_esf_t_gp_OFFSET(sp) ;\
|
||||
op tp, __z_arch_esf_t_tp_OFFSET(sp) ;\
|
||||
op t0, __z_arch_esf_t_t0_OFFSET(sp) ;\
|
||||
op t1, __z_arch_esf_t_t1_OFFSET(sp) ;\
|
||||
op t2, __z_arch_esf_t_t2_OFFSET(sp) ;\
|
||||
op t3, __z_arch_esf_t_t3_OFFSET(sp) ;\
|
||||
op t4, __z_arch_esf_t_t4_OFFSET(sp) ;\
|
||||
op t5, __z_arch_esf_t_t5_OFFSET(sp) ;\
|
||||
op t6, __z_arch_esf_t_t6_OFFSET(sp) ;\
|
||||
op a0, __z_arch_esf_t_a0_OFFSET(sp) ;\
|
||||
op a1, __z_arch_esf_t_a1_OFFSET(sp) ;\
|
||||
op a2, __z_arch_esf_t_a2_OFFSET(sp) ;\
|
||||
op a3, __z_arch_esf_t_a3_OFFSET(sp) ;\
|
||||
op a4, __z_arch_esf_t_a4_OFFSET(sp) ;\
|
||||
op a5, __z_arch_esf_t_a5_OFFSET(sp) ;\
|
||||
op a6, __z_arch_esf_t_a6_OFFSET(sp) ;\
|
||||
op a7, __z_arch_esf_t_a7_OFFSET(sp) ;
|
||||
|
||||
#define STORE_CALLER_SAVED() \
|
||||
addi sp, sp, -__z_arch_esf_t_SIZEOF ;\
|
||||
DO_CALLER_SAVED(RV_OP_STOREREG) ;
|
||||
|
||||
#define LOAD_CALLEE_SAVED() \
|
||||
DO_CALLER_SAVED(RV_OP_LOADREG) ;\
|
||||
addi sp, sp, __z_arch_esf_t_SIZEOF ;
|
||||
|
||||
/*
|
||||
* @brief Check previous mode.
|
||||
*
|
||||
* @param ret Register for the return value.
|
||||
* @param temp Register used for a temporary value.
|
||||
*
|
||||
* @return 0 if previous mode is user.
|
||||
*/
|
||||
#define WAS_NOT_USER(ret, temp) \
|
||||
RV_OP_LOADREG ret, __z_arch_esf_t_mstatus_OFFSET(sp) ;\
|
||||
li temp, MSTATUS_MPP ;\
|
||||
and ret, ret, temp ;
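As a readability aid, here is a C-level sketch (illustration only, not part of the commit) of what WAS_NOT_USER() computes: it masks the MPP field of the mstatus value saved in the ESF, so a zero result means the trap was taken from user mode.

static inline ulong_t was_not_user(const z_arch_esf_t *esf)
{
        /* Non-zero when the previous privilege mode was not U-mode. */
        return esf->mstatus & MSTATUS_MPP;
}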
|
||||
|
||||
|
||||
/* imports */
|
||||
GDATA(_sw_isr_table)
|
||||
GTEXT(__soc_is_irq)
|
||||
|
@ -89,6 +242,18 @@ GTEXT(sys_trace_isr_enter)
|
|||
GTEXT(_offload_routine)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
GTEXT(z_riscv_do_syscall)
|
||||
GTEXT(z_riscv_configure_user_allowed_stack)
|
||||
GTEXT(z_interrupt_stacks)
|
||||
GTEXT(z_riscv_do_syscall_start)
|
||||
GTEXT(z_riscv_do_syscall_end)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
GTEXT(z_riscv_configure_stack_guard)
|
||||
#endif
|
||||
|
||||
/* exports */
|
||||
GTEXT(__irq_wrapper)
|
||||
|
||||
|
@ -121,28 +286,19 @@ GTEXT(__irq_wrapper)
|
|||
* switching or IRQ offloading (when enabled).
|
||||
*/
|
||||
SECTION_FUNC(exception.entry, __irq_wrapper)
|
||||
/* Allocate space on thread stack to save registers */
|
||||
addi sp, sp, -__z_arch_esf_t_SIZEOF
|
||||
|
||||
/* Save caller-saved registers on current thread stack. */
|
||||
RV_OP_STOREREG ra, __z_arch_esf_t_ra_OFFSET(sp)
|
||||
RV_OP_STOREREG gp, __z_arch_esf_t_gp_OFFSET(sp)
|
||||
RV_OP_STOREREG tp, __z_arch_esf_t_tp_OFFSET(sp)
|
||||
RV_OP_STOREREG t0, __z_arch_esf_t_t0_OFFSET(sp)
|
||||
RV_OP_STOREREG t1, __z_arch_esf_t_t1_OFFSET(sp)
|
||||
RV_OP_STOREREG t2, __z_arch_esf_t_t2_OFFSET(sp)
|
||||
RV_OP_STOREREG t3, __z_arch_esf_t_t3_OFFSET(sp)
|
||||
RV_OP_STOREREG t4, __z_arch_esf_t_t4_OFFSET(sp)
|
||||
RV_OP_STOREREG t5, __z_arch_esf_t_t5_OFFSET(sp)
|
||||
RV_OP_STOREREG t6, __z_arch_esf_t_t6_OFFSET(sp)
|
||||
RV_OP_STOREREG a0, __z_arch_esf_t_a0_OFFSET(sp)
|
||||
RV_OP_STOREREG a1, __z_arch_esf_t_a1_OFFSET(sp)
|
||||
RV_OP_STOREREG a2, __z_arch_esf_t_a2_OFFSET(sp)
|
||||
RV_OP_STOREREG a3, __z_arch_esf_t_a3_OFFSET(sp)
|
||||
RV_OP_STOREREG a4, __z_arch_esf_t_a4_OFFSET(sp)
|
||||
RV_OP_STOREREG a5, __z_arch_esf_t_a5_OFFSET(sp)
|
||||
RV_OP_STOREREG a6, __z_arch_esf_t_a6_OFFSET(sp)
|
||||
RV_OP_STOREREG a7, __z_arch_esf_t_a7_OFFSET(sp)
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
/* Switch to the beginning of the IRQ stack to avoid a stack overflow */
|
||||
csrrw sp, mscratch, sp
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
/*
|
||||
* Save caller-saved registers on current thread stack.
|
||||
* NOTE: need to be updated to account for floating-point registers
|
||||
* floating-point registers should be accounted for when corresponding
|
||||
* config variable is set
|
||||
*/
|
||||
STORE_CALLER_SAVED()
|
||||
|
||||
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
|
||||
/* Assess whether floating-point registers need to be saved. */
|
||||
|
@ -155,7 +311,7 @@ SECTION_FUNC(exception.entry, __irq_wrapper)
|
|||
STORE_FP_CALLER_SAVED(sp)
|
||||
|
||||
skip_store_fp_caller_saved:
|
||||
#endif
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
/* Save MEPC register */
|
||||
csrr t0, mepc
|
||||
|
@ -171,6 +327,44 @@ skip_store_fp_caller_saved:
|
|||
jal ra, __soc_save_context
|
||||
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/* Check if we are in user stack by checking previous privilege mode */
|
||||
WAS_NOT_USER(t0, t1)
|
||||
bnez t0, is_priv_sp
|
||||
|
||||
la t0, _kernel
|
||||
RV_OP_LOADREG t1, _kernel_offset_to_current(t0)
|
||||
|
||||
/* Save user stack pointer */
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
csrr t2, mscratch
|
||||
#else
|
||||
mv t2, sp
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
RV_OP_STOREREG t2, _thread_offset_to_user_sp(t1)
|
||||
/*
|
||||
* Save callee-saved registers of user thread here
|
||||
* because rescheduling will occur in a nested ecall,
|
||||
* which means these registers will be out of context
|
||||
* at reschedule time.
|
||||
*/
|
||||
STORE_CALLEE_SAVED(t1)
|
||||
|
||||
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
|
||||
/* Assess whether floating-point registers need to be saved. */
|
||||
RV_OP_LOADREG t2, _thread_offset_to_user_options(t1)
|
||||
andi t2, t2, K_FP_REGS
|
||||
beqz t2, skip_store_fp_callee_saved_user
|
||||
STORE_FP_CALLEE_SAVED(t1)
|
||||
skip_store_fp_callee_saved_user:
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
is_priv_sp:
|
||||
/* Clear user mode variable */
|
||||
la t0, is_user_mode
|
||||
sb zero, 0x00(t0)
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
/*
|
||||
* Check if exception is the result of an interrupt or not.
|
||||
* (SOC dependent). Following the RISC-V architecture spec, the MSB
|
||||
|
@ -187,6 +381,12 @@ skip_store_fp_caller_saved:
|
|||
addi t1, x0, 0
|
||||
bnez a0, is_interrupt
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/* Reset IRQ flag */
|
||||
la t1, irq_flag
|
||||
sb zero, 0x00(t1)
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
/*
|
||||
* If the exception is the result of an ECALL, check whether to
|
||||
* perform a context-switch or an IRQ offload. Otherwise call _Fault
|
||||
|
@ -198,23 +398,54 @@ skip_store_fp_caller_saved:
|
|||
li t1, SOC_MCAUSE_ECALL_EXP
|
||||
|
||||
/*
|
||||
* If mcause == SOC_MCAUSE_ECALL_EXP, handle system call,
|
||||
* otherwise handle fault
|
||||
* If mcause == SOC_MCAUSE_ECALL_EXP, handle system call from
|
||||
* kernel thread.
|
||||
*/
|
||||
beq t0, t1, is_syscall
|
||||
beq t0, t1, is_kernel_syscall
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
li t1, SOC_MCAUSE_USER_ECALL_EXP
|
||||
|
||||
/*
|
||||
* If mcause == SOC_MCAUSE_USER_ECALL_EXP, handle system call from
|
||||
* user thread, otherwise handle fault.
|
||||
*/
|
||||
beq t0, t1, is_user_syscall
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
/*
|
||||
* Call _Fault to handle exception.
|
||||
* Stack pointer is pointing to a z_arch_esf_t structure, pass it
|
||||
* to _Fault (via register a0).
|
||||
* If _Fault shall return, set return address to no_reschedule
|
||||
* to restore stack.
|
||||
* If _Fault shall return, set return address to
|
||||
* no_reschedule to restore stack.
|
||||
*/
|
||||
addi a0, sp, 0
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
la ra, no_reschedule_from_fault
|
||||
/* Switch to privilege stack */
|
||||
la t0, _kernel
|
||||
RV_OP_LOADREG t1, _kernel_offset_to_current(t0)
|
||||
RV_OP_LOADREG t0, _thread_offset_to_priv_stack_start(t1)
|
||||
RV_OP_STOREREG sp, _thread_offset_to_user_sp(t1) /* Update user SP */
|
||||
addi sp, t0, CONFIG_PRIVILEGED_STACK_SIZE
|
||||
#else
|
||||
la ra, no_reschedule
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
tail _Fault
|
||||
|
||||
is_syscall:
|
||||
is_kernel_syscall:
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/* Check if it is a return from user syscall */
|
||||
csrr t0, mepc
|
||||
la t1, z_riscv_do_syscall_start
|
||||
bltu t0, t1, not_user_syscall
|
||||
la t1, z_riscv_do_syscall_end
|
||||
bleu t0, t1, return_from_syscall
|
||||
not_user_syscall:
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
/*
|
||||
* A syscall is the result of an ecall instruction, in which case the
|
||||
* MEPC will contain the address of the ecall instruction.
|
||||
|
@ -238,14 +469,186 @@ is_syscall:
|
|||
la t0, _offload_routine
|
||||
RV_OP_LOADREG t1, 0x00(t0)
|
||||
bnez t1, is_interrupt
|
||||
#endif
|
||||
#endif /* CONFIG_IRQ_OFFLOAD */
|
||||
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
li t0, MSTATUS_MPRV
|
||||
csrs mstatus, t0
|
||||
|
||||
/* Move to current thread SP and move ESF */
|
||||
csrrw sp, mscratch, sp
|
||||
csrr t0, mscratch
|
||||
addi sp, sp, -__z_arch_esf_t_SIZEOF
|
||||
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
|
||||
RV_OP_LOADREG t1, __z_arch_esf_t_fp_state_OFFSET(t0)
|
||||
beqz t1, skip_fp_move_kernel_syscall
|
||||
COPY_ESF_FP(sp, t0, t1)
|
||||
skip_fp_move_kernel_syscall:
|
||||
COPY_ESF_FP_STATE(sp, t0, t1)
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
COPY_ESF(sp, t0, t1)
|
||||
addi t0, t0, __z_arch_esf_t_SIZEOF
|
||||
csrw mscratch, t0
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/*
|
||||
* Check for forced syscall,
|
||||
* otherwise go to reschedule to handle context-switch
|
||||
*/
|
||||
li t0, FORCE_SYSCALL_ID
|
||||
bne a7, t0, reschedule
|
||||
|
||||
RV_OP_LOADREG a0, __z_arch_esf_t_a0_OFFSET(sp)
|
||||
|
||||
/* Check for user_mode_enter function */
|
||||
la t0, arch_user_mode_enter
|
||||
bne t0, a0, reschedule
|
||||
|
||||
RV_OP_LOADREG a0, __z_arch_esf_t_a1_OFFSET(sp)
|
||||
RV_OP_LOADREG a1, __z_arch_esf_t_a2_OFFSET(sp)
|
||||
RV_OP_LOADREG a2, __z_arch_esf_t_a3_OFFSET(sp)
|
||||
RV_OP_LOADREG a3, __z_arch_esf_t_a4_OFFSET(sp)
|
||||
|
||||
/*
|
||||
* MRET will be done in the following function because
|
||||
* restoring caller-saved registers is no longer needed
|
||||
* due to user mode jump (new stack/context).
|
||||
*/
|
||||
j z_riscv_user_mode_enter_syscall
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
/*
|
||||
* Go to reschedule to handle context-switch
|
||||
*/
|
||||
j reschedule
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
is_user_syscall:
|
||||
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
la t0, _kernel
|
||||
RV_OP_LOADREG a0, _kernel_offset_to_current(t0)
|
||||
jal ra, z_riscv_configure_stack_guard
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
/*
|
||||
* A syscall is the result of an ecall instruction, in which case the
|
||||
* MEPC will contain the address of the ecall instruction.
|
||||
* Increment saved MEPC by 4 to prevent triggering the same ecall
|
||||
* again upon exiting the ISR.
|
||||
*
|
||||
* It is safe to always increment by 4, even with compressed
|
||||
* instructions, because the ecall instruction is always 4 bytes.
|
||||
*/
|
||||
RV_OP_LOADREG t1, __z_arch_esf_t_mepc_OFFSET(sp)
|
||||
addi t1, t1, 4
|
||||
RV_OP_STOREREG t1, __z_arch_esf_t_mepc_OFFSET(sp)
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
/*
|
||||
* Copy ESF to user stack in case of rescheduling
|
||||
* directly from kernel ECALL (nested ECALL)
|
||||
*/
|
||||
csrrw sp, mscratch, sp
|
||||
csrr t0, mscratch
|
||||
addi sp, sp, -__z_arch_esf_t_SIZEOF
|
||||
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
|
||||
RV_OP_LOADREG t1, __z_arch_esf_t_fp_state_OFFSET(t0)
|
||||
beqz t1, skip_fp_copy_user_syscall
|
||||
COPY_ESF_FP(sp, t0, t1)
|
||||
skip_fp_copy_user_syscall:
|
||||
COPY_ESF_FP_STATE(sp, t0, t1)
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
COPY_ESF(sp, t0, t1)
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
/* Restore argument registers from user stack */
|
||||
RV_OP_LOADREG a0, __z_arch_esf_t_a0_OFFSET(sp)
|
||||
RV_OP_LOADREG a1, __z_arch_esf_t_a1_OFFSET(sp)
|
||||
RV_OP_LOADREG a2, __z_arch_esf_t_a2_OFFSET(sp)
|
||||
RV_OP_LOADREG a3, __z_arch_esf_t_a3_OFFSET(sp)
|
||||
RV_OP_LOADREG a4, __z_arch_esf_t_a4_OFFSET(sp)
|
||||
RV_OP_LOADREG a5, __z_arch_esf_t_a5_OFFSET(sp)
|
||||
mv a6, sp
|
||||
RV_OP_LOADREG a7, __z_arch_esf_t_a7_OFFSET(sp)
|
||||
|
||||
/* Switch to privilege stack */
|
||||
la t0, _kernel
|
||||
RV_OP_LOADREG t1, _kernel_offset_to_current(t0)
|
||||
RV_OP_LOADREG t0, _thread_offset_to_priv_stack_start(t1)
|
||||
RV_OP_STOREREG sp, _thread_offset_to_user_sp(t1) /* Update user SP */
|
||||
addi sp, t0, CONFIG_PRIVILEGED_STACK_SIZE
|
||||
|
||||
/* validate syscall limit */
|
||||
li t0, K_SYSCALL_LIMIT
|
||||
bltu a7, t0, valid_syscall_id
|
||||
|
||||
/* bad syscall id. Set arg1 to bad id and set call_id to SYSCALL_BAD */
|
||||
mv a0, a7
|
||||
li a7, K_SYSCALL_BAD
|
||||
|
||||
valid_syscall_id:
|
||||
|
||||
/* Prepare to jump into do_syscall function */
|
||||
la t0, z_riscv_do_syscall
|
||||
csrw mepc, t0
|
||||
|
||||
/* Force kernel mode for syscall execution */
|
||||
li t0, MSTATUS_MPP
|
||||
csrs mstatus, t0
|
||||
SOC_ERET
|
||||
|
||||
return_from_syscall:
|
||||
/*
|
||||
* Retrieve a0 (return value) from the privilege stack
|
||||
* (or IRQ stack if stack guard is enabled).
|
||||
*/
|
||||
RV_OP_LOADREG a0, __z_arch_esf_t_a0_OFFSET(sp)
|
||||
|
||||
no_reschedule_from_fault:
|
||||
/* Restore User SP */
|
||||
la t0, _kernel
|
||||
RV_OP_LOADREG t1, _kernel_offset_to_current(t0)
|
||||
RV_OP_LOADREG sp, _thread_offset_to_user_sp(t1)
|
||||
|
||||
/* Update a0 (return value) */
|
||||
RV_OP_STOREREG a0, __z_arch_esf_t_a0_OFFSET(sp)
|
||||
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
/* Move to IRQ stack start */
|
||||
csrw mscratch, sp /* Save user sp */
|
||||
la t2, z_interrupt_stacks
|
||||
li t3, CONFIG_ISR_STACK_SIZE
|
||||
add sp, t2, t3
|
||||
|
||||
/*
|
||||
* Copy ESF to IRQ stack from user stack
|
||||
* to execute "no_reschedule" properly.
|
||||
*/
|
||||
csrr t0, mscratch
|
||||
addi sp, sp, -__z_arch_esf_t_SIZEOF
|
||||
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
|
||||
RV_OP_LOADREG t1, __z_arch_esf_t_fp_state_OFFSET(t0)
|
||||
beqz t1, skip_fp_copy_return_user_syscall
|
||||
COPY_ESF_FP(sp, t0, t1)
|
||||
skip_fp_copy_return_user_syscall:
|
||||
COPY_ESF_FP_STATE(sp, t0, t1)
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
COPY_ESF(sp, t0, t1)
|
||||
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
j no_reschedule
|
||||
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
is_interrupt:
|
||||
#ifdef CONFIG_USERSPACE
|
||||
la t0, irq_flag
|
||||
li t2, 0x1
|
||||
sb t2, 0x00(t0)
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
#if (CONFIG_USERSPACE == 0) && (CONFIG_PMP_STACK_GUARD == 0)
|
||||
/*
|
||||
* Save current thread stack pointer and switch
|
||||
* stack pointer to interrupt stack.
|
||||
|
@ -264,6 +667,9 @@ is_interrupt:
|
|||
*/
|
||||
addi sp, sp, -16
|
||||
RV_OP_STOREREG t0, 0x00(sp)
|
||||
#else
|
||||
la t2, _kernel
|
||||
#endif /* !CONFIG_USERSPACE && !CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
on_irq_stack:
|
||||
/* Increment _kernel.cpus[0].nested variable */
|
||||
|
@ -329,9 +735,11 @@ on_thread_stack:
|
|||
addi t2, t2, -1
|
||||
sw t2, _kernel_offset_to_nested(t1)
|
||||
|
||||
#if !defined(CONFIG_USERSPACE) && !defined(CONFIG_PMP_STACK_GUARD)
|
||||
/* Restore thread stack pointer */
|
||||
RV_OP_LOADREG t0, 0x00(sp)
|
||||
addi sp, t0, 0
|
||||
#endif /* !CONFIG_USERSPACE && !CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
#ifdef CONFIG_STACK_SENTINEL
|
||||
call z_check_stack_sentinel
|
||||
|
@ -356,7 +764,54 @@ on_thread_stack:
|
|||
j no_reschedule
|
||||
#endif /* CONFIG_PREEMPT_ENABLED */
|
||||
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
RV_OP_LOADREG a0, _kernel_offset_to_current(t1)
|
||||
jal ra, z_riscv_configure_stack_guard
|
||||
|
||||
/*
|
||||
* Move to saved SP and move ESF to retrieve it
|
||||
* after reschedule.
|
||||
*/
|
||||
csrrw sp, mscratch, sp
|
||||
csrr t0, mscratch
|
||||
addi sp, sp, -__z_arch_esf_t_SIZEOF
|
||||
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
|
||||
RV_OP_LOADREG t1, __z_arch_esf_t_fp_state_OFFSET(t0)
|
||||
beqz t1, skip_fp_move_irq
|
||||
COPY_ESF_FP(sp, t0, t1)
|
||||
skip_fp_move_irq:
|
||||
COPY_ESF_FP_STATE(sp, t0, t1)
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
COPY_ESF(sp, t0, t1)
|
||||
addi t0, t0, __z_arch_esf_t_SIZEOF
|
||||
csrw mscratch, t0
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/* Check if we are in user thread */
|
||||
WAS_NOT_USER(t3, t4)
|
||||
bnez t3, reschedule
|
||||
|
||||
/*
|
||||
* Switch to privilege stack because we want
|
||||
* this starting point after reschedule.
|
||||
*/
|
||||
RV_OP_LOADREG t3, _thread_offset_to_priv_stack_start(t2)
|
||||
RV_OP_STOREREG sp, _thread_offset_to_user_sp(t2) /* Save user SP */
|
||||
mv t0, sp
|
||||
addi sp, t3, CONFIG_PRIVILEGED_STACK_SIZE
|
||||
|
||||
/*
|
||||
* Copy the saved ESF to the privilege stack, which will allow us to know
|
||||
* during rescheduling whether the thread was running in user mode.
|
||||
*/
|
||||
addi sp, sp, -__z_arch_esf_t_SIZEOF
|
||||
COPY_ESF(sp, t0, t1)
|
||||
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
reschedule:
|
||||
|
||||
#if CONFIG_TRACING
|
||||
call sys_trace_thread_switched_out
|
||||
#endif
|
||||
|
@ -366,22 +821,20 @@ reschedule:
|
|||
/* Get pointer to _kernel.current */
|
||||
RV_OP_LOADREG t1, _kernel_offset_to_current(t0)
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/*
|
||||
* Save callee-saved registers of current thread
|
||||
* Check the thread mode and skip callee-saved register storing
|
||||
* because it is already done for user threads
|
||||
*/
|
||||
WAS_NOT_USER(t6, t4)
|
||||
beqz t6, skip_callee_saved_reg
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
/*
|
||||
* Save callee-saved registers of current kernel thread
|
||||
* prior to handle context-switching
|
||||
*/
|
||||
RV_OP_STOREREG s0, _thread_offset_to_s0(t1)
|
||||
RV_OP_STOREREG s1, _thread_offset_to_s1(t1)
|
||||
RV_OP_STOREREG s2, _thread_offset_to_s2(t1)
|
||||
RV_OP_STOREREG s3, _thread_offset_to_s3(t1)
|
||||
RV_OP_STOREREG s4, _thread_offset_to_s4(t1)
|
||||
RV_OP_STOREREG s5, _thread_offset_to_s5(t1)
|
||||
RV_OP_STOREREG s6, _thread_offset_to_s6(t1)
|
||||
RV_OP_STOREREG s7, _thread_offset_to_s7(t1)
|
||||
RV_OP_STOREREG s8, _thread_offset_to_s8(t1)
|
||||
RV_OP_STOREREG s9, _thread_offset_to_s9(t1)
|
||||
RV_OP_STOREREG s10, _thread_offset_to_s10(t1)
|
||||
RV_OP_STOREREG s11, _thread_offset_to_s11(t1)
|
||||
STORE_CALLEE_SAVED(t1)
|
||||
|
||||
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
|
||||
/* Assess whether floating-point registers need to be saved. */
|
||||
|
@ -391,7 +844,20 @@ reschedule:
|
|||
STORE_FP_CALLEE_SAVED(t1)
|
||||
|
||||
skip_store_fp_callee_saved:
|
||||
#endif
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
skip_callee_saved_reg:
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
/*
|
||||
* Reset mscratch value because it is simpler
|
||||
* than removing the user ESF, and it prevents unknown corner cases
|
||||
*/
|
||||
la t2, z_interrupt_stacks
|
||||
li t3, CONFIG_ISR_STACK_SIZE
|
||||
add t2, t2, t3
|
||||
csrw mscratch, t2
|
||||
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
/*
|
||||
* Save stack pointer of current thread and set the default return value
|
||||
|
@ -414,18 +880,7 @@ skip_store_fp_callee_saved:
|
|||
RV_OP_LOADREG sp, _thread_offset_to_sp(t1)
|
||||
|
||||
/* Restore callee-saved registers of new thread */
|
||||
RV_OP_LOADREG s0, _thread_offset_to_s0(t1)
|
||||
RV_OP_LOADREG s1, _thread_offset_to_s1(t1)
|
||||
RV_OP_LOADREG s2, _thread_offset_to_s2(t1)
|
||||
RV_OP_LOADREG s3, _thread_offset_to_s3(t1)
|
||||
RV_OP_LOADREG s4, _thread_offset_to_s4(t1)
|
||||
RV_OP_LOADREG s5, _thread_offset_to_s5(t1)
|
||||
RV_OP_LOADREG s6, _thread_offset_to_s6(t1)
|
||||
RV_OP_LOADREG s7, _thread_offset_to_s7(t1)
|
||||
RV_OP_LOADREG s8, _thread_offset_to_s8(t1)
|
||||
RV_OP_LOADREG s9, _thread_offset_to_s9(t1)
|
||||
RV_OP_LOADREG s10, _thread_offset_to_s10(t1)
|
||||
RV_OP_LOADREG s11, _thread_offset_to_s11(t1)
|
||||
LOAD_CALLER_SAVED(t1)
|
||||
|
||||
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
|
||||
/* Determine if we need to restore floating-point registers. */
|
||||
|
@ -445,13 +900,107 @@ skip_store_fp_callee_saved:
|
|||
LOAD_FP_CALLEE_SAVED(t1)
|
||||
|
||||
skip_load_fp_callee_saved:
|
||||
#endif
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
mv a0, t1 /* kernel current */
|
||||
jal ra, z_riscv_configure_stack_guard
|
||||
#endif // CONFIG_PMP_STACK_GUARD
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/* t0 still references _kernel */
|
||||
/* t1 still points to _kernel.current */
|
||||
|
||||
/* Check the thread mode */
|
||||
WAS_NOT_USER(t2, t4)
|
||||
bnez t2, kernel_swap
|
||||
|
||||
/* Switch to user stack */
|
||||
RV_OP_LOADREG sp, _thread_offset_to_user_sp(t1)
|
||||
|
||||
/* Setup User allowed stack */
|
||||
li t0, MSTATUS_MPRV
|
||||
csrc mstatus, t0
|
||||
mv a0, t1
|
||||
jal ra, z_riscv_configure_user_allowed_stack
|
||||
|
||||
/* Set user mode variable */
|
||||
li t2, 0x1
|
||||
la t3, is_user_mode
|
||||
sb t2, 0x00(t3)
|
||||
|
||||
kernel_swap:
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
#if CONFIG_TRACING
|
||||
call sys_trace_thread_switched_in
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
|
||||
/* Restore context at SOC level */
|
||||
addi a0, sp, __z_arch_esf_t_soc_context_OFFSET
|
||||
jal ra, __soc_restore_context
|
||||
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
|
||||
|
||||
/* Restore MEPC register */
|
||||
RV_OP_LOADREG t0, __z_arch_esf_t_mepc_OFFSET(sp)
|
||||
csrw mepc, t0
|
||||
|
||||
/* Restore SOC-specific MSTATUS register */
|
||||
RV_OP_LOADREG t0, __z_arch_esf_t_mstatus_OFFSET(sp)
|
||||
csrw mstatus, t0
|
||||
|
||||
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
|
||||
/*
|
||||
* Determine if we need to restore floating-point registers. This needs
|
||||
* to happen before restoring integer registers to avoid stomping on
|
||||
* t0.
|
||||
*/
|
||||
RV_OP_LOADREG t0, __z_arch_esf_t_fp_state_OFFSET(sp)
|
||||
beqz t0, skip_load_fp_caller_saved_resched
|
||||
LOAD_FP_CALLER_SAVED(sp)
|
||||
|
||||
skip_load_fp_caller_saved_resched:
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
/* Restore caller-saved registers from thread stack */
|
||||
LOAD_CALLEE_SAVED()
|
||||
|
||||
/* Call SOC_ERET to exit ISR */
|
||||
SOC_ERET
|
||||
|
||||
no_reschedule:
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
|
||||
/* Check if we are in user thread */
|
||||
WAS_NOT_USER(t2, t4)
|
||||
bnez t2, no_enter_user
|
||||
|
||||
li t0, MSTATUS_MPRV
|
||||
csrc mstatus, t0
|
||||
|
||||
la t0, _kernel
|
||||
RV_OP_LOADREG a0, _kernel_offset_to_current(t0)
|
||||
jal ra, z_riscv_configure_user_allowed_stack
|
||||
|
||||
/* Set user mode variable */
|
||||
li t1, 0x1
|
||||
la t0, is_user_mode
|
||||
sb t1, 0x00(t0)
|
||||
|
||||
la t0, irq_flag
|
||||
lb t0, 0x00(t0)
|
||||
bnez t0, no_enter_user
|
||||
|
||||
/* Clear ESF saved in User Stack */
|
||||
csrr t0, mscratch
|
||||
addi t0, t0, __z_arch_esf_t_SIZEOF
|
||||
csrw mscratch, t0
|
||||
|
||||
no_enter_user:
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
|
||||
/* Restore context at SOC level */
|
||||
addi a0, sp, __z_arch_esf_t_soc_context_OFFSET
|
||||
|
@ -477,30 +1026,13 @@ no_reschedule:
|
|||
LOAD_FP_CALLER_SAVED(sp)
|
||||
|
||||
skip_load_fp_caller_saved:
|
||||
#endif
|
||||
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
|
||||
|
||||
/* Restore caller-saved registers from thread stack */
|
||||
RV_OP_LOADREG ra, __z_arch_esf_t_ra_OFFSET(sp)
|
||||
RV_OP_LOADREG gp, __z_arch_esf_t_gp_OFFSET(sp)
|
||||
RV_OP_LOADREG tp, __z_arch_esf_t_tp_OFFSET(sp)
|
||||
RV_OP_LOADREG t0, __z_arch_esf_t_t0_OFFSET(sp)
|
||||
RV_OP_LOADREG t1, __z_arch_esf_t_t1_OFFSET(sp)
|
||||
RV_OP_LOADREG t2, __z_arch_esf_t_t2_OFFSET(sp)
|
||||
RV_OP_LOADREG t3, __z_arch_esf_t_t3_OFFSET(sp)
|
||||
RV_OP_LOADREG t4, __z_arch_esf_t_t4_OFFSET(sp)
|
||||
RV_OP_LOADREG t5, __z_arch_esf_t_t5_OFFSET(sp)
|
||||
RV_OP_LOADREG t6, __z_arch_esf_t_t6_OFFSET(sp)
|
||||
RV_OP_LOADREG a0, __z_arch_esf_t_a0_OFFSET(sp)
|
||||
RV_OP_LOADREG a1, __z_arch_esf_t_a1_OFFSET(sp)
|
||||
RV_OP_LOADREG a2, __z_arch_esf_t_a2_OFFSET(sp)
|
||||
RV_OP_LOADREG a3, __z_arch_esf_t_a3_OFFSET(sp)
|
||||
RV_OP_LOADREG a4, __z_arch_esf_t_a4_OFFSET(sp)
|
||||
RV_OP_LOADREG a5, __z_arch_esf_t_a5_OFFSET(sp)
|
||||
RV_OP_LOADREG a6, __z_arch_esf_t_a6_OFFSET(sp)
|
||||
RV_OP_LOADREG a7, __z_arch_esf_t_a7_OFFSET(sp)
|
||||
|
||||
/* Release stack space */
|
||||
addi sp, sp, __z_arch_esf_t_SIZEOF
|
||||
LOAD_CALLEE_SAVED()
|
||||
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
csrrw sp, mscratch, sp
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
/* Call SOC_ERET to exit ISR */
|
||||
SOC_ERET
|
||||
|
|
|
@ -27,6 +27,10 @@
|
|||
|
||||
/* thread_arch_t member offsets */
|
||||
GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
|
||||
#if defined(CONFIG_USERSPACE)
|
||||
GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
|
||||
GEN_OFFSET_SYM(_thread_arch_t, user_sp);
|
||||
#endif
|
||||
|
||||
/* struct coop member offsets */
|
||||
GEN_OFFSET_SYM(_callee_saved_t, sp);
|
||||
|
|
|
@ -9,3 +9,30 @@ config PMP_SLOT
|
|||
help
|
||||
Depends on the arch/board. Take care not to set a value higher
|
||||
than the hardware allows.
|
||||
|
||||
config PMP_POWER_OF_TWO_ALIGNMENT
|
||||
bool "Enable power of two alignment"
|
||||
default n
|
||||
select MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
|
||||
select GEN_PRIV_STACKS
|
||||
help
|
||||
This option reduces the number of PMP slots used but increases
|
||||
the memory consumption.
|
||||
|
||||
config PMP_STACK_GUARD
|
||||
bool "Thread Stack Guard"
|
||||
default n
|
||||
help
|
||||
Enable Thread Stack Guards via PMP
|
||||
|
||||
if PMP_STACK_GUARD
|
||||
|
||||
config PMP_STACK_GUARD_MIN_SIZE
|
||||
int "Guard size"
|
||||
default 16
|
||||
help
|
||||
Minimum size (and alignment when applicable) of a stack guard
|
||||
region, which guards the stack of a thread. The width of the
|
||||
guard is set to 4 to accommodate the RISC-V granularity.
|
||||
|
||||
endif # PMP_STACK_GUARD
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
|
||||
#include <kernel.h>
|
||||
#include <kernel_internal.h>
|
||||
#include <sys/__assert.h>
|
||||
#include "core_pmp.h"
|
||||
#include <arch/riscv/csr.h>
|
||||
#include <stdio.h>
|
||||
|
@ -145,8 +146,9 @@ int z_riscv_pmp_set(unsigned int index, ulong_t cfg_val, ulong_t addr_val)
|
|||
int pmpcfg_csr;
|
||||
int pmpaddr_csr;
|
||||
|
||||
if ((index >= PMP_SLOT_NUMBER) | (index < 0))
|
||||
if ((index >= PMP_SLOT_NUMBER) | (index < 0)) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Calculate PMP config/addr register, shift and mask */
|
||||
#ifdef CONFIG_64BIT
|
||||
|
@ -170,7 +172,6 @@ int z_riscv_pmp_set(unsigned int index, ulong_t cfg_val, ulong_t addr_val)
|
|||
|
||||
csr_write_enum(pmpaddr_csr, addr_val);
|
||||
csr_write_enum(pmpcfg_csr, reg_val);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -180,8 +181,9 @@ int pmp_get(unsigned int index, ulong_t *cfg_val, ulong_t *addr_val)
|
|||
int pmpcfg_csr;
|
||||
int pmpaddr_csr;
|
||||
|
||||
if ((index >= PMP_SLOT_NUMBER) | (index < 0))
|
||||
if ((index >= PMP_SLOT_NUMBER) | (index < 0)) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Calculate PMP config/addr register and shift */
|
||||
#ifdef CONFIG_64BIT
|
||||
|
@ -211,11 +213,439 @@ void z_riscv_pmp_print(unsigned int index)
|
|||
ulong_t cfg_val;
|
||||
ulong_t addr_val;
|
||||
|
||||
if (pmp_get(index, &cfg_val, &addr_val))
|
||||
if (pmp_get(index, &cfg_val, &addr_val)) {
|
||||
return;
|
||||
}
|
||||
#ifdef CONFIG_64BIT
|
||||
printf("PMP[%d] :\t%02lX %16lX\n", index, cfg_val, addr_val);
|
||||
#else
|
||||
printf("PMP[%d] :\t%02lX %08lX\n", index, cfg_val, addr_val);
|
||||
#endif /* CONFIG_64BIT */
|
||||
}
|
||||
|
||||
#if defined(CONFIG_USERSPACE)
|
||||
#include <linker/linker-defs.h>
|
||||
void z_riscv_init_user_accesses(struct k_thread *thread)
|
||||
{
|
||||
unsigned char index;
|
||||
unsigned char *uchar_pmpcfg;
|
||||
ulong_t rom_start = (ulong_t) _image_rom_start;
|
||||
#if defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT)
|
||||
ulong_t rom_size = (ulong_t) _image_rom_size;
|
||||
#else /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
|
||||
ulong_t rom_end = (ulong_t) _image_rom_end;
|
||||
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
|
||||
index = 0;
|
||||
uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;
|
||||
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
index++;
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
/* MCU state */
|
||||
thread->arch.u_pmpaddr[index] = TO_PMP_ADDR((ulong_t) &is_user_mode);
|
||||
uchar_pmpcfg[index++] = PMP_NA4 | PMP_R;
|
||||
#if defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT)
|
||||
/* Program and RO data */
|
||||
thread->arch.u_pmpaddr[index] = TO_PMP_NAPOT(rom_start, rom_size);
|
||||
uchar_pmpcfg[index++] = PMP_NAPOT | PMP_R | PMP_X;
|
||||
|
||||
/* RAM */
|
||||
thread->arch.u_pmpaddr[index] = TO_PMP_NAPOT(thread->stack_info.start,
|
||||
thread->stack_info.size);
|
||||
|
||||
uchar_pmpcfg[index++] = PMP_NAPOT | PMP_R | PMP_W;
|
||||
#else /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
|
||||
/* Program and RO data */
|
||||
thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(rom_start);
|
||||
uchar_pmpcfg[index++] = PMP_NA4 | PMP_R | PMP_X;
|
||||
thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(rom_end);
|
||||
uchar_pmpcfg[index++] = PMP_TOR | PMP_R | PMP_X;
|
||||
|
||||
/* RAM */
|
||||
thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(thread->stack_info.start);
|
||||
uchar_pmpcfg[index++] = PMP_NA4 | PMP_R | PMP_W;
|
||||
thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(thread->stack_info.start +
|
||||
thread->stack_info.size);
|
||||
uchar_pmpcfg[index++] = PMP_TOR | PMP_R | PMP_W;
|
||||
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
|
||||
}
|
||||
|
||||
void z_riscv_configure_user_allowed_stack(struct k_thread *thread)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
z_riscv_pmp_clear_config();
|
||||
|
||||
for (i = 0; i < CONFIG_PMP_SLOT; i++)
|
||||
csr_write_enum(CSR_PMPADDR0 + i, thread->arch.u_pmpaddr[i]);
|
||||
|
||||
for (i = 0; i < RISCV_PMP_CFG_NUM; i++)
|
||||
csr_write_enum(CSR_PMPCFG0 + i, thread->arch.u_pmpcfg[i]);
|
||||
}
|
||||
|
||||
void z_riscv_pmp_add_dynamic(struct k_thread *thread,
|
||||
ulong_t addr,
|
||||
ulong_t size,
|
||||
unsigned char flags)
|
||||
{
|
||||
unsigned char index = 0;
|
||||
unsigned char *uchar_pmpcfg;
|
||||
|
||||
/* Check 4 bytes alignment */
|
||||
__ASSERT(((addr & 0x3) == 0) && ((size & 0x3) == 0) && size,
|
||||
"address/size are not 4 bytes aligned\n");
|
||||
|
||||
/* Get next free entry */
|
||||
uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;
|
||||
|
||||
index = PMP_REGION_NUM_FOR_U_THREAD;
|
||||
|
||||
while ((index < CONFIG_PMP_SLOT) && uchar_pmpcfg[index]) {
|
||||
index++;
|
||||
}
|
||||
|
||||
__ASSERT((index < CONFIG_PMP_SLOT), "no free PMP entry\n");
|
||||
|
||||
/* Select the best type */
|
||||
if (size == 4) {
|
||||
thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(addr);
|
||||
uchar_pmpcfg[index] = flags | PMP_NA4;
|
||||
}
|
||||
#if !defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT)
|
||||
else if ((addr & (size - 1)) || (size & (size - 1))) {
|
||||
__ASSERT(((index + 1) < CONFIG_PMP_SLOT),
|
||||
"not enough free PMP entries\n");
|
||||
thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(addr);
|
||||
uchar_pmpcfg[index++] = flags | PMP_NA4;
|
||||
thread->arch.u_pmpaddr[index] = TO_PMP_ADDR(addr + size);
|
||||
uchar_pmpcfg[index++] = flags | PMP_TOR;
|
||||
}
|
||||
#endif /* !CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
|
||||
else {
|
||||
thread->arch.u_pmpaddr[index] = TO_PMP_NAPOT(addr, size);
|
||||
uchar_pmpcfg[index] = flags | PMP_NAPOT;
|
||||
}
|
||||
}
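A hypothetical usage sketch (the partition pointer is illustrative, not from the commit): a 4 KiB, 4 KiB-aligned read-only partition added for the current thread fits in a single NAPOT slot, while a non-power-of-two range would take the NA4 + TOR pair handled above.

/* Illustration only: grant the current thread read access to a partition. */
z_riscv_pmp_add_dynamic(_current, (ulong_t)part_start, KB(4), PMP_R);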
|
||||
|
||||
int arch_buffer_validate(void *addr, size_t size, int write)
|
||||
{
|
||||
uint32_t index, i;
|
||||
ulong_t pmp_type, pmp_addr_start, pmp_addr_stop;
|
||||
unsigned char *uchar_pmpcfg;
|
||||
struct k_thread *thread = _current;
|
||||
ulong_t start = (ulong_t) addr;
|
||||
ulong_t access_type = PMP_R;
|
||||
ulong_t napot_mask;
|
||||
#ifdef CONFIG_64BIT
|
||||
ulong_t max_bit = 64;
|
||||
#else
|
||||
ulong_t max_bit = 32;
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
if (write) {
|
||||
access_type |= PMP_W;
|
||||
}
|
||||
|
||||
uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;
|
||||
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
index = 1;
|
||||
#else
|
||||
index = 0;
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
#if !defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT) || defined(CONFIG_PMP_STACK_GUARD)
|
||||
__ASSERT((uchar_pmpcfg[index] & PMP_TYPE_MASK) != PMP_TOR,
|
||||
"The 1st PMP entry shouldn't configured as TOR");
|
||||
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT || CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
for (; (index < CONFIG_PMP_SLOT) && uchar_pmpcfg[index]; index++) {
|
||||
if ((uchar_pmpcfg[index] & access_type) != access_type) {
|
||||
continue;
|
||||
}
|
||||
|
||||
pmp_type = uchar_pmpcfg[index] & PMP_TYPE_MASK;
|
||||
|
||||
#if !defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT) || defined(CONFIG_PMP_STACK_GUARD)
|
||||
if (pmp_type == PMP_TOR) {
|
||||
continue;
|
||||
}
|
||||
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT || CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
if (pmp_type == PMP_NA4) {
|
||||
pmp_addr_start =
|
||||
FROM_PMP_ADDR(thread->arch.u_pmpaddr[index]);
|
||||
|
||||
if ((index == CONFIG_PMP_SLOT - 1) ||
|
||||
((uchar_pmpcfg[index + 1] & PMP_TYPE_MASK)
|
||||
!= PMP_TOR)) {
|
||||
pmp_addr_stop = pmp_addr_start + 4;
|
||||
} else {
|
||||
pmp_addr_stop = FROM_PMP_ADDR(
|
||||
thread->arch.u_pmpaddr[index + 1]);
|
||||
index++;
|
||||
}
|
||||
} else { /* pmp_type == PMP_NAPOT */
|
||||
for (i = 0; i < max_bit; i++) {
|
||||
if (!(thread->arch.u_pmpaddr[index] & (1 << i))) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
napot_mask = (1 << i) - 1;
|
||||
pmp_addr_start = FROM_PMP_ADDR(
|
||||
thread->arch.u_pmpaddr[index] & ~napot_mask);
|
||||
pmp_addr_stop = pmp_addr_start + (1 << (i + 3));
|
||||
}
|
||||
|
||||
if ((start >= pmp_addr_start) && ((start + size - 1) <
|
||||
pmp_addr_stop)) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
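The NAPOT branch above recovers a region's base and size from the encoded pmpaddr value; a stand-alone C sketch of that decode step (illustration only, mirroring the loop in arch_buffer_validate()) may make the bit manipulation easier to follow:

/* Decode a NAPOT pmpaddr value back into a start address and byte size. */
static void napot_decode(ulong_t pmpaddr, ulong_t *start, ulong_t *size)
{
        ulong_t i;

        /* The number of trailing one bits encodes the region size. */
        for (i = 0; i < (sizeof(ulong_t) * 8); i++) {
                if (!(pmpaddr & (1UL << i))) {
                        break;
                }
        }

        *start = (pmpaddr & ~((1UL << i) - 1)) << 2; /* FROM_PMP_ADDR */
        *size = 1UL << (i + 3);                      /* 8-byte minimum */
}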
|
||||
|
||||
int arch_mem_domain_max_partitions_get(void)
|
||||
{
|
||||
return PMP_MAX_DYNAMIC_REGION;
|
||||
}
|
||||
|
||||
void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
|
||||
uint32_t partition_id)
|
||||
{
|
||||
sys_dnode_t *node, *next_node;
|
||||
uint32_t index, i, num;
|
||||
ulong_t pmp_type, pmp_addr;
|
||||
unsigned char *uchar_pmpcfg;
|
||||
struct k_thread *thread;
|
||||
ulong_t size = (ulong_t) domain->partitions[partition_id].size;
|
||||
ulong_t start = (ulong_t) domain->partitions[partition_id].start;
|
||||
|
||||
if (size == 4) {
|
||||
pmp_type = PMP_NA4;
|
||||
pmp_addr = TO_PMP_ADDR(start);
|
||||
num = 1;
|
||||
}
|
||||
#if !defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT) || defined(CONFIG_PMP_STACK_GUARD)
|
||||
else if ((start & (size - 1)) || (size & (size - 1))) {
|
||||
pmp_type = PMP_TOR;
|
||||
pmp_addr = TO_PMP_ADDR(start + size);
|
||||
num = 2;
|
||||
}
|
||||
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT || CONFIG_PMP_STACK_GUARD */
|
||||
else {
|
||||
pmp_type = PMP_NAPOT;
|
||||
pmp_addr = TO_PMP_NAPOT(start, size);
|
||||
num = 1;
|
||||
}
|
||||
|
||||
node = sys_dlist_peek_head(&domain->mem_domain_q);
|
||||
if (!node) {
|
||||
return;
|
||||
}
|
||||
|
||||
thread = CONTAINER_OF(node, struct k_thread, mem_domain_info);
|
||||
|
||||
uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;
|
||||
for (index = PMP_REGION_NUM_FOR_U_THREAD;
|
||||
index < CONFIG_PMP_SLOT;
|
||||
index++) {
|
||||
if (((uchar_pmpcfg[index] & PMP_TYPE_MASK) == pmp_type) &&
|
||||
(pmp_addr == thread->arch.u_pmpaddr[index])) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
__ASSERT((index < CONFIG_PMP_SLOT), "partition not found\n");
|
||||
|
||||
#if !defined(CONFIG_PMP_POWER_OF_TWO_ALIGNMENT) || defined(CONFIG_PMP_STACK_GUARD)
|
||||
if (pmp_type == PMP_TOR) {
|
||||
index--;
|
||||
}
|
||||
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT || CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
SYS_DLIST_FOR_EACH_NODE_SAFE(&domain->mem_domain_q, node, next_node) {
|
||||
thread = CONTAINER_OF(node, struct k_thread, mem_domain_info);
|
||||
|
||||
uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;
|
||||
|
||||
for (i = index + num; i < CONFIG_PMP_SLOT; i++) {
|
||||
uchar_pmpcfg[i - num] = uchar_pmpcfg[i];
|
||||
thread->arch.u_pmpaddr[i - num] =
|
||||
thread->arch.u_pmpaddr[i];
|
||||
}
|
||||
|
||||
uchar_pmpcfg[CONFIG_PMP_SLOT - 1] = 0;
|
||||
if (num == 2) {
|
||||
uchar_pmpcfg[CONFIG_PMP_SLOT - 2] = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void arch_mem_domain_thread_add(struct k_thread *thread)
|
||||
{
|
||||
struct k_mem_partition *partition;
|
||||
|
||||
for (int i = 0, pcount = 0;
|
||||
pcount < thread->mem_domain_info.mem_domain->num_partitions;
|
||||
i++) {
|
||||
partition = &thread->mem_domain_info.mem_domain->partitions[i];
|
||||
if (partition->size == 0) {
|
||||
continue;
|
||||
}
|
||||
pcount++;
|
||||
|
||||
z_riscv_pmp_add_dynamic(thread, (ulong_t) partition->start,
|
||||
(ulong_t) partition->size, partition->attr.pmp_attr);
|
||||
}
|
||||
}
|
||||
|
||||
void arch_mem_domain_destroy(struct k_mem_domain *domain)
|
||||
{
|
||||
sys_dnode_t *node, *next_node;
|
||||
struct k_thread *thread;
|
||||
|
||||
SYS_DLIST_FOR_EACH_NODE_SAFE(&domain->mem_domain_q, node, next_node) {
|
||||
thread = CONTAINER_OF(node, struct k_thread, mem_domain_info);
|
||||
|
||||
arch_mem_domain_thread_remove(thread);
|
||||
}
|
||||
}
|
||||
|
||||
void arch_mem_domain_partition_add(struct k_mem_domain *domain,
|
||||
uint32_t partition_id)
|
||||
{
|
||||
sys_dnode_t *node, *next_node;
|
||||
struct k_thread *thread;
|
||||
struct k_mem_partition *partition;
|
||||
|
||||
partition = &domain->partitions[partition_id];
|
||||
|
||||
SYS_DLIST_FOR_EACH_NODE_SAFE(&domain->mem_domain_q, node, next_node) {
|
||||
thread = CONTAINER_OF(node, struct k_thread, mem_domain_info);
|
||||
|
||||
z_riscv_pmp_add_dynamic(thread, (ulong_t) partition->start,
|
||||
(ulong_t) partition->size, partition->attr.pmp_attr);
|
||||
}
|
||||
}
|
||||
|
||||
void arch_mem_domain_thread_remove(struct k_thread *thread)
|
||||
{
|
||||
uint32_t i;
|
||||
unsigned char *uchar_pmpcfg;
|
||||
|
||||
uchar_pmpcfg = (unsigned char *) thread->arch.u_pmpcfg;
|
||||
|
||||
for (i = PMP_REGION_NUM_FOR_U_THREAD; i < CONFIG_PMP_SLOT; i++) {
|
||||
uchar_pmpcfg[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
|
||||
void z_riscv_init_stack_guard(struct k_thread *thread)
|
||||
{
|
||||
unsigned char index = 0;
|
||||
unsigned char *uchar_pmpcfg;
|
||||
ulong_t stack_guard_addr;
|
||||
|
||||
uchar_pmpcfg = (unsigned char *) thread->arch.s_pmpcfg;
|
||||
|
||||
uchar_pmpcfg++;
|
||||
|
||||
/* stack guard: None */
|
||||
thread->arch.s_pmpaddr[index] = TO_PMP_ADDR(thread->stack_info.start);
|
||||
uchar_pmpcfg[index++] = PMP_NA4;
|
||||
thread->arch.s_pmpaddr[index] =
|
||||
TO_PMP_ADDR(thread->stack_info.start +
|
||||
PMP_GUARD_ALIGN_AND_SIZE);
|
||||
uchar_pmpcfg[index++] = PMP_TOR;
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
if (thread->arch.priv_stack_start) {
|
||||
#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
|
||||
stack_guard_addr = thread->arch.priv_stack_start;
|
||||
#else
|
||||
stack_guard_addr = (ulong_t) thread->stack_obj;
|
||||
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
|
||||
thread->arch.s_pmpaddr[index] =
|
||||
TO_PMP_ADDR(stack_guard_addr);
|
||||
uchar_pmpcfg[index++] = PMP_NA4;
|
||||
thread->arch.s_pmpaddr[index] =
|
||||
TO_PMP_ADDR(stack_guard_addr +
|
||||
PMP_GUARD_ALIGN_AND_SIZE);
|
||||
uchar_pmpcfg[index++] = PMP_TOR;
|
||||
}
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
/* RAM: RW */
|
||||
thread->arch.s_pmpaddr[index] = TO_PMP_ADDR(CONFIG_SRAM_BASE_ADDRESS |
|
||||
TO_NAPOT_RANGE(KB(CONFIG_SRAM_SIZE)));
|
||||
uchar_pmpcfg[index++] = (PMP_NAPOT | PMP_R | PMP_W);
|
||||
|
||||
/* All other memory: RWX */
|
||||
#ifdef CONFIG_64BIT
|
||||
thread->arch.s_pmpaddr[index] = 0x1FFFFFFFFFFFFFFF;
|
||||
#else
|
||||
thread->arch.s_pmpaddr[index] = 0x1FFFFFFF;
|
||||
#endif /* CONFIG_64BIT */
|
||||
uchar_pmpcfg[index] = PMP_NAPOT | PMP_R | PMP_W | PMP_X;
|
||||
}
|
||||
|
||||
void z_riscv_configure_stack_guard(struct k_thread *thread)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
/* Disable PMP for machine mode */
|
||||
csr_clear(mstatus, MSTATUS_MPRV);
|
||||
|
||||
z_riscv_pmp_clear_config();
|
||||
|
||||
for (i = 0; i < PMP_REGION_NUM_FOR_STACK_GUARD; i++)
|
||||
csr_write_enum(CSR_PMPADDR1 + i, thread->arch.s_pmpaddr[i]);
|
||||
|
||||
for (i = 0; i < PMP_CFG_CSR_NUM_FOR_STACK_GUARD; i++)
|
||||
csr_write_enum(CSR_PMPCFG0 + i, thread->arch.s_pmpcfg[i]);
|
||||
|
||||
/* Enable PMP for machine mode */
|
||||
csr_set(mstatus, MSTATUS_MPRV);
|
||||
}
|
||||
|
||||
void z_riscv_configure_interrupt_stack_guard(void)
|
||||
{
|
||||
if (PMP_GUARD_ALIGN_AND_SIZE > 4) {
|
||||
z_riscv_pmp_set(0, PMP_NAPOT | PMP_L,
|
||||
(ulong_t) z_interrupt_stacks[0] |
|
||||
TO_NAPOT_RANGE(PMP_GUARD_ALIGN_AND_SIZE));
|
||||
} else {
|
||||
z_riscv_pmp_set(0, PMP_NA4 | PMP_L,
|
||||
(ulong_t) z_interrupt_stacks[0]);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_USERSPACE)
|
||||
|
||||
void z_riscv_pmp_init_thread(struct k_thread *thread)
|
||||
{
|
||||
unsigned char i;
|
||||
ulong_t *pmpcfg;
|
||||
|
||||
#if defined(CONFIG_PMP_STACK_GUARD)
|
||||
pmpcfg = thread->arch.s_pmpcfg;
|
||||
for (i = 0; i < PMP_CFG_CSR_NUM_FOR_STACK_GUARD; i++)
|
||||
pmpcfg[i] = 0;
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
#if defined(CONFIG_USERSPACE)
|
||||
pmpcfg = thread->arch.u_pmpcfg;
|
||||
for (i = 0; i < RISCV_PMP_CFG_NUM; i++)
|
||||
pmpcfg[i] = 0;
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
}
|
||||
#endif /* CONFIG_PMP_STACK_GUARD || CONFIG_USERSPACE */
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <toolchain.h>
|
||||
#include <kernel_structs.h>
|
||||
#include <kernel_internal.h>
|
||||
#include <core_pmp.h>
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -37,6 +38,9 @@ void _PrepC(void)
|
|||
#endif
|
||||
#if defined(CONFIG_RISCV_SOC_INTERRUPT_INIT)
|
||||
soc_interrupt_init();
|
||||
#endif
|
||||
#ifdef CONFIG_PMP_STACK_GUARD
|
||||
z_riscv_configure_interrupt_stack_guard();
|
||||
#endif
|
||||
z_cstart();
|
||||
CODE_UNREACHABLE;
|
||||
|
|
|
@ -82,6 +82,8 @@ aa_loop:
|
|||
li t0, CONFIG_ISR_STACK_SIZE
|
||||
add sp, sp, t0
|
||||
|
||||
csrw mscratch, sp
|
||||
|
||||
#ifdef CONFIG_WDOG_INIT
|
||||
call _WdogInit
|
||||
#endif
|
||||
|
|
|
@ -1,11 +1,25 @@
|
|||
/*
|
||||
* Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
|
||||
* Copyright (c) 2020 BayLibre, SAS
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
#include <kernel.h>
|
||||
#include <ksched.h>
|
||||
#include <arch/riscv/csr.h>
|
||||
#include <stdio.h>
|
||||
#include <core_pmp.h>
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
|
||||
/*
|
||||
* Global variable used to know the current running mode (user or machine).
|
||||
* It is not a boolean because it must match the PMP granularity of the arch.
|
||||
*/
|
||||
ulong_t is_user_mode;
|
||||
bool irq_flag;
|
||||
#endif
|
||||
|
||||
void z_thread_entry_wrapper(k_thread_entry_t thread,
|
||||
void *arg1,
|
||||
|
@ -59,14 +73,45 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
|
|||
* thread stack.
|
||||
*/
|
||||
stack_init->mstatus = MSTATUS_DEF_RESTORE;
|
||||
|
||||
#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_USERSPACE)
|
||||
z_riscv_pmp_init_thread(thread);
|
||||
#endif /* CONFIG_PMP_STACK_GUARD || CONFIG_USERSPACE */
|
||||
|
||||
#if defined(CONFIG_PMP_STACK_GUARD)
|
||||
if ((thread->base.user_options & K_USER) == 0) {
|
||||
/* Enable PMP for machine mode if the thread is not a user thread */
|
||||
stack_init->mstatus |= MSTATUS_MPRV;
|
||||
}
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
|
||||
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
|
||||
if ((thread->base.user_options & K_FP_REGS) != 0) {
|
||||
stack_init->mstatus |= MSTATUS_FS_INIT;
|
||||
}
|
||||
stack_init->fp_state = 0;
|
||||
#endif
|
||||
|
||||
stack_init->mepc = (ulong_t)z_thread_entry_wrapper;
|
||||
|
||||
#if defined(CONFIG_USERSPACE)
|
||||
thread->arch.priv_stack_start = 0;
|
||||
thread->arch.user_sp = 0;
|
||||
if ((thread->base.user_options & K_USER) != 0) {
|
||||
stack_init->mepc = (ulong_t)k_thread_user_mode_enter;
|
||||
} else {
|
||||
stack_init->mepc = (ulong_t)z_thread_entry_wrapper;
|
||||
#if defined(CONFIG_PMP_STACK_GUARD)
|
||||
z_riscv_init_stack_guard(thread);
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
}
|
||||
#else
|
||||
stack_init->mepc = (ulong_t)z_thread_entry_wrapper;
|
||||
#if defined(CONFIG_PMP_STACK_GUARD)
|
||||
z_riscv_init_stack_guard(thread);
|
||||
#endif /* CONFIG_PMP_STACK_GUARD */
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
|
||||
stack_init->soc_context = soc_esf_init;
|
||||
#endif
|
||||
|
@ -138,3 +183,97 @@ int arch_float_enable(struct k_thread *thread)
	return 0;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#ifdef CONFIG_USERSPACE

/* Function used by Zephyr to switch a supervisor thread to a user thread */
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
{
	arch_syscall_invoke5((uintptr_t) arch_user_mode_enter,
			     (uintptr_t) user_entry,
			     (uintptr_t) p1,
			     (uintptr_t) p2,
			     (uintptr_t) p3,
			     FORCE_SYSCALL_ID);

	CODE_UNREACHABLE;
}

/*
 * User space entry function
 *
 * This function is the entry point to user mode from privileged execution.
 * The conversion is one way, and threads which transition to user mode do
 * not transition back later, unless they are doing system calls.
 */
FUNC_NORETURN void z_riscv_user_mode_enter_syscall(k_thread_entry_t user_entry,
						   void *p1, void *p2, void *p3)
{
	ulong_t top_of_user_stack = 0U;
	uintptr_t status;

	/* Set up privileged stack */
#ifdef CONFIG_GEN_PRIV_STACKS
	_current->arch.priv_stack_start =
		(uint32_t)z_priv_stack_find(_current->stack_obj);
#else
	_current->arch.priv_stack_start =
		(uint32_t)(_current->stack_obj) +
		Z_RISCV_STACK_GUARD_SIZE;
#endif /* CONFIG_GEN_PRIV_STACKS */

	top_of_user_stack = Z_STACK_PTR_ALIGN(
				_current->stack_info.start +
				_current->stack_info.size -
				_current->stack_info.delta);

	/* Set next CPU status to user mode */
	status = csr_read(mstatus);
	status = INSERT_FIELD(status, MSTATUS_MPP, PRV_U);
	status = INSERT_FIELD(status, MSTATUS_MPRV, 0);

	csr_write(mstatus, status);
	csr_write(mepc, z_thread_entry_wrapper);

	/* Set up Physical Memory Protection */
#if defined(CONFIG_PMP_STACK_GUARD)
	z_riscv_init_stack_guard(_current);
#endif

	z_riscv_init_user_accesses(_current);
	z_riscv_configure_user_allowed_stack(_current);

	is_user_mode = true;

	__asm__ volatile ("mv a0, %1"
			  : "=r" (user_entry)
			  : "r" (user_entry)
			  : "memory");

	__asm__ volatile ("mv a1, %1"
			  : "=r" (p1)
			  : "r" (p1)
			  : "memory");

	__asm__ volatile ("mv a2, %1"
			  : "=r" (p2)
			  : "r" (p2)
			  : "memory");

	__asm__ volatile ("mv a3, %1"
			  : "=r" (p3)
			  : "r" (p3)
			  : "memory");

	__asm__ volatile ("mv sp, %1"
			  : "=r" (top_of_user_stack)
			  : "r" (top_of_user_stack)
			  : "memory");

	__asm__ volatile ("mret");

	CODE_UNREACHABLE;
}

#endif /* CONFIG_USERSPACE */
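
Illustrative note (not part of the patch): the two INSERT_FIELD() calls above are what actually demote the hart. MPP is set to user so the final mret drops to U-mode, and MPRV is cleared so machine-mode loads and stores stop being filtered by the user PMP rules. A host-runnable sketch of the same bit manipulation, assuming the standard mstatus layout from the RISC-V privileged spec (MPP at bits 12:11, MPRV at bit 17) and a hypothetical insert_field() helper standing in for the kernel macro:

#include <stdint.h>
#include <stdio.h>

/* Assumed mstatus field positions (RISC-V privileged spec). */
#define MSTATUS_MPP_SHIFT  11
#define MSTATUS_MPP_MASK   (0x3UL << MSTATUS_MPP_SHIFT)   /* previous privilege */
#define MSTATUS_MPRV_SHIFT 17
#define MSTATUS_MPRV_MASK  (0x1UL << MSTATUS_MPRV_SHIFT)  /* modify-privilege bit */
#define PRV_U              0UL                             /* user mode encoding */

/* Hypothetical stand-in for the kernel's INSERT_FIELD() macro. */
static unsigned long insert_field(unsigned long reg, unsigned long mask,
				  unsigned long shift, unsigned long val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	/* Pretend mstatus currently has MPP = machine (3) and MPRV set. */
	unsigned long mstatus = (3UL << MSTATUS_MPP_SHIFT) | MSTATUS_MPRV_MASK;

	mstatus = insert_field(mstatus, MSTATUS_MPP_MASK, MSTATUS_MPP_SHIFT, PRV_U);
	mstatus = insert_field(mstatus, MSTATUS_MPRV_MASK, MSTATUS_MPRV_SHIFT, 0);

	/* After mret, the hart would now resume in U-mode at mepc. */
	printf("mstatus = 0x%lx\n", mstatus); /* prints 0x0 for this toy value */
	return 0;
}
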
arch/riscv/core/userspace.S (new file, 71 lines)
@ -0,0 +1,71 @@
/*
 * Userspace and service handler hooks
 *
 * Copyright (c) 2020 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <toolchain.h>
#include <linker/sections.h>
#include <offsets_short.h>
#include <arch/cpu.h>
#include <syscall.h>
#include <kernel_structs.h>
#include <arch/riscv/csr.h>

/* exports */
GTEXT(z_riscv_do_syscall)
GTEXT(arch_user_string_nlen)
GTEXT(z_riscv_user_string_nlen_fault_start)
GTEXT(z_riscv_user_string_nlen_fault_end)
GTEXT(z_riscv_user_string_nlen_fixup)
GTEXT(z_riscv_do_syscall_start)
GTEXT(z_riscv_do_syscall_end)

/* Imports */
GDATA(_k_syscall_table)

SECTION_FUNC(TEXT, z_riscv_do_syscall)
	la t0, _k_syscall_table

	slli t1, a7, RV_REGSHIFT	# Determine offset from index value
	add t0, t0, t1			# Table addr + offset = function addr
	RV_OP_LOADREG t3, 0x00(t0)	# Load function address

	/* Execute syscall function */
	jalr t3

	/* Return to the ISR environment to switch back to user mode */
z_riscv_do_syscall_start:
	ECALL
z_riscv_do_syscall_end:

/*
 * size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err_arg)
 */
SECTION_FUNC(TEXT, arch_user_string_nlen)
	li a5, 0		# Counter
	sw a5, 0(a2)		# Init error value to 0

loop:
	add a4, a0, a5		# Determine character address
z_riscv_user_string_nlen_fault_start:
	lbu a4, 0(a4)		# Load string's character
z_riscv_user_string_nlen_fault_end:
	beqz a4, exit		# Test for end of string

	bne a5, a1, continue	# Check if max length is reached

exit:
	mv a0, a5		# Return counter value (length)
	ret

continue:
	addi a5, a5, 1		# Increment counter
	j loop

z_riscv_user_string_nlen_fixup:
	li a4, -1		# Set error value to -1
	sw a4, 0(a2)
	j exit
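
Illustrative note (not part of the patch): the arch_user_string_nlen routine above is essentially a bounded strlen whose possibly-faulting load is rescued by the fixup entry registered in the fatal-error handler. A rough, host-runnable C equivalent, written as a sketch (the PMP-fault/fixup path is replaced by a plain bounds check, and the function name is invented):

#include <stddef.h>
#include <stdio.h>

/*
 * Illustrative C equivalent: count characters until NUL or maxsize,
 * reporting an error through err_arg. In the real assembly a bad user
 * pointer triggers a PMP fault on the load and the fixup label writes
 * *err_arg = -1; that path is omitted here.
 */
static size_t user_string_nlen_sketch(const char *s, size_t maxsize, int *err_arg)
{
	size_t len = 0;

	*err_arg = 0;

	while (len < maxsize && s[len] != '\0') {
		len++;
	}

	return len;	/* min(strlen(s), maxsize), like the assembly */
}

int main(void)
{
	int err;
	size_t n = user_string_nlen_sketch("hello", 64, &err);

	printf("len=%zu err=%d\n", n, err); /* len=5 err=0 */
	return 0;
}
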
@ -26,6 +26,16 @@
#define TO_PMP_NAPOT(addr, size)	TO_PMP_ADDR(addr | \
					TO_NAPOT_RANGE(size))
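
Illustrative note (not part of the patch): TO_PMP_NAPOT() builds the pmpaddr value for a naturally-aligned power-of-two (NAPOT) region, which is what lets a single PMP slot cover a whole power-of-two stack. The macro bodies are not visible in this hunk, so the helpers below only assume the usual RISC-V encoding (pmpaddr holds address bits [XLEN-1:2] and the low bits encode the size):

#include <stdint.h>
#include <stdio.h>

/* Illustrative equivalents of the header's macros (assumed semantics). */
#define TO_PMP_ADDR(addr)        ((addr) >> 2)        /* pmpaddr drops bits 1:0 */
#define TO_NAPOT_RANGE(size)     (((size) >> 1) - 1)  /* size: power of two >= 8 */
#define TO_PMP_NAPOT(addr, size) TO_PMP_ADDR((addr) | TO_NAPOT_RANGE(size))

int main(void)
{
	/* A 4 KiB stack at 0x80010000 fits in one NAPOT-encoded PMP slot. */
	uintptr_t base = 0x80010000UL;
	uintptr_t size = 0x1000UL;

	printf("pmpaddr = 0x%lx\n", (unsigned long)TO_PMP_NAPOT(base, size));
	/* -> 0x200041ff: base >> 2 with the low bits set to mark a 4 KiB range */
	return 0;
}
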

#ifdef CONFIG_PMP_STACK_GUARD

#define PMP_GUARD_ALIGN_AND_SIZE CONFIG_PMP_STACK_GUARD_MIN_SIZE

#else

#define PMP_GUARD_ALIGN_AND_SIZE 0

#endif /* CONFIG_PMP_STACK_GUARD */

#ifdef CONFIG_RISCV_PMP

/*

@ -54,4 +64,79 @@ void z_riscv_pmp_clear_config(void);
void z_riscv_pmp_print(unsigned int index);
#endif /* CONFIG_RISCV_PMP */

#if defined(CONFIG_USERSPACE)

/*
 * @brief Configure RISCV user thread access to the stack
 *
 * Determine and save the allowed-access setup in the thread structure.
 *
 * @param thread Thread info data pointer.
 */
void z_riscv_init_user_accesses(struct k_thread *thread);

/*
 * @brief Apply RISCV user thread access to the stack
 *
 * Write the user access setup saved in this thread structure.
 *
 * @param thread Thread info data pointer.
 */
void z_riscv_configure_user_allowed_stack(struct k_thread *thread);

/*
 * @brief Add a new RISCV stack access
 *
 * Add a new memory permission area in the existing
 * PMP setup of the thread.
 *
 * @param thread Thread info data pointer.
 * @param addr Start address of the memory area.
 * @param size Size of the memory area.
 * @param flags Permissions: PMP_R, PMP_W, PMP_X, PMP_L
 */
void z_riscv_pmp_add_dynamic(struct k_thread *thread,
			     ulong_t addr,
			     ulong_t size,
			     unsigned char flags);
#endif /* CONFIG_USERSPACE */
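
Illustrative note (not part of the patch): z_riscv_pmp_add_dynamic() is the internal hook used to grant a thread one extra memory area; per the commit message a power-of-two area costs a single NAPOT slot and anything else costs two. A hedged, kernel-side sketch of a call (the wrapper function, buffer address, and size are invented for the example):

#include <kernel.h>
#include <core_pmp.h>

/*
 * Illustrative only: give a thread read/write access to a 4 KiB shared
 * buffer. Because 0x1000 is a power of two and the base is aligned,
 * this should fit in a single NAPOT-encoded PMP slot.
 */
void grant_shared_buffer_access(struct k_thread *thread)
{
	const ulong_t buf_addr = 0x80020000UL;	/* hypothetical base   */
	const ulong_t buf_size = 0x1000UL;	/* hypothetical length */

	z_riscv_pmp_add_dynamic(thread, buf_addr, buf_size, PMP_R | PMP_W);
}
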

#ifdef CONFIG_PMP_STACK_GUARD

/*
 * @brief Configure RISCV stack guard for interrupt stack
 *
 * Write PMP registers to prevent RWX access from all privilege modes.
 */
void z_riscv_configure_interrupt_stack_guard(void);

/*
 * @brief Configure RISCV stack guard
 *
 * Determine and save stack guard setup in thread structure.
 *
 * @param thread Thread info data pointer.
 */
void z_riscv_init_stack_guard(struct k_thread *thread);

/*
 * @brief Apply RISCV stack guard
 *
 * Write stack guard setup saved in this thread structure.
 *
 * @param thread Thread info data pointer.
 */
void z_riscv_configure_stack_guard(struct k_thread *thread);
#endif /* CONFIG_PMP_STACK_GUARD */

#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_USERSPACE)

/*
 * @brief Initialize thread PMP setup value to 0
 *
 * @param thread Thread info data pointer.
 */
void z_riscv_pmp_init_thread(struct k_thread *thread);
#endif /* CONFIG_PMP_STACK_GUARD || CONFIG_USERSPACE */

#endif /* CORE_PMP_H_ */

@ -40,6 +40,11 @@ static inline bool arch_is_in_isr(void)
	return _kernel.cpus[0].nested != 0U;
}

extern FUNC_NORETURN void z_riscv_userspace_enter(k_thread_entry_t user_entry,
						  void *p1, void *p2, void *p3,
						  uint32_t stack_end,
						  uint32_t stack_start);

#ifdef CONFIG_IRQ_OFFLOAD
int z_irq_do_offload(void);
#endif

@ -102,6 +102,13 @@

#endif /* defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) */

#ifdef CONFIG_USERSPACE
#define _thread_offset_to_priv_stack_start \
	(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET)
#define _thread_offset_to_user_sp \
	(___thread_t_arch_OFFSET + ___thread_arch_t_user_sp_OFFSET)
#endif

/* end - threads */

#endif /* ZEPHYR_ARCH_RISCV_INCLUDE_OFFSETS_SHORT_ARCH_H_ */

@ -20,15 +20,172 @@
#include <arch/common/sys_bitops.h>
#include <arch/common/sys_io.h>
#include <arch/common/ffs.h>

#if defined(CONFIG_USERSPACE)
#include <arch/riscv/syscall.h>
#endif /* CONFIG_USERSPACE */
#include <irq.h>
#include <sw_isr_table.h>
#include <soc.h>
#include <devicetree.h>
#include <arch/riscv/csr.h>

/* stacks, for RISCV architecture stack should be 16byte-aligned */
#define ARCH_STACK_PTR_ALIGN 16

#ifdef CONFIG_PMP_STACK_GUARD
#define Z_RISCV_PMP_ALIGN CONFIG_PMP_STACK_GUARD_MIN_SIZE
#define Z_RISCV_STACK_GUARD_SIZE Z_RISCV_PMP_ALIGN
#else
#define Z_RISCV_PMP_ALIGN 4
#define Z_RISCV_STACK_GUARD_SIZE 0
#endif

/* Kernel-only stacks have the following layout if a stack guard is enabled:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.stack_info.start
 * | Kernel     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#ifdef CONFIG_PMP_STACK_GUARD
#define ARCH_KERNEL_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_RISCV_PMP_ALIGN
#endif

#ifdef CONFIG_USERSPACE
/* Any thread running in user mode will have full access to the region denoted
 * by thread.stack_info.
 *
 * Thread-local storage is at the very highest memory locations of this area.
 * Memory for TLS and any initial random stack pointer offset is captured
 * in thread.stack_info.delta.
 */
#ifdef CONFIG_PMP_STACK_GUARD
#ifdef CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
/* Use defaults for everything. The privilege elevation stack is located
 * in another area of memory generated at build time by gen_kobject_list.py
 *
 * +------------+ <- thread.arch.priv_stack_start
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE - Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.arch.priv_stack_start +
 *                   CONFIG_PRIVILEGED_STACK_SIZE
 *
 * +------------+ <- thread.stack_obj = thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	Z_POW2_CEIL(ROUND_UP((size), Z_RISCV_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
	ARCH_THREAD_STACK_SIZE_ADJUST(size)
#define ARCH_THREAD_STACK_RESERVED 0
#else /* !CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
/* The stack object will contain the PMP guard, the privilege stack, and then
 * the stack buffer in that order:
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.arch.priv_stack_start
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED (Z_RISCV_STACK_GUARD_SIZE + \
				    CONFIG_PRIVILEGED_STACK_SIZE)
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_RISCV_PMP_ALIGN
/* We need to be able to exactly cover the stack buffer with a PMP region,
 * so round its size up to the required granularity of the PMP.
 */
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	(ROUND_UP((size), Z_RISCV_PMP_ALIGN))

#endif /* CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
#else /* !CONFIG_PMP_STACK_GUARD */
#ifdef CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
/* Use defaults for everything. The privilege elevation stack is located
 * in another area of memory generated at build time by gen_kobject_list.py
 *
 * +------------+ <- thread.arch.priv_stack_start
 * | Priv Stack | } Z_KERNEL_STACK_LEN(CONFIG_PRIVILEGED_STACK_SIZE)
 * +------------+
 *
 * +------------+ <- thread.stack_obj = thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	Z_POW2_CEIL(ROUND_UP((size), Z_RISCV_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
	ARCH_THREAD_STACK_SIZE_ADJUST(size)
#define ARCH_THREAD_STACK_RESERVED 0
#else /* !CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
/* Userspace enabled, but supervisor stack guards are not in use */

/* Reserved area of the thread object just contains the privilege stack:
 *
 * +------------+ <- thread.stack_obj = thread.arch.priv_stack_start
 * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
 * +------------+ <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED CONFIG_PRIVILEGED_STACK_SIZE
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	(ROUND_UP((size), Z_RISCV_PMP_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_RISCV_PMP_ALIGN

#endif /* CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
#endif /* CONFIG_PMP_STACK_GUARD */

#else /* !CONFIG_USERSPACE */

#ifdef CONFIG_PMP_STACK_GUARD
/* Reserve some memory for the stack guard.
 * This is just a minimally-sized region at the beginning of the stack
 * object, which is programmed to produce an exception if written to.
 *
 * +------------+ <- thread.stack_obj
 * | Guard      | } Z_RISCV_STACK_GUARD_SIZE
 * +------------+ <- thread.stack_info.start
 * | Thread     |
 * | stack      |
 * |            |
 * +............|
 * | TLS        | } thread.stack_info.delta
 * +------------+ <- thread.stack_info.start + thread.stack_info.size
 */
#define ARCH_THREAD_STACK_RESERVED Z_RISCV_STACK_GUARD_SIZE
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_RISCV_PMP_ALIGN
/* Default for ARCH_THREAD_STACK_SIZE_ADJUST */
#else /* !CONFIG_PMP_STACK_GUARD */
/* No stack guard, no userspace: use defaults for everything. */
#endif /* CONFIG_PMP_STACK_GUARD */
#endif /* CONFIG_USERSPACE */
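
Illustrative note (not part of the patch): the power-of-two variants above exist because a single NAPOT PMP entry can only describe a naturally-aligned power-of-two region, so the stack object is padded and aligned with Z_POW2_CEIL(ROUND_UP(...)); the TOR-style variants only pad to the PMP granularity. A host-runnable sketch of what that does to a requested stack size (the helpers mirror the macros by assumption):

#include <stdio.h>
#include <stddef.h>

#define PMP_ALIGN 4U	/* stand-in for Z_RISCV_PMP_ALIGN without a stack guard */

static size_t round_up(size_t x, size_t align)	/* stand-in for ROUND_UP */
{
	return (x + align - 1) & ~(align - 1);
}

static size_t pow2_ceil(size_t x)		/* stand-in for Z_POW2_CEIL */
{
	size_t p = 1;

	while (p < x) {
		p <<= 1;
	}
	return p;
}

int main(void)
{
	size_t requested[] = { 1000, 1024, 1100, 4096 };

	for (size_t i = 0; i < 4; i++) {
		size_t tor   = round_up(requested[i], PMP_ALIGN);
		size_t napot = pow2_ceil(round_up(requested[i], PMP_ALIGN));

		printf("%4zu -> TOR: %4zu  NAPOT: %4zu\n", requested[i], tor, napot);
	}
	return 0;	/* e.g. 1100 -> TOR: 1100  NAPOT: 2048 */
}
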

#ifdef CONFIG_64BIT
#define RV_OP_LOADREG ld
#define RV_OP_STOREREG sd

@ -87,6 +244,40 @@ extern "C" {
#define DO_CONCAT(x, y) x ## y
#define CONCAT(x, y) DO_CONCAT(x, y)

/* Kernel macros for memory attribution
 * (access permissions and cache-ability).
 *
 * The macros are to be stored in k_mem_partition_attr_t
 * objects. The format of a k_mem_partition_attr_t object
 * is a uint8_t composed of the configuration register flags
 * located in arch/riscv/include/core_pmp.h
 */

/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW	((k_mem_partition_attr_t) \
					{PMP_R | PMP_W})
#define K_MEM_PARTITION_P_RW_U_RO	((k_mem_partition_attr_t) \
					{PMP_R})
#define K_MEM_PARTITION_P_RW_U_NA	((k_mem_partition_attr_t) \
					{0})
#define K_MEM_PARTITION_P_RO_U_RO	((k_mem_partition_attr_t) \
					{PMP_R})
#define K_MEM_PARTITION_P_RO_U_NA	((k_mem_partition_attr_t) \
					{0})
#define K_MEM_PARTITION_P_NA_U_NA	((k_mem_partition_attr_t) \
					{0})

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX	((k_mem_partition_attr_t) \
					{PMP_R | PMP_W | PMP_X})
#define K_MEM_PARTITION_P_RX_U_RX	((k_mem_partition_attr_t) \
					{PMP_R | PMP_X})

/* Typedef for the k_mem_partition attribute */
typedef struct {
	uint8_t pmp_attr;
} k_mem_partition_attr_t;
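
Illustrative note (not part of the patch): these attributes are what end up in a struct k_mem_partition when application code builds a memory domain for a user thread. A hedged usage sketch with the generic Zephyr app-memory API (the partition and buffer names are invented; app partitions are assumed to default to the RW/RW attribute):

#include <kernel.h>
#include <app_memory/app_memdomain.h>

/* Illustrative shared buffer placed in an application memory partition. */
K_APPMEM_PARTITION_DEFINE(shared_part);
K_APP_DMEM(shared_part) uint8_t shared_buf[256];

static struct k_mem_domain app_domain;

void setup_domain(struct k_thread *user_thread)
{
	struct k_mem_partition *parts[] = { &shared_part };

	/* Once the domain is applied, shared_part carries a
	 * K_MEM_PARTITION_P_RW_U_RW-style attribute, i.e. PMP_R | PMP_W
	 * for the user thread.
	 */
	k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);
	k_mem_domain_add_thread(&app_domain, user_thread);
}
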

/*
 * SOC-specific function to get the IRQ number generating the interrupt.
 * __soc_get_irq returns a bitfield of pending IRQs.

@ -168,6 +359,10 @@ static inline uint32_t arch_k_cycle_get_32(void)
	return z_timer_cycle_get_32();
}

#ifdef CONFIG_USERSPACE
#include <arch/riscv/error.h>
#endif /* CONFIG_USERSPACE */

#ifdef __cplusplus
}
#endif

include/arch/riscv/error.h (new file, 62 lines)
@ -0,0 +1,62 @@
/*
 * Copyright (c) 2020 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief RISCV public error handling
 *
 * RISCV-specific kernel error handling interface. Included by riscv/arch.h.
 */

#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_ERROR_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_ERROR_H_

#include <arch/riscv/syscall.h>
#include <arch/riscv/exp.h>
#include <stdbool.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_USERSPACE

/*
 * Kernel features like the canary (software stack guard) are built with an
 * option that skips the usual user/kernel test before a syscall and calls
 * the implementation directly. When such code wants to trigger a CPU
 * exception, the generated syscall stub is useless because the function is
 * called directly even while the CPU is running in user mode (which happens
 * during the sanity checks). To fix that, the user/kernel test is written
 * out explicitly here instead of relying on the bypassable generated code.
 */

#define ARCH_EXCEPT(reason_p) do {			\
	if (_is_user_context()) {			\
		arch_syscall_invoke1(reason_p,		\
			K_SYSCALL_USER_FAULT);		\
	} else {					\
		compiler_barrier();			\
		z_impl_user_fault(reason_p);		\
	}						\
	CODE_UNREACHABLE;				\
} while (false)
#else
#define ARCH_EXCEPT(reason_p) do {			\
	z_impl_user_fault(reason_p);			\
} while (false)
#endif

__syscall void user_fault(unsigned int reason);

#include <syscalls/error.h>

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_ERROR_H_ */
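
Illustrative note (not part of the patch): ARCH_EXCEPT() is what k_oops() and k_panic() ultimately expand to, so the branch above decides whether the reason code reaches the kernel through the user_fault syscall or through a direct call. A hedged sketch of a call site (the thread function is invented):

#include <kernel.h>

/*
 * Illustrative user-mode thread entry: k_oops() expands (via
 * z_except_reason()) to ARCH_EXCEPT(K_ERR_KERNEL_OOPS). Because
 * _is_user_context() is true here, the reason code reaches the kernel
 * through the user_fault syscall instead of a direct function call.
 */
void user_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	if (p1 == NULL) {
		k_oops();	/* aborts the offending thread */
	}
}
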
include/arch/riscv/syscall.h (new file, 164 lines)
@ -0,0 +1,164 @@
/*
 * Copyright (c) 2020 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief RISCV specific syscall header
 *
 * This header contains the RISCV specific syscall interface. It is
 * included by the syscall interface architecture-abstraction header
 * (include/arch/syscall.h)
 */

#ifndef ZEPHYR_INCLUDE_ARCH_RISCV_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_RISCV_SYSCALL_H_

#define _SVC_CALL_CONTEXT_SWITCH	0
#define _SVC_CALL_IRQ_OFFLOAD		1
#define _SVC_CALL_RUNTIME_EXCEPT	2
#define _SVC_CALL_SYSTEM_CALL		3

#define FORCE_SYSCALL_ID		-1

#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE

#include <zephyr/types.h>
#include <stdbool.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Syscall invocation macros. RISC-V specific machine constraints are used
 * to ensure the arguments land in the proper registers.
 */
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
	register uint32_t a0 __asm__ ("a0") = arg1;
	register uint32_t a1 __asm__ ("a1") = arg2;
	register uint32_t a2 __asm__ ("a2") = arg3;
	register uint32_t a3 __asm__ ("a3") = arg4;
	register uint32_t a4 __asm__ ("a4") = arg5;
	register uint32_t a5 __asm__ ("a5") = arg6;
	register uint32_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "+r" (a0)
			  : "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5),
			    "r" (a7)
			  : "memory");
	return a0;
}

static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id)
{
	register uint32_t a0 __asm__ ("a0") = arg1;
	register uint32_t a1 __asm__ ("a1") = arg2;
	register uint32_t a2 __asm__ ("a2") = arg3;
	register uint32_t a3 __asm__ ("a3") = arg4;
	register uint32_t a4 __asm__ ("a4") = arg5;
	register uint32_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "+r" (a0)
			  : "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a7)
			  : "memory");
	return a0;
}

static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
	register uint32_t a0 __asm__ ("a0") = arg1;
	register uint32_t a1 __asm__ ("a1") = arg2;
	register uint32_t a2 __asm__ ("a2") = arg3;
	register uint32_t a3 __asm__ ("a3") = arg4;
	register uint32_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "+r" (a0)
			  : "r" (a1), "r" (a2), "r" (a3), "r" (a7)
			  : "memory");
	return a0;
}

static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id)
{
	register uint32_t a0 __asm__ ("a0") = arg1;
	register uint32_t a1 __asm__ ("a1") = arg2;
	register uint32_t a2 __asm__ ("a2") = arg3;
	register uint32_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "+r" (a0)
			  : "r" (a1), "r" (a2), "r" (a7)
			  : "memory");
	return a0;
}

static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
	register uint32_t a0 __asm__ ("a0") = arg1;
	register uint32_t a1 __asm__ ("a1") = arg2;
	register uint32_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "+r" (a0)
			  : "r" (a1), "r" (a7)
			  : "memory");
	return a0;
}

static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id)
{
	register uint32_t a0 __asm__ ("a0") = arg1;
	register uint32_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "+r" (a0)
			  : "r" (a7)
			  : "memory");
	return a0;
}

static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
	register uint32_t a0 __asm__ ("a0");
	register uint32_t a7 __asm__ ("a7") = call_id;

	__asm__ volatile ("ecall"
			  : "+r" (a0)
			  : "r" (a7)
			  : "memory");
	return a0;
}

static inline bool arch_is_user_context(void)
{
	/* Defined in arch/riscv/core/thread.c */
	extern ulong_t is_user_mode;
	return is_user_mode;
}

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_ARCH_RISCV_SYSCALL_H_ */
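
Illustrative note (not part of the patch): callers normally never use these helpers directly; Zephyr's generated syscall stubs check arch_is_user_context() and either trap with the matching arch_syscall_invoke*() or call the z_impl_ handler. A simplified, hand-written sketch of that shape for a hypothetical two-argument syscall (the real generated code performs additional verification):

#include <kernel.h>
#include <syscall.h>

/* Hypothetical syscall number and kernel-side implementation. */
#define K_SYSCALL_MY_DEV_WRITE 42

extern int z_impl_my_dev_write(uint32_t chan, uint32_t value);

static inline int my_dev_write(uint32_t chan, uint32_t value)
{
	if (arch_is_user_context()) {
		/* ecall with a0=chan, a1=value, a7=K_SYSCALL_MY_DEV_WRITE */
		return (int)arch_syscall_invoke2((uintptr_t)chan,
						 (uintptr_t)value,
						 K_SYSCALL_MY_DEV_WRITE);
	}

	return z_impl_my_dev_write(chan, value);
}
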
@ -30,6 +30,71 @@
#endif
#endif

#ifdef CONFIG_RISCV_PMP
#ifdef CONFIG_64BIT
#define RISCV_PMP_CFG_NUM (CONFIG_PMP_SLOT >> 3)
#else
#define RISCV_PMP_CFG_NUM (CONFIG_PMP_SLOT >> 2)
#endif
#endif
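
Illustrative note (not part of the patch): RISCV_PMP_CFG_NUM is simply the number of pmpcfg CSR words needed to hold one 8-bit configuration field per PMP slot; a pmpcfg register packs 4 entries on RV32 and 8 on RV64, hence the >> 2 and >> 3. A quick host-runnable check of that arithmetic:

#include <stdio.h>

/* Number of pmpcfg CSR words needed for a given number of PMP slots. */
static int pmp_cfg_num(int pmp_slots, int xlen)
{
	int entries_per_csr = xlen / 8;		/* 4 on RV32, 8 on RV64 */

	return pmp_slots / entries_per_csr;	/* matches the >> 2 / >> 3 macros */
}

int main(void)
{
	printf("8 slots,  RV32 -> %d pmpcfg regs\n", pmp_cfg_num(8, 32));  /* 2 */
	printf("8 slots,  RV64 -> %d pmpcfg regs\n", pmp_cfg_num(8, 64));  /* 1 */
	printf("16 slots, RV32 -> %d pmpcfg regs\n", pmp_cfg_num(16, 32)); /* 4 */
	return 0;
}
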

#ifdef CONFIG_PMP_STACK_GUARD
/*
 * PMP entries:
 * (1 for interrupt stack guard: None)
 * 4 for stack guards: None
 * 1 for RAM: RW
 * 1 for other address space: RWX
 */
#define PMP_REGION_NUM_FOR_STACK_GUARD 6
#define PMP_CFG_CSR_NUM_FOR_STACK_GUARD 2
#endif /* CONFIG_PMP_STACK_GUARD */

#ifdef CONFIG_PMP_POWER_OF_TWO_ALIGNMENT
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_PMP_STACK_GUARD
/*
 * 1 for interrupt stack guard: None
 * 1 for core state: R
 * 1 for program and read only data: RX
 * 1 for user thread stack: RW
 */
#define PMP_REGION_NUM_FOR_U_THREAD 4
#else /* CONFIG_PMP_STACK_GUARD */
/*
 * 1 for core state: R
 * 1 for program and read only data: RX
 * 1 for user thread stack: RW
 */
#define PMP_REGION_NUM_FOR_U_THREAD 3
#endif /* CONFIG_PMP_STACK_GUARD */
#define PMP_MAX_DYNAMIC_REGION (CONFIG_PMP_SLOT - PMP_REGION_NUM_FOR_U_THREAD)
#endif /* CONFIG_USERSPACE */

#else /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */

#ifdef CONFIG_USERSPACE
#ifdef CONFIG_PMP_STACK_GUARD
/*
 * 1 for interrupt stack guard: None
 * 1 for core state: R
 * 2 for program and read only data: RX
 * 2 for user thread stack: RW
 */
#define PMP_REGION_NUM_FOR_U_THREAD 6
#else /* CONFIG_PMP_STACK_GUARD */
/*
 * 1 for core state: R
 * 2 for program and read only data: RX
 * 2 for user thread stack: RW
 */
#define PMP_REGION_NUM_FOR_U_THREAD 5
#endif /* CONFIG_PMP_STACK_GUARD */
#define PMP_MAX_DYNAMIC_REGION ((CONFIG_PMP_SLOT - \
				PMP_REGION_NUM_FOR_U_THREAD) >> 1)
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_PMP_POWER_OF_TWO_ALIGNMENT */
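
Illustrative note (not part of the patch): PMP_MAX_DYNAMIC_REGION is the slot budget left for dynamically added areas once the fixed per-thread entries are reserved; without power-of-two alignment it is halved because an arbitrary area needs a TOR pair (start and end) instead of one NAPOT slot. A small numeric check, assuming a part with 16 PMP slots:

#include <stdio.h>

int main(void)
{
	int pmp_slots = 16;	/* assumed CONFIG_PMP_SLOT */

	/* Power-of-two alignment, with stack guard: 4 slots reserved,
	 * each dynamic partition costs a single NAPOT entry.
	 */
	int dyn_napot = pmp_slots - 4;

	/* No power-of-two alignment, with stack guard: 6 slots reserved,
	 * each dynamic partition costs a TOR pair (2 entries).
	 */
	int dyn_tor = (pmp_slots - 6) / 2;

	printf("NAPOT mode: %d dynamic regions\n", dyn_napot); /* 12 */
	printf("TOR mode:   %d dynamic regions\n", dyn_tor);   /* 5  */
	return 0;
}
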

/*
 * The following structure defines the list of registers that need to be
 * saved/restored when a cooperative context switch occurs.

@ -70,6 +135,19 @@ typedef struct _callee_saved _callee_saved_t;

struct _thread_arch {
	uint32_t swap_return_value; /* Return value of z_swap() */

#ifdef CONFIG_PMP_STACK_GUARD
	ulong_t s_pmpcfg[PMP_CFG_CSR_NUM_FOR_STACK_GUARD];
	ulong_t s_pmpaddr[PMP_REGION_NUM_FOR_STACK_GUARD];
#endif

#ifdef CONFIG_USERSPACE
	ulong_t priv_stack_start;
	ulong_t user_sp;
	ulong_t unfinished_syscall;
	ulong_t u_pmpcfg[RISCV_PMP_CFG_NUM];
	ulong_t u_pmpaddr[CONFIG_PMP_SLOT];
#endif
};

typedef struct _thread_arch _thread_arch_t;

@ -19,6 +19,8 @@
#include <arch/arm/aarch32/syscall.h>
#elif defined(CONFIG_ARC)
#include <arch/arc/syscall.h>
#elif defined(CONFIG_RISCV)
#include <arch/riscv/syscall.h>
#endif

#endif /* ZEPHYR_INCLUDE_ARCH_SYSCALL_H_ */

@ -17,8 +17,9 @@
#define RISCV_MACHINE_TIMER_IRQ 7 /* Machine Timer Interrupt */
#define RISCV_MACHINE_EXT_IRQ 11 /* Machine External Interrupt */

/* Exception numbers */
#define RISCV_MACHINE_ECALL_EXP 11 /* Machine ECALL instruction */
/* ECALL Exception numbers */
#define SOC_MCAUSE_ECALL_EXP 11 /* Machine ECALL instruction */
#define SOC_MCAUSE_USER_ECALL_EXP 8 /* User ECALL instruction */

/* SOC-specific MCAUSE bitfields */
#ifdef CONFIG_64BIT

@ -32,8 +33,6 @@
/* Exception code Mask */
#define SOC_MCAUSE_EXP_MASK 0x7FFFFFFF
#endif
/* ECALL exception number */
#define SOC_MCAUSE_ECALL_EXP RISCV_MACHINE_ECALL_EXP

/* SOC-Specific EXIT ISR command */
#define SOC_ERET mret
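
Illustrative note (not part of the patch): SOC_MCAUSE_USER_ECALL_EXP is needed because an ecall from U-mode traps with exception code 8, while the kernel's own ecall (context switch, IRQ offload) traps with code 11, and the IRQ handler must tell the two apart. A host-runnable sketch of that mcause dispatch, reusing the mask defined above:

#include <stdio.h>

#define SOC_MCAUSE_EXP_MASK       0x7FFFFFFFUL	/* exception code mask (RV32) */
#define SOC_MCAUSE_USER_ECALL_EXP 8UL		/* ecall from U-mode */
#define SOC_MCAUSE_ECALL_EXP      11UL		/* ecall from M-mode */

/* Illustrative dispatch on the mcause exception code. */
static const char *classify_trap(unsigned long mcause)
{
	switch (mcause & SOC_MCAUSE_EXP_MASK) {
	case SOC_MCAUSE_USER_ECALL_EXP:
		return "system call from user thread";
	case SOC_MCAUSE_ECALL_EXP:
		return "kernel ecall (context switch / IRQ offload)";
	default:
		return "fault";
	}
}

int main(void)
{
	printf("%s\n", classify_trap(8));  /* system call from user thread */
	printf("%s\n", classify_trap(11)); /* kernel ecall ... */
	return 0;
}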