arch: riscv: add memory protection support

The IRQ handler has had major changes to manage syscalls, rescheduling,
interrupts from user threads, and the stack guard.
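
For illustration only, a minimal C sketch of the demultiplexing the
reworked handler has to perform. The mcause values are from the RISC-V
privileged spec; the handle_* helpers are hypothetical (the real
handler is written in assembly):

    #define MCAUSE_IRQ_BIT (1UL << (__riscv_xlen - 1))
    #define MCAUSE_ECALL_U 8UL /* environment call (syscall) from U-mode */

    void handle_irq(unsigned long irq);  /* may trigger a reschedule      */
    void handle_syscall(void);           /* user thread entering kernel   */
    void handle_fault(unsigned long c);  /* e.g. a stack-guard PMP fault  */

    void trap_demux(unsigned long mcause)
    {
        if (mcause & MCAUSE_IRQ_BIT) {
            handle_irq(mcause & ~MCAUSE_IRQ_BIT);
        } else if (mcause == MCAUSE_ECALL_U) {
            handle_syscall();
        } else {
            handle_fault(mcause);
        }
    }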

Add userspace support:
- Use a global variable to know whether the current execution mode is
  user or machine. The location of this variable is read-only for all
  user threads and read/write for kernel threads.
- Shared memory is supported.
- Use dynamic allocation to optimize PMP slot usage. If the area size
  is a power of two, only one PMP slot is used; otherwise two are used
  (see the sketch after this list).
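
A minimal sketch of the two PMP addressing modes behind that slot
accounting (encodings per the RISC-V privileged spec; the helper names
are hypothetical, not this patch's code):

    /* Power-of-two, size-aligned region (size >= 8): one NAPOT slot.
     * pmpaddr holds base | (size/2 - 1), in units of 4 bytes. */
    static inline unsigned long pmp_napot_addr(unsigned long base,
                                               unsigned long size)
    {
        return (base | ((size >> 1) - 1UL)) >> 2;
    }

    /* Arbitrary region: a TOR pair, i.e. two slots holding the bottom
     * and top addresses (again in units of 4 bytes). */
    static inline void pmp_tor_addrs(unsigned long base, unsigned long size,
                                     unsigned long *lo, unsigned long *hi)
    {
        *lo = base >> 2;
        *hi = (base + size) >> 2;
    }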

Add stack guard support:
- Use MPRV bit to force PMP rules to machine mode execution.
- The IRQ stack has a locked stack guard to avoid rewriting the PMP
  configuration registers on each interrupt, saving some cycles (see
  the sketch after this list).
- The IRQ stack is used as a "temporary" stack at the beginning of the
  IRQ handler to save the current ESF. This avoids triggering a write
  fault on the thread stack while storing the ESF, which would re-enter
  the IRQ handler endlessly.
- A stack guard is also set up for the privileged stack of a user
  thread.
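
A rough sketch of the locked-guard idea (bit values per the privileged
spec; the slot choice and helper name are assumptions, not this
patch's code):

    #define PMP_NAPOT 0x18u /* A field: naturally aligned power of two */
    #define PMP_LOCK  0x80u /* L bit: rule is enforced even in M-mode  */

    /* Program slot 0 as a no-access (R=W=X=0) guard under the IRQ
     * stack; once locked it stays in force until reset, so the
     * interrupt path never has to touch pmpcfg again. */
    static void pmp_lock_irq_stack_guard(unsigned long base,
                                         unsigned long size)
    {
        unsigned long addr = (base | ((size >> 1) - 1UL)) >> 2;

        __asm__ volatile ("csrw pmpaddr0, %0" : : "r" (addr));
        __asm__ volatile ("csrs pmpcfg0, %0"
                          : : "r" ((unsigned long)(PMP_LOCK | PMP_NAPOT)));
    }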

Thread:
- The PMP setup is specific to each thread and is saved in each
  thread's structure to improve reschedule performance (a sketch of
  such a save area follows).
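
Illustratively, the per-thread save area could take the following
shape (field and constant names are assumptions; the actual layout is
defined by this patch's header changes):

    #define PMP_SLOT_NUM 8 /* assumption: 8 implemented PMP entries */

    struct riscv_pmp_ctx {
        /* Cached CSR images, written back to the PMP CSRs on context
         * switch instead of being recomputed, which is the reschedule
         * win cited above. */
        unsigned long pmpcfg[PMP_SLOT_NUM / sizeof(unsigned long)];
        unsigned long pmpaddr[PMP_SLOT_NUM];
    };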

Signed-off-by: Alexandre Mergnat <amergnat@baylibre.com>
Reviewed-by: Nicolas Royer <nroyer@baylibre.com>
Alexandre Mergnat 2020-07-21 16:00:39 +02:00 committed by Anas Nashif
commit 542a7fa25d
20 changed files with 1950 additions and 89 deletions


@@ -1,11 +1,25 @@
/*
* Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
* Copyright (c) 2020 BayLibre, SAS
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <ksched.h>
#include <arch/riscv/csr.h>
#include <stdio.h>
#include <core_pmp.h>
#ifdef CONFIG_USERSPACE
/*
* Global variable used to know the current running mode.
* It is not a boolean because it must match the PMP granularity of the arch.
*/
ulong_t is_user_mode;
bool irq_flag;
#endif
void z_thread_entry_wrapper(k_thread_entry_t thread,
void *arg1,
@@ -59,14 +73,45 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* thread stack.
*/
stack_init->mstatus = MSTATUS_DEF_RESTORE;
#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_USERSPACE)
z_riscv_pmp_init_thread(thread);
#endif /* CONFIG_PMP_STACK_GUARD || CONFIG_USERSPACE */
#if defined(CONFIG_PMP_STACK_GUARD)
if ((thread->base.user_options & K_USER) == 0) {
/* Enable PMP for machine mode if the thread isn't a user thread */
stack_init->mstatus |= MSTATUS_MPRV;
}
#endif /* CONFIG_PMP_STACK_GUARD */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
if ((thread->base.user_options & K_FP_REGS) != 0) {
stack_init->mstatus |= MSTATUS_FS_INIT;
}
stack_init->fp_state = 0;
#endif
stack_init->mepc = (ulong_t)z_thread_entry_wrapper;
#if defined(CONFIG_USERSPACE)
thread->arch.priv_stack_start = 0;
thread->arch.user_sp = 0;
if ((thread->base.user_options & K_USER) != 0) {
stack_init->mepc = (ulong_t)k_thread_user_mode_enter;
} else {
stack_init->mepc = (ulong_t)z_thread_entry_wrapper;
#if defined(CONFIG_PMP_STACK_GUARD)
z_riscv_init_stack_guard(thread);
#endif /* CONFIG_PMP_STACK_GUARD */
}
#else
stack_init->mepc = (ulong_t)z_thread_entry_wrapper;
#if defined(CONFIG_PMP_STACK_GUARD)
z_riscv_init_stack_guard(thread);
#endif /* CONFIG_PMP_STACK_GUARD */
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
stack_init->soc_context = soc_esf_init;
#endif
@@ -138,3 +183,97 @@ int arch_float_enable(struct k_thread *thread)
return 0;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#ifdef CONFIG_USERSPACE
/* Function used by Zephyr to switch a supervisor thread to a user thread */
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
arch_syscall_invoke5((uintptr_t) arch_user_mode_enter,
(uintptr_t) user_entry,
(uintptr_t) p1,
(uintptr_t) p2,
(uintptr_t) p3,
FORCE_SYSCALL_ID);
CODE_UNREACHABLE;
}
/*
* User space entry function
*
* This function is the entry point to user mode from privileged execution.
* The conversion is one way, and threads which transition to user mode do
* not transition back later, unless they are doing system calls.
*/
FUNC_NORETURN void z_riscv_user_mode_enter_syscall(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
ulong_t top_of_user_stack = 0U;
uintptr_t status;
/* Set up privileged stack */
#ifdef CONFIG_GEN_PRIV_STACKS
_current->arch.priv_stack_start =
(uint32_t)z_priv_stack_find(_current->stack_obj);
#else
_current->arch.priv_stack_start =
(uint32_t)(_current->stack_obj) +
Z_RISCV_STACK_GUARD_SIZE;
#endif /* CONFIG_GEN_PRIV_STACKS */
top_of_user_stack = Z_STACK_PTR_ALIGN(
_current->stack_info.start +
_current->stack_info.size -
_current->stack_info.delta);
/* Set next CPU status to user mode */
status = csr_read(mstatus);
status = INSERT_FIELD(status, MSTATUS_MPP, PRV_U);
status = INSERT_FIELD(status, MSTATUS_MPRV, 0);
csr_write(mstatus, status);
csr_write(mepc, z_thread_entry_wrapper);
/* Set up Physical Memory Protection */
#if defined(CONFIG_PMP_STACK_GUARD)
z_riscv_init_stack_guard(_current);
#endif
z_riscv_init_user_accesses(_current);
z_riscv_configure_user_allowed_stack(_current);
is_user_mode = true;
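/*
 * Hand the entry point and its three arguments over in a0-a3 and
 * switch to the user stack; the mret below then drops to user mode
 * (MPP was set to PRV_U above).
 */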
__asm__ volatile ("mv a0, %1"
: "=r" (user_entry)
: "r" (user_entry)
: "memory");
__asm__ volatile ("mv a1, %1"
: "=r" (p1)
: "r" (p1)
: "memory");
__asm__ volatile ("mv a2, %1"
: "=r" (p2)
: "r" (p2)
: "memory");
__asm__ volatile ("mv a3, %1"
: "=r" (p3)
: "r" (p3)
: "memory");
__asm__ volatile ("mv sp, %1"
: "=r" (top_of_user_stack)
: "r" (top_of_user_stack)
: "memory");
__asm__ volatile ("mret");
CODE_UNREACHABLE;
}
#endif /* CONFIG_USERSPACE */