riscv: pmp: switch over to the new implementation
Add the appropriate hooks, effectively replacing the old implementation with the new one. Also, the stack guard wasn't properly enforced, especially in combination with user mode; this is now fixed.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
Parent: 7a55bda7e1
Commit: 2fece49a14
9 changed files with 118 additions and 50 deletions
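
For orientation, these are the hooks the rest of this diff wires in, shown as a prototype sketch reconstructed purely from the call sites below. The new pmp.h that actually declares them is not part of this excerpt, so the signatures are assumptions; only the names and the functions they replace come from the diff itself:

    /* Prototype sketch -- inferred from call sites, not copied from pmp.h */
    struct k_thread;

    void z_riscv_pmp_init(void);                /* per-CPU boot-time init; replaces z_riscv_configure_static_pmp_regions() */
    void z_riscv_pmp_stackguard_prepare(struct k_thread *thread); /* precompute guard entries; replaces z_riscv_init_stack_guard() */
    void z_riscv_pmp_stackguard_enable(struct k_thread *thread);  /* program the guard; replaces z_riscv_configure_stack_guard() */
    void z_riscv_pmp_usermode_init(struct k_thread *thread);      /* reset a thread's user-mode PMP context */
    void z_riscv_pmp_usermode_prepare(struct k_thread *thread);   /* precompute user-mode entries */
    void z_riscv_pmp_usermode_enable(struct k_thread *thread);    /* program user-mode entries; replaces z_riscv_configure_user_allowed_stack() */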
@@ -147,12 +147,43 @@ menuconfig RISCV_PMP
 	select MPU
 	select SRAM_REGION_PERMISSIONS
 	select ARCH_MEM_DOMAIN_SYNCHRONOUS_API if USERSPACE
-	select PMP_POWER_OF_TWO_ALIGNMENT if USERSPACE
+	select ARCH_MEM_DOMAIN_DATA if USERSPACE
 	help
 	  MCU implements Physical Memory Protection.

 if RISCV_PMP

-source "arch/riscv/core/pmp/Kconfig"
+config PMP_SLOT
+	int "Number of PMP slots"
+	default 8
+	help
+	  This is the number of PMP entries implemented by the hardware.
+	  Typical values are 8 or 16.
+
+config PMP_STACK_GUARD
+	bool "Thread Stack Guard"
+	help
+	  This implements a trap using the PMP to catch stack overflows
+	  by marking the bottom stack area as not accessible.
+
+config PMP_STACK_GUARD_MIN_SIZE
+	int "Guard size"
+	depends on PMP_STACK_GUARD
+	default 64
+	help
+	  Size of the stack guard area. This should be large enough to
+	  accommodate the stack overflow exception stack usage.
+
+config PMP_POWER_OF_TWO_ALIGNMENT
+	bool "Power-of-two alignment for PMP memory areas"
+	default y if TEST_USERSPACE
+	default y if (PMP_SLOT = 8)
+	select MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
+	select GEN_PRIV_STACKS
+	help
+	  This option reduces the PMP slot usage but increases
+	  memory consumption.

 endif #RISCV_PMP

 endmenu
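
PMP_POWER_OF_TWO_ALIGNMENT trades memory for slots: a power-of-two sized, size-aligned region fits a single NAPOT-encoded PMP entry, whereas an arbitrary region needs a TOR address pair, so stacks and partitions get padded but fewer of the 8 or 16 slots are consumed. A minimal sketch of the NAPOT address encoding as defined by the RISC-V privileged spec; the helper name is illustrative and not part of this commit:

    #include <stdint.h>
    #include <stddef.h>

    /*
     * NAPOT encoding: pmpaddr holds addr[XLEN+1:2], and the length of the
     * trailing run of 1 bits encodes log2(size) - 3.  The caller must pass
     * a power-of-two size >= 8 bytes and a base aligned to that size.
     */
    static inline uintptr_t napot_pmpaddr(uintptr_t base, size_t size)
    {
        return (base | (size / 2U - 1U)) >> 2;
    }

For example, a 4 KiB region at 0x80001000 encodes to (0x80001000 | 0x7ff) >> 2 = 0x200005ff, whose nine trailing 1 bits give 2^(9+3) = 4096 bytes.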

@@ -1,7 +1,5 @@
 # SPDX-License-Identifier: Apache-2.0

-add_subdirectory_ifdef(CONFIG_RISCV_PMP pmp)
-
 zephyr_library()

 zephyr_library_sources(

@@ -19,6 +17,7 @@ zephyr_library_sources(

 zephyr_library_sources_ifdef(CONFIG_DEBUG_COREDUMP coredump.c)
 zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)
+zephyr_library_sources_ifdef(CONFIG_RISCV_PMP pmp.c pmp.S)
 zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE tls.c)
 zephyr_library_sources_ifdef(CONFIG_USERSPACE userspace.S)
 zephyr_library_sources_ifdef(CONFIG_SEMIHOST semihost.c)

@@ -246,11 +246,6 @@ skip_store_fp_caller_saved:
 	/* If a0 != 0, jump to is_interrupt */
 	bnez a0, is_interrupt

-#ifdef CONFIG_PMP_STACK_GUARD
-	li t0, MSTATUS_MPRV
-	csrs mstatus, t0
-#endif
-
 	/*
 	 * If the exception is the result of an ECALL, check whether to
 	 * perform a context-switch or an IRQ offload. Otherwise call _Fault

@@ -301,6 +296,14 @@ is_kernel_syscall:
 	addi t0, t0, 4
 	sr t0, __z_arch_esf_t_mepc_OFFSET(sp)

+#ifdef CONFIG_PMP_STACK_GUARD
+	/* Re-activate PMP for m-mode */
+	li t1, MSTATUS_MPP
+	csrc mstatus, t1
+	li t1, MSTATUS_MPRV
+	csrs mstatus, t1
+#endif
+
 	/* Determine what to do. Operation code is in a7. */
 	lr a7, __z_arch_esf_t_a7_OFFSET(sp)
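
The MPP/MPRV sequence added here is what makes the stack guard bite while the kernel itself runs in M-mode: PMP entries that are not locked do not normally constrain M-mode at all, but with mstatus.MPP cleared (previous privilege = U) and mstatus.MPRV set, loads and stores are checked as if issued from U-mode while instruction fetches continue unaffected, so a kernel stack overflow into the guard area faults. A rough C equivalent, for illustration only and assuming the MSTATUS_* masks and the csr_set()/csr_clear() helpers from <arch/riscv/csr.h>; the real code has to stay in assembly on this early exception path:

    #include <arch/riscv/csr.h>

    static inline void pmp_apply_to_m_mode(void)
    {
        /* Check M-mode loads/stores against the PMP as if they were U-mode
         * accesses: previous privilege (MPP) = U, modify-privilege (MPRV) = 1.
         */
        csr_clear(mstatus, MSTATUS_MPP);
        csr_set(mstatus, MSTATUS_MPRV);
    }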

@@ -356,6 +359,16 @@ do_irq_offload:

 #ifdef CONFIG_USERSPACE
 is_user_syscall:

+#ifdef CONFIG_PMP_STACK_GUARD
+	/*
+	 * We came from userspace and need to reconfigure the
+	 * PMP for kernel mode stack guard.
+	 */
+	lr a0, ___cpu_t_current_OFFSET(s0)
+	call z_riscv_pmp_stackguard_enable
+#endif
+
 	/* It is safe to re-enable IRQs now */
 	csrs mstatus, MSTATUS_IEN

@@ -405,6 +418,29 @@ valid_syscall_id:
 #endif /* CONFIG_USERSPACE */

 is_interrupt:

+#ifdef CONFIG_PMP_STACK_GUARD
+#ifdef CONFIG_USERSPACE
+	/*
+	 * If we came from userspace then we need to reconfigure the
+	 * PMP for kernel mode stack guard.
+	 */
+	lr t0, __z_arch_esf_t_mstatus_OFFSET(sp)
+	li t1, MSTATUS_MPP
+	and t0, t0, t1
+	bnez t0, 1f
+	lr a0, ___cpu_t_current_OFFSET(s0)
+	call z_riscv_pmp_stackguard_enable
+	j 2f
+#endif /* CONFIG_USERSPACE */
+1:	/* Re-activate PMP for m-mode */
+	li t1, MSTATUS_MPP
+	csrc mstatus, t1
+	li t1, MSTATUS_MPRV
+	csrs mstatus, t1
+2:
+#endif
+
 	/* Increment _current_cpu->nested */
 	lw t3, ___cpu_t_nested_OFFSET(s0)
 	addi t4, t3, 1
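
On interrupt entry the right action depends on where execution was interrupted, which is read back from the mstatus image saved in the exception frame: an MPP field of zero means U-mode was running, so the kernel stack guard entries must be loaded before the kernel stack gets used; otherwise they are already loaded and only the MPP/MPRV trick needs re-applying. Expressed as illustrative C mirroring the branch structure above, reusing the pmp_apply_to_m_mode() sketch and the assumed prototypes from earlier (the real logic runs in assembly before IRQs are re-enabled):

    void pmp_on_interrupt_entry(unsigned long saved_mstatus, struct k_thread *current)
    {
        if ((saved_mstatus & MSTATUS_MPP) == 0) {
            /* interrupted user mode: program the kernel stack guard set */
            z_riscv_pmp_stackguard_enable(current);
        } else {
            /* interrupted machine mode: guard already in place */
            pmp_apply_to_m_mode();
        }
    }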

@@ -547,6 +583,12 @@ no_fp: /* make sure this is reflected in the restored mstatus */
 	and t0, t4, t1
 	bnez t0, 1f

+#ifdef CONFIG_PMP_STACK_GUARD
+	/* Remove kernel stack guard and Reconfigure PMP for user mode */
+	lr a0, ___cpu_t_current_OFFSET(s0)
+	call z_riscv_pmp_usermode_enable
+#endif
+
 #if !defined(CONFIG_SMP)
 	/* Set user mode variable */
 	li t0, 1

@@ -35,8 +35,8 @@ void z_riscv_secondary_cpu_init(int cpu_num)
 #if defined(CONFIG_RISCV_SOC_INTERRUPT_INIT)
 	soc_interrupt_init();
 #endif
-#ifdef CONFIG_PMP_STACK_GUARD
-	z_riscv_configure_interrupt_stack_guard();
+#ifdef CONFIG_RISCV_PMP
+	z_riscv_pmp_init();
 #endif
 #ifdef CONFIG_SMP
 	irq_enable(RISCV_MACHINE_SOFT_IRQ);

@@ -75,19 +75,25 @@ skip_store_fp_callee_saved:
 	/* Get the new thread's stack pointer */
 	lr sp, _thread_offset_to_sp(a0)

-#ifdef CONFIG_PMP_STACK_GUARD
-	/* Preserve a0 across following call. s0 is not yet restored. */
+#if defined(CONFIG_PMP_STACK_GUARD)
+	/*
+	 * Stack guard has priority over user space for PMP usage.
+	 * Preserve a0 across following call. s0 is not yet restored.
+	 */
 	mv s0, a0
-	call z_riscv_configure_stack_guard
+	call z_riscv_pmp_stackguard_enable
 	mv a0, s0
-#endif
-
-#ifdef CONFIG_USERSPACE
+#elif defined(CONFIG_USERSPACE)
+	/*
+	 * When stackguard is not enabled, we need to configure the PMP only
+	 * at context switch time as the PMP is not in effect while in m-mode
+	 * (it is done on every exception return otherwise).
+	 */
 	lb t0, _thread_offset_to_user_options(a0)
 	andi t0, t0, K_USER
 	beqz t0, not_user_task
 	mv s0, a0
-	call z_riscv_configure_user_allowed_stack
+	call z_riscv_pmp_usermode_enable
 	mv a0, s0
 not_user_task:
 #endif
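
The switch path now orders the two PMP users by priority, per the new comment: with the stack guard enabled it is reprogrammed for every incoming thread and the user-mode entries are loaded later, on the exception-return-to-userspace path shown earlier; with userspace alone the PMP only has to be touched at switch time, and only for threads that actually run in U-mode, because unlocked PMP entries never constrain M-mode execution. A compact restatement in C, illustrative only (z_riscv_switch() itself is assembly, and the field access mirrors the lb/andi on user_options above):

    #include <kernel.h>

    static void pmp_on_context_switch(struct k_thread *incoming)
    {
    #if defined(CONFIG_PMP_STACK_GUARD)
        /* stack guard wins; user-mode entries are loaded on exception return */
        z_riscv_pmp_stackguard_enable(incoming);
    #elif defined(CONFIG_USERSPACE)
        if (incoming->base.user_options & K_USER) {
            /* only threads running in U-mode need their PMP entries now */
            z_riscv_pmp_usermode_enable(incoming);
        }
    #endif
    }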

@@ -9,7 +9,7 @@
 #include <ksched.h>
 #include <arch/riscv/csr.h>
 #include <stdio.h>
-#include <core_pmp.h>
+#include <pmp.h>

 #if defined(CONFIG_USERSPACE) && !defined(CONFIG_SMP)
 /*

@@ -81,13 +81,9 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	stack_init->mstatus |= MSTATUS_FS_INIT;
 #endif

-#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_USERSPACE)
-	/* Clear PMP context if RISC-V PMP is used. */
-	z_riscv_pmp_init_thread(thread);
-#endif /* CONFIG_PMP_STACK_GUARD || CONFIG_USERSPACE */
-
 #if defined(CONFIG_USERSPACE)
 	/* Clear user thread context */
+	z_riscv_pmp_usermode_init(thread);
 	thread->arch.priv_stack_start = 0;

 	/* the unwound stack pointer upon exiting exception */

@@ -119,7 +115,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,

 #if defined(CONFIG_PMP_STACK_GUARD)
 	/* Setup PMP regions of PMP stack guard of thread. */
-	z_riscv_init_stack_guard(thread);
+	z_riscv_pmp_stackguard_prepare(thread);
 #endif /* CONFIG_PMP_STACK_GUARD */

 #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE

@@ -245,16 +241,17 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 	csr_write(mstatus, status);
 	csr_write(mepc, z_thread_entry);

-	/* exception stack has to be in mscratch */
-	csr_write(mscratch, top_of_priv_stack);
-
-	/* Set up Physical Memory Protection */
-#if defined(CONFIG_PMP_STACK_GUARD)
-	z_riscv_init_stack_guard(_current);
+#ifdef CONFIG_PMP_STACK_GUARD
+	/* reconfigure as the kernel mode stack will be different */
+	z_riscv_pmp_stackguard_prepare(_current);
 #endif

-	z_riscv_init_user_accesses(_current);
-	z_riscv_configure_user_allowed_stack(_current);
+	/* Set up Physical Memory Protection */
+	z_riscv_pmp_usermode_prepare(_current);
+	z_riscv_pmp_usermode_enable(_current);
+
+	/* exception stack has to be in mscratch */
+	csr_write(mscratch, top_of_priv_stack);

 #if !defined(CONFIG_SMP)
 	is_user_mode = true;

@@ -16,6 +16,7 @@
 #define ZEPHYR_ARCH_RISCV_INCLUDE_KERNEL_ARCH_FUNC_H_

 #include <kernel_arch_data.h>
+#include <pmp.h>

 #ifdef __cplusplus
 extern "C" {

@@ -23,17 +24,13 @@ extern "C" {

 #ifndef _ASMLANGUAGE

-#ifdef CONFIG_RISCV_PMP
-void z_riscv_configure_static_pmp_regions(void);
-#endif
-
 static ALWAYS_INLINE void arch_kernel_init(void)
 {
 #ifdef CONFIG_USERSPACE
 	csr_write(mscratch, 0);
 #endif
 #ifdef CONFIG_RISCV_PMP
-	z_riscv_configure_static_pmp_regions();
+	z_riscv_pmp_init();
 #endif
 }

@@ -33,9 +33,6 @@ config MAX_IRQ_PER_AGGREGATOR
 config NUM_IRQS
 	default 64

-config PMP_POWER_OF_TWO_ALIGNMENT
-	default y
-
 config PMP_SLOT
 	default 16

@@ -27,10 +27,6 @@
 extern void arm_core_mpu_disable(void);
 #endif

-#if defined(CONFIG_RISCV)
-#include <../arch/riscv/include/core_pmp.h>
-#endif
-
 #define INFO(fmt, ...) printk(fmt, ##__VA_ARGS__)
 #define PIPE_LEN 1
 #define BYTES_TO_READ_WRITE 1

@@ -240,7 +236,12 @@ static void test_disable_mmu_mpu(void)
 #elif defined(CONFIG_RISCV)
 	set_fault(K_ERR_CPU_EXCEPTION);

-	z_riscv_pmp_clear_config();
+	/*
+	 * Try to make everything accessible through PMP slot 3
+	 * which should not be locked.
+	 */
+	csr_write(pmpaddr3, LLONG_MAX);
+	csr_write(pmpcfg0, (PMP_R|PMP_W|PMP_X|PMP_NAPOT) << 24);
 #else
 #error "Not implemented for this architecture"
 #endif
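
Rather than calling a helper from the removed implementation, the test now opens the address space by hand, and the encoding deserves a note: on RV64 pmpcfg0 packs the 8-bit config bytes of slots 0..7 (slot N at bits 8N+7..8N, hence the << 24 selecting slot 3), and writing LLONG_MAX to pmpaddr3 sets every implemented address bit, which under NAPOT matching selects the largest encodable region, i.e. all of memory. An annotated restatement of the two writes above, not new behaviour, assuming the standard privileged-spec encodings behind the PMP_* macros (R=0x1, W=0x2, X=0x4, NAPOT A-field=0x18, so the word written is 0x1f000000):

    /* pmpaddr3: all implemented bits set -> NAPOT region covering everything */
    csr_write(pmpaddr3, LLONG_MAX);
    /* pmpcfg0 byte 3 (bits 31:24): grant R|W|X with NAPOT matching on slot 3.
     * The other config bytes are written as zero, consistent with this being
     * the "disable MMU/MPU" test.
     */
    csr_write(pmpcfg0, (PMP_R | PMP_W | PMP_X | PMP_NAPOT) << 24);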

@@ -1048,10 +1049,8 @@ void test_main(void)
 #if defined(CONFIG_GEN_PRIV_STACKS)
 	priv_stack_ptr = (char *)z_priv_stack_find(ztest_thread_stack);
 #else
-	struct _thread_arch *thread_struct;
-
-	thread_struct = ((struct _thread_arch *) ztest_thread_stack);
-	priv_stack_ptr = (char *)thread_struct->priv_stack_start + 1;
+	priv_stack_ptr = (char *)((uintptr_t)ztest_thread_stack +
+				  Z_RISCV_STACK_GUARD_SIZE);
 #endif
 #endif
 	k_thread_access_grant(k_current_get(),