riscv: new TLS-based arch_is_user_context() implementation
This reverts the bulk of commit c8bfc2afda ("riscv: make
arch_is_user_context() SMP compatible") and replaces it with a flag
stored in the thread local storage (TLS) area, therefore making TLS
mandatory for userspace support on RISC-V.
This has many advantages:
- The tp (x4) register is already dedicated by the standard for this
purpose, making TLS support almost free.
- This is very efficient, requiring only a single instruction to clear
and 2 instructions to set.
- This makes the SMP case much more efficient. No need for funky
exception code any longer.
- SMP and non-SMP now use the same implementation making maintenance
easier.
- The is_user_mode variable no longer requires a dedicated PMP mapping,
freeing one PMP slot for other purposes.
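
For illustration, a minimal C-side sketch of how such a TLS-resident flag
can back arch_is_user_context(). It assumes is_user_mode is a one-byte
__thread variable (consistent with the byte stores in the diff below);
the helper functions are hypothetical and only mirror what the assembly
entry/exit paths do.

    /* Sketch only: in a real tree the variable lives in the arch code
     * and is declared extern where it is read. */
    #include <stdbool.h>
    #include <stdint.h>

    __thread uint8_t is_user_mode;

    static inline bool arch_is_user_context(void)
    {
            /* Plain TLS load: tp points at the current thread's TLS area. */
            return is_user_mode != 0;
    }

    /* Hypothetical helpers mirroring the assembly set/clear sequences. */
    static inline void set_user_mode(void)   { is_user_mode = 1; }
    static inline void clear_user_mode(void) { is_user_mode = 0; }

With the local-exec TLS model, the compiler reaches is_user_mode through
tp using the same %tprel_hi/%tprel_add/%tprel_lo sequence that appears in
the hand-written assembly below.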
Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
7 changed files with 34 additions and 104 deletions
@@ -41,11 +41,9 @@
 	op fa6, __z_arch_esf_t_fa6_OFFSET(reg) ;\
 	op fa7, __z_arch_esf_t_fa7_OFFSET(reg) ;
 
-#define DO_CALLER_SAVED_T0T1(op) \
+#define DO_CALLER_SAVED(op) \
 	RV_E( op t0, __z_arch_esf_t_t0_OFFSET(sp) );\
-	RV_E( op t1, __z_arch_esf_t_t1_OFFSET(sp) )
-
-#define DO_CALLER_SAVED_REST(op) \
+	RV_E( op t1, __z_arch_esf_t_t1_OFFSET(sp) );\
 	RV_E( op t2, __z_arch_esf_t_t2_OFFSET(sp) );\
 	RV_I( op t3, __z_arch_esf_t_t3_OFFSET(sp) );\
 	RV_I( op t4, __z_arch_esf_t_t4_OFFSET(sp) );\
@@ -139,46 +137,12 @@ SECTION_FUNC(exception.entry, _isr_wrapper)
 	/* restore privileged stack pointer and zero the scratch reg */
 	csrrw sp, mscratch, sp
 
-#ifdef CONFIG_SMP
-	j 2f
-
-1: /*
-	 * We were in user space. Determine if it attempted to execute an
-	 * arch_is_user_context() based on mscratch access. We want to return
-	 * to u-mode with t0!=0 as quickly as possible if so.
-	 */
-	addi sp, sp, -__z_arch_esf_t_SIZEOF
-	DO_CALLER_SAVED_T0T1(sr) ;
-	/* First, determine if we had an illegal instruction exception. */
-	csrr t0, mcause
-	li t1, SOC_MCAUSE_EXP_MASK
-	and t0, t0, t1
-	addi t0, t0, -2 /* = 2 = illegal instruction */
-	bnez t0, 3f
-	/* Make sure it was actually a "csrr t0, mscratch" */
-	csrr t0, mepc
-	lw t0, 0(t0)
-	li t1, 0x340022f3
-	bne t0, t1, 3f
-	/* So it was: skip over it and return leaving t0 clobbered. */
-	csrr t0, mepc
-	addi t0, t0, 4
-	csrw mepc, t0
-	lr t1, __z_arch_esf_t_t1_OFFSET(sp)
-	addi sp, sp, __z_arch_esf_t_SIZEOF
-	/* restore user stack pointer and leave */
-	csrrw sp, mscratch, sp
-	mret
-2:
-#endif /* CONFIG_SMP */
 1:
 #endif /* CONFIG_USERSPACE */
 #endif
 
 	/* Save caller-saved registers on current thread stack. */
 	addi sp, sp, -__z_arch_esf_t_SIZEOF
-	DO_CALLER_SAVED_T0T1(sr) ;
-3:	DO_CALLER_SAVED_REST(sr) ;
+	DO_CALLER_SAVED(sr) ;
 
 	/* Save s0 in the esf and load it with &_current_cpu. */
 	sr s0, __z_arch_esf_t_s0_OFFSET(sp)
@@ -199,17 +163,14 @@ SECTION_FUNC(exception.entry, _isr_wrapper)
 	/* save stack value to be restored later */
 	sr t0, __z_arch_esf_t_sp_OFFSET(sp)
 
-#if defined(CONFIG_THREAD_LOCAL_STORAGE)
 	/* Make sure tls pointer is sane */
 	lr t0, ___cpu_t_current_OFFSET(s0)
 	lr tp, _thread_offset_to_tls(t0)
-#endif
 
-#if !defined(CONFIG_SMP)
-	/* Clear user mode variable */
-	la t0, is_user_mode
-	sw zero, 0(t0)
-#endif
+	/* Clear our per-thread usermode flag */
+	lui t0, %tprel_hi(is_user_mode)
+	add t0, t0, tp, %tprel_add(is_user_mode)
+	sb zero, %tprel_lo(is_user_mode)(t0)
 #endif
 
 	/* Save MEPC register */
@@ -608,12 +569,11 @@ no_fp: /* make sure this is reflected in the restored mstatus */
 	call z_riscv_pmp_usermode_enable
 #endif
 
-#if !defined(CONFIG_SMP)
-	/* Set user mode variable */
-	li t0, 1
-	la t1, is_user_mode
-	sw t0, 0(t1)
-#endif
+	/* Set our per-thread usermode flag */
+	li t1, 1
+	lui t0, %tprel_hi(is_user_mode)
+	add t0, t0, tp, %tprel_add(is_user_mode)
+	sb t1, %tprel_lo(is_user_mode)(t0)
 
 	/* load scratch reg with stack pointer for next exception entry */
 	add t0, sp, __z_arch_esf_t_SIZEOF
@@ -625,8 +585,7 @@ no_fp: /* make sure this is reflected in the restored mstatus */
 	lr s0, __z_arch_esf_t_s0_OFFSET(sp)
 
 	/* Restore caller-saved registers from thread stack */
-	DO_CALLER_SAVED_T0T1(lr)
-	DO_CALLER_SAVED_REST(lr)
+	DO_CALLER_SAVED(lr)
 
 #ifdef CONFIG_USERSPACE
 	/* retrieve saved stack pointer */