riscv: integrate the new FPU context switching support

FPU context switching is always performed on demand through the FPU
access exception handler. Actual task switching only grants or denies
FPU access depending on the current FPU owner.

Because RISC-V doesn't have a dedicated FPU access exception, we must
catch the Illegal Instruction exception and look for actual FP opcodes.

There is no longer a need to allocate FPU storage on the stack for every
exception, making the esf smaller and stack overflows less likely.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
This commit is contained in:
Nicolas Pitre 2023-01-17 23:32:21 -05:00 committed by Carles Cufí
commit ff07da6ff1
12 changed files with 204 additions and 344 deletions

View file

@@ -29,23 +29,10 @@
RV_I( op s10, _thread_offset_to_s10(reg) );\
RV_I( op s11, _thread_offset_to_s11(reg) )
#define DO_FP_CALLEE_SAVED(op, reg) \
op fs0, _thread_offset_to_fs0(reg) ;\
op fs1, _thread_offset_to_fs1(reg) ;\
op fs2, _thread_offset_to_fs2(reg) ;\
op fs3, _thread_offset_to_fs3(reg) ;\
op fs4, _thread_offset_to_fs4(reg) ;\
op fs5, _thread_offset_to_fs5(reg) ;\
op fs6, _thread_offset_to_fs6(reg) ;\
op fs7, _thread_offset_to_fs7(reg) ;\
op fs8, _thread_offset_to_fs8(reg) ;\
op fs9, _thread_offset_to_fs9(reg) ;\
op fs10, _thread_offset_to_fs10(reg) ;\
op fs11, _thread_offset_to_fs11(reg)
GTEXT(z_riscv_switch)
GTEXT(z_thread_mark_switched_in)
GTEXT(z_riscv_configure_stack_guard)
GTEXT(z_riscv_fpu_thread_context_switch)
/* void z_riscv_switch(k_thread_t *switch_to, k_thread_t *switch_from) */
SECTION_FUNC(TEXT, z_riscv_switch)
@@ -53,18 +40,6 @@ SECTION_FUNC(TEXT, z_riscv_switch)
/* Save the old thread's callee-saved registers */
DO_CALLEE_SAVED(sr, a1)
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* Assess whether floating-point registers need to be saved. */
lb t0, _thread_offset_to_user_options(a1)
andi t0, t0, K_FP_REGS
beqz t0, skip_store_fp_callee_saved
frcsr t0
sw t0, _thread_offset_to_fcsr(a1)
DO_FP_CALLEE_SAVED(fsr, a1)
skip_store_fp_callee_saved:
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
/* Save the old thread's stack pointer */
sr sp, _thread_offset_to_sp(a1)
@@ -79,11 +54,15 @@ skip_store_fp_callee_saved:
lr tp, _thread_offset_to_tls(a0)
#endif
#if defined(CONFIG_FPU_SHARING)
/* Preserve a0 across following call. s0 is not yet restored. */
mv s0, a0
call z_riscv_fpu_thread_context_switch
mv a0, s0
#endif
#if defined(CONFIG_PMP_STACK_GUARD)
/*
* Stack guard has priority over user space for PMP usage.
* Preserve a0 across following call. s0 is not yet restored.
*/
/* Stack guard has priority over user space for PMP usage. */
mv s0, a0
call z_riscv_pmp_stackguard_enable
mv a0, s0
@@ -111,27 +90,5 @@ not_user_task:
/* Restore the new thread's callee-saved registers */
DO_CALLEE_SAVED(lr, a0)
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* Determine if we need to restore floating-point registers. */
lb t0, _thread_offset_to_user_options(a0)
li t1, MSTATUS_FS_INIT
andi t0, t0, K_FP_REGS
beqz t0, no_fp
/* Enable floating point access */
csrs mstatus, t1
/* Restore FP regs */
lw t1, _thread_offset_to_fcsr(a0)
fscsr t1
DO_FP_CALLEE_SAVED(flr, a0)
j 1f
no_fp:
/* Disable floating point access */
csrc mstatus, t1
1:
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
/* Return to arch_switch() or _irq_wrapper() */
ret