FPU context switching is always performed on demand through the FPU access exception handler. Actual task switching only grants or denies FPU access depending on the current FPU owner.

Because RISC-V has no dedicated FPU access exception, we must catch the Illegal Instruction exception and look for actual FP opcodes.

There is no longer a need to allocate FPU storage on the stack for every exception, which makes the esf smaller and stack overflows less likely.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Copyright (c) 2018 Foundries.io Ltd
 * Copyright (c) 2020 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/sys/util.h>
#include <zephyr/kernel.h>
#include <zephyr/syscall.h>
#include <zephyr/arch/riscv/csr.h>
#include <zephyr/arch/riscv/syscall.h>
#include "asm_macros.inc"

#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
#include <soc_isr_stacking.h>
#endif

/* Convenience macro for loading/storing register states. */
#define DO_CALLER_SAVED(op) \
	RV_E(	op t0, __z_arch_esf_t_t0_OFFSET(sp)	);\
	RV_E(	op t1, __z_arch_esf_t_t1_OFFSET(sp)	);\
	RV_E(	op t2, __z_arch_esf_t_t2_OFFSET(sp)	);\
	RV_I(	op t3, __z_arch_esf_t_t3_OFFSET(sp)	);\
	RV_I(	op t4, __z_arch_esf_t_t4_OFFSET(sp)	);\
	RV_I(	op t5, __z_arch_esf_t_t5_OFFSET(sp)	);\
	RV_I(	op t6, __z_arch_esf_t_t6_OFFSET(sp)	);\
	RV_E(	op a0, __z_arch_esf_t_a0_OFFSET(sp)	);\
	RV_E(	op a1, __z_arch_esf_t_a1_OFFSET(sp)	);\
	RV_E(	op a2, __z_arch_esf_t_a2_OFFSET(sp)	);\
	RV_E(	op a3, __z_arch_esf_t_a3_OFFSET(sp)	);\
	RV_E(	op a4, __z_arch_esf_t_a4_OFFSET(sp)	);\
	RV_E(	op a5, __z_arch_esf_t_a5_OFFSET(sp)	);\
	RV_I(	op a6, __z_arch_esf_t_a6_OFFSET(sp)	);\
	RV_I(	op a7, __z_arch_esf_t_a7_OFFSET(sp)	);\
	RV_E(	op ra, __z_arch_esf_t_ra_OFFSET(sp)	)

.macro get_current_cpu dst
#if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
	csrr \dst, mscratch
#else
	la \dst, _kernel + ___kernel_t_cpus_OFFSET
#endif
.endm
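
/*
 * Note: when CONFIG_SMP or CONFIG_USERSPACE is enabled, mscratch is
 * expected to hold this CPU's &_current_cpu (set up during early init),
 * so it can be fetched with a single csrr. Otherwise there is only
 * _kernel.cpus[0], whose address is a link-time constant.
 */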

/* imports */
GDATA(_sw_isr_table)
GTEXT(__soc_is_irq)
GTEXT(__soc_handle_irq)
GTEXT(_Fault)
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
GTEXT(__soc_save_context)
GTEXT(__soc_restore_context)
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

GTEXT(z_riscv_fatal_error)
GTEXT(z_get_next_switch_handle)
GTEXT(z_riscv_switch)
GTEXT(z_riscv_thread_start)

#ifdef CONFIG_TRACING
GTEXT(sys_trace_isr_enter)
GTEXT(sys_trace_isr_exit)
#endif

#ifdef CONFIG_USERSPACE
GDATA(_k_syscall_table)
#endif

/* exports */
GTEXT(_isr_wrapper)

/* use ABI names of registers for the sake of simplicity */

/*
 * Generic architecture-level IRQ handling, along with callouts to
 * SoC-specific routines.
 *
 * Architecture-level IRQ handling includes basic context save/restore
 * of standard registers and calling ISRs registered at Zephyr's driver
 * level.
 *
 * Since RISC-V does not completely prescribe IRQ handling behavior,
 * implementations vary (and some deviate from the standard behavior
 * that is defined). Hence, the arch-level code expects the following
 * functions to be provided at the SoC level:
 *
 * - __soc_is_irq: decide if we're handling an interrupt or an exception
 * - __soc_handle_irq: handle SoC-specific details for a pending IRQ
 *   (e.g. clear a pending bit in a SoC-specific register)
 *
 * If CONFIG_RISCV_SOC_CONTEXT_SAVE=y, calls to SoC-level context save/restore
 * routines are also made here. For details, see the Kconfig help text.
 */

/*
 * Handler called upon each exception/interrupt/fault
 */
SECTION_FUNC(exception.entry, _isr_wrapper)

#ifdef CONFIG_USERSPACE
	/* retrieve address of _current_cpu preserving s0 */
	csrrw s0, mscratch, s0
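
	/*
	 * Note: csrrw reads and writes the CSR in one instruction: s0 now
	 * holds &_current_cpu while the interrupted context's s0 value is
	 * parked in mscratch until the second csrrw below swaps them back.
	 */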

	/* preserve t0 and t1 temporarily */
	sr t0, _curr_cpu_arch_user_exc_tmp0(s0)
	sr t1, _curr_cpu_arch_user_exc_tmp1(s0)

	/* determine if we came from user space */
	csrr t0, mstatus
	li t1, MSTATUS_MPP
	and t0, t0, t1
	bnez t0, 1f

	/* we were in user space: switch to our privileged stack */
	mv t0, sp
	lr sp, _curr_cpu_arch_user_exc_sp(s0)

	/* Save the user stack value. Coming from user space, we know this
	 * can't overflow the privileged stack. The esf will be allocated
	 * later but it is safe to store our saved user sp here. */
	sr t0, (-__z_arch_esf_t_SIZEOF + __z_arch_esf_t_sp_OFFSET)(sp)

	/* Make sure the tls pointer is sane */
	lr t0, ___cpu_t_current_OFFSET(s0)
	lr tp, _thread_offset_to_tls(t0)

	/* Clear our per-thread usermode flag */
	lui t0, %tprel_hi(is_user_mode)
	add t0, t0, tp, %tprel_add(is_user_mode)
	sb zero, %tprel_lo(is_user_mode)(t0)
1:
	/* retrieve original t0/t1 values */
	lr t0, _curr_cpu_arch_user_exc_tmp0(s0)
	lr t1, _curr_cpu_arch_user_exc_tmp1(s0)

	/* retrieve original s0 and restore _current_cpu in mscratch */
	csrrw s0, mscratch, s0
#endif

#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
	SOC_ISR_SW_STACKING
#else
	/* Save caller-saved registers on the current thread stack. */
	addi sp, sp, -__z_arch_esf_t_SIZEOF
	DO_CALLER_SAVED(sr)	;
#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */
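
	/*
	 * Note: the lr/sr mnemonics used throughout this file are presumed
	 * to be the register-width load/store macros from asm_macros.inc
	 * (expanding to lw/sw on RV32 and ld/sd on RV64), so the same code
	 * serves both XLENs.
	 */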

	/* Save s0 in the esf and load it with &_current_cpu. */
	sr s0, __z_arch_esf_t_s0_OFFSET(sp)
	get_current_cpu s0

	/* Save MEPC register */
	csrr t0, mepc
	sr t0, __z_arch_esf_t_mepc_OFFSET(sp)

	/* Save MSTATUS register */
	csrr t2, mstatus
	sr t2, __z_arch_esf_t_mstatus_OFFSET(sp)

#if defined(CONFIG_FPU_SHARING)
	/* determine if this is an Illegal Instruction exception */
	csrr t0, mcause
	li t1, 2		/* 2 = illegal instruction */
	bne t0, t1, no_fp
	/* determine if FPU access was disabled */
	csrr t0, mstatus
	li t1, MSTATUS_FS
	and t0, t0, t1
	bnez t0, no_fp
	/* determine if we trapped on an FP instruction. */
	csrr t2, mtval		/* get faulting instruction */
	andi t0, t2, 0x7f	/* keep only the opcode bits */
	xori t1, t0, 0b1010011	/* OP-FP */
	beqz t1, is_fp
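	/*
	 * The LOAD-FP (0b0000111) and STORE-FP (0b0100111) major opcodes
	 * differ only in bit 5, so setting that bit folds both cases into
	 * a single comparison against STORE-FP.
	 */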
	ori t0, t0, 0b0100000
	xori t1, t0, 0b0100111	/* LOAD-FP / STORE-FP */
#if !defined(CONFIG_RISCV_ISA_EXT_C)
	bnez t1, no_fp
#else
	beqz t1, is_fp
	/* remaining non-RVC (0b11) and RVC quadrant 0b01 are not FP instructions */
	andi t1, t0, 1
	bnez t1, no_fp
	/*
	 * 001...........00 = C.FLD    RV32/64  (RV128 = C.LQ)
	 * 001...........10 = C.FLDSP  RV32/64  (RV128 = C.LQSP)
	 * 011...........00 = C.FLW    RV32     (RV64/128 = C.LD)
	 * 011...........10 = C.FLWSP  RV32     (RV64/128 = C.LDSP)
	 * 101...........00 = C.FSD    RV32/64  (RV128 = C.SQ)
	 * 101...........10 = C.FSDSP  RV32/64  (RV128 = C.SQSP)
	 * 111...........00 = C.FSW    RV32     (RV64/128 = C.SD)
	 * 111...........10 = C.FSWSP  RV32     (RV64/128 = C.SDSP)
	 *
	 * so must be .01............. on RV64 and ..1............. on RV32.
	 */
	srli t0, t2, 8
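	/*
	 * After the shift, instruction bits [14:13] sit in t0 bits [6:5],
	 * which is what the masks below test.
	 */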
#if defined(CONFIG_64BIT)
	andi t1, t0, 0b01100000
	xori t1, t1, 0b00100000
	bnez t1, no_fp
#else
	andi t1, t0, 0b00100000
	beqz t1, no_fp
#endif
#endif /* CONFIG_RISCV_ISA_EXT_C */

is_fp:	/* Process the FP trap and quickly return from exception */
	la ra, fp_trap_exit
	mv a0, sp
	tail z_riscv_fpu_trap
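
	/*
	 * Note: ra was pointed at fp_trap_exit above, so when z_riscv_fpu_trap
	 * returns, execution resumes at the tail end of this handler and
	 * bypasses the reschedule check and SoC context restore.
	 */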

no_fp:	/* increment _current->arch.exception_depth */
	lr t0, ___cpu_t_current_OFFSET(s0)
	lb t1, _thread_offset_to_exception_depth(t0)
	add t1, t1, 1
	sb t1, _thread_offset_to_exception_depth(t0)

	/* configure the FPU for exception mode */
	call z_riscv_fpu_enter_exc
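	/*
	 * Note: this is understood to disable FPU access while in exception
	 * mode, consistent with the esf no longer reserving FPU storage:
	 * any FP use by exception code would trap and be handled explicitly.
	 */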
#endif

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	/* Handle context saving at SOC level. */
	addi a0, sp, __z_arch_esf_t_soc_context_OFFSET
	jal ra, __soc_save_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

	/*
	 * Check if the exception is the result of an interrupt or not
	 * (SOC dependent). Following the RISC-V architecture spec, the MSB
	 * of the mcause register is used to indicate whether an exception
	 * is the result of an interrupt or an exception/fault. But for some
	 * SOCs (like pulpino or riscv-qemu), the MSB is never set to indicate
	 * an interrupt. Hence, check for interrupt/exception via the
	 * __soc_is_irq function (which needs to be implemented by each SOC).
	 * The result is returned via register a0 (1: interrupt, 0: exception).
	 */
	jal ra, __soc_is_irq

	/* If a0 != 0, jump to is_interrupt */
	bnez a0, is_interrupt

	/*
	 * If the exception is the result of an ECALL, check whether to
	 * perform a context switch or an IRQ offload. Otherwise call _Fault
	 * to report the exception.
	 */
	csrr t0, mcause
	li t2, SOC_MCAUSE_EXP_MASK
	and t0, t0, t2

	/*
	 * If mcause == SOC_MCAUSE_ECALL_EXP, handle the system call from
	 * a kernel thread.
	 */
	li t1, SOC_MCAUSE_ECALL_EXP
	beq t0, t1, is_kernel_syscall

#ifdef CONFIG_USERSPACE
	/*
	 * If mcause == SOC_MCAUSE_USER_ECALL_EXP, handle the system call
	 * from a user mode thread.
	 */
	li t1, SOC_MCAUSE_USER_ECALL_EXP
	beq t0, t1, is_user_syscall
#endif /* CONFIG_USERSPACE */

	/*
	 * Call _Fault to handle the exception.
	 * The stack pointer points to a z_arch_esf_t structure; pass it
	 * to _Fault (via register a0).
	 * Should _Fault return, the return address is set to no_reschedule
	 * so the stack is restored.
	 */
	mv a0, sp
	la ra, no_reschedule
	tail _Fault

is_kernel_syscall:
	/*
	 * A syscall is the result of an ecall instruction, in which case the
	 * MEPC will contain the address of the ecall instruction.
	 * Increment saved MEPC by 4 to prevent triggering the same ecall
	 * again upon exiting the ISR.
	 *
	 * It's safe to always increment by 4, even with compressed
	 * instructions, because the ecall instruction is always 4 bytes.
	 */
	lr t0, __z_arch_esf_t_mepc_OFFSET(sp)
	addi t0, t0, 4
	sr t0, __z_arch_esf_t_mepc_OFFSET(sp)

#ifdef CONFIG_PMP_STACK_GUARD
	/* Re-activate PMP for m-mode */
	li t1, MSTATUS_MPP
	csrc mstatus, t1
	li t1, MSTATUS_MPRV
	csrs mstatus, t1
#endif

	/* Determine what to do. The operation code is in t0. */
	lr t0, __z_arch_esf_t_t0_OFFSET(sp)
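
	/* Build-time check: the beqz below relies on RV_ECALL_RUNTIME_EXCEPT being 0. */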
	.if RV_ECALL_RUNTIME_EXCEPT != 0; .err; .endif
	beqz t0, do_fault

#if defined(CONFIG_IRQ_OFFLOAD)
	li t1, RV_ECALL_IRQ_OFFLOAD
	beq t0, t1, do_irq_offload
#endif

#ifdef CONFIG_RISCV_ALWAYS_SWITCH_THROUGH_ECALL
	li t1, RV_ECALL_SCHEDULE
	bne t0, t1, skip_schedule
	lr a0, __z_arch_esf_t_a0_OFFSET(sp)
	lr a1, __z_arch_esf_t_a1_OFFSET(sp)
	j reschedule
skip_schedule:
#endif

	/* default fault code is K_ERR_KERNEL_OOPS */
	li a0, 3
	j 1f

do_fault:
	/* Handle RV_ECALL_RUNTIME_EXCEPT. Retrieve the reason in a0, esf in a1. */
	lr a0, __z_arch_esf_t_a0_OFFSET(sp)
1:	mv a1, sp
	tail z_riscv_fatal_error

#if defined(CONFIG_IRQ_OFFLOAD)
do_irq_offload:
	/*
	 * Retrieve the provided routine and argument from the stack.
	 * The routine pointer is in saved a0 and the argument in saved a1,
	 * so we load them into a1/a0 (reversed).
	 */
	lr a1, __z_arch_esf_t_a0_OFFSET(sp)
	lr a0, __z_arch_esf_t_a1_OFFSET(sp)

	/* Increment _current_cpu->nested */
	lw t1, ___cpu_t_nested_OFFSET(s0)
	addi t2, t1, 1
	sw t2, ___cpu_t_nested_OFFSET(s0)
	bnez t1, 1f

	/* Switch to the interrupt stack */
	mv t0, sp
	lr sp, ___cpu_t_irq_stack_OFFSET(s0)

	/* Save the thread stack pointer on the interrupt stack */
	addi sp, sp, -16
	sr t0, 0(sp)
1:
	/* Execute the provided routine (the argument is in a0 already). */
	jalr ra, a1, 0

	/* Leave through the regular IRQ exit path */
	j irq_done
#endif /* CONFIG_IRQ_OFFLOAD */

#ifdef CONFIG_USERSPACE
is_user_syscall:

#ifdef CONFIG_PMP_STACK_GUARD
	/*
	 * We came from userspace and need to reconfigure the
	 * PMP for the kernel mode stack guard.
	 */
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_stackguard_enable
#endif

	/* It is safe to re-enable IRQs now */
	csrs mstatus, MSTATUS_IEN
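
	/*
	 * Note: the esf is fully populated and we are on the privileged
	 * stack at this point, so a nested exception here can be handled
	 * safely.
	 */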

	/*
	 * Same as for is_kernel_syscall: increment saved MEPC by 4 to
	 * prevent triggering the same ecall again upon exiting the ISR.
	 */
	lr t1, __z_arch_esf_t_mepc_OFFSET(sp)
	addi t1, t1, 4
	sr t1, __z_arch_esf_t_mepc_OFFSET(sp)

	/* Restore argument registers from the user stack */
	lr a0, __z_arch_esf_t_a0_OFFSET(sp)
	lr a1, __z_arch_esf_t_a1_OFFSET(sp)
	lr a2, __z_arch_esf_t_a2_OFFSET(sp)
	lr a3, __z_arch_esf_t_a3_OFFSET(sp)
	lr a4, __z_arch_esf_t_a4_OFFSET(sp)
	lr a5, __z_arch_esf_t_a5_OFFSET(sp)
	lr t0, __z_arch_esf_t_t0_OFFSET(sp)
#if defined(CONFIG_RISCV_ISA_RV32E)
	/* Stack alignment for RV32E is 4 bytes */
	addi sp, sp, -4
	mv t1, sp
	sw t1, 0(sp)
#else
	mv a6, sp
#endif /* CONFIG_RISCV_ISA_RV32E */

	/* validate syscall limit */
	li t1, K_SYSCALL_LIMIT
	bltu t0, t1, valid_syscall_id

	/* bad syscall id. Set arg1 to the bad id and call_id to SYSCALL_BAD */
	mv a0, t0
	li t0, K_SYSCALL_BAD

valid_syscall_id:

	la t2, _k_syscall_table

	slli t1, t0, RV_REGSHIFT	# Determine offset from index value
	add t2, t2, t1			# Table addr + offset = function addr
	lr t2, 0(t2)			# Load function address

	/* Execute the syscall function */
	jalr ra, t2, 0

#if defined(CONFIG_RISCV_ISA_RV32E)
	addi sp, sp, 4
#endif /* CONFIG_RISCV_ISA_RV32E */

	/* Update a0 (the return value) on the stack */
	sr a0, __z_arch_esf_t_a0_OFFSET(sp)

	/* Disable IRQs again before leaving */
	csrc mstatus, MSTATUS_IEN
	j might_have_rescheduled
#endif /* CONFIG_USERSPACE */

is_interrupt:

#ifdef CONFIG_PMP_STACK_GUARD
#ifdef CONFIG_USERSPACE
	/*
	 * If we came from userspace then we need to reconfigure the
	 * PMP for the kernel mode stack guard.
	 */
	lr t0, __z_arch_esf_t_mstatus_OFFSET(sp)
	li t1, MSTATUS_MPP
	and t0, t0, t1
	bnez t0, 1f
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_stackguard_enable
	j 2f
#endif /* CONFIG_USERSPACE */
1:	/* Re-activate PMP for m-mode */
	li t1, MSTATUS_MPP
	csrc mstatus, t1
	li t1, MSTATUS_MPRV
	csrs mstatus, t1
2:
#endif

	/* Increment _current_cpu->nested */
	lw t1, ___cpu_t_nested_OFFSET(s0)
	addi t2, t1, 1
	sw t2, ___cpu_t_nested_OFFSET(s0)
	bnez t1, on_irq_stack

	/* Switch to the interrupt stack */
	mv t0, sp
	lr sp, ___cpu_t_irq_stack_OFFSET(s0)

	/*
	 * Save the thread stack pointer on the interrupt stack.
	 * In RISC-V, the stack pointer must be 16-byte aligned.
	 */
	addi sp, sp, -16
	sr t0, 0(sp)

on_irq_stack:

#ifdef CONFIG_TRACING_ISR
	call sys_trace_isr_enter
#endif

	/* Get the IRQ causing the interrupt */
	csrr a0, mcause
	li t0, SOC_MCAUSE_EXP_MASK
	and a0, a0, t0

	/*
	 * Clear the pending IRQ generating the interrupt at SOC level.
	 * Pass the IRQ number to __soc_handle_irq via register a0.
	 */
	jal ra, __soc_handle_irq

	/*
	 * Call the corresponding registered function in _sw_isr_table.
	 * (Each table entry is two words wide, so shift the index accordingly.)
	 */
	la t0, _sw_isr_table
	slli a0, a0, (RV_REGSHIFT + 1)
	add t0, t0, a0

	/* Load the argument into register a0 */
	lr a0, 0(t0)

	/* Load the ISR function address into register t1 */
	lr t1, RV_REGSIZE(t0)

	/* Call the ISR function */
	jalr ra, t1, 0

#ifdef CONFIG_TRACING_ISR
	call sys_trace_isr_exit
#endif

irq_done:
	/* Decrement _current_cpu->nested */
	lw t2, ___cpu_t_nested_OFFSET(s0)
	addi t2, t2, -1
	sw t2, ___cpu_t_nested_OFFSET(s0)
	bnez t2, no_reschedule

	/* nested count is back to 0: return to the thread stack */
	lr sp, 0(sp)

#ifdef CONFIG_STACK_SENTINEL
	call z_check_stack_sentinel
#endif

check_reschedule:

	/* Get a pointer to the current thread on this CPU */
	lr a1, ___cpu_t_current_OFFSET(s0)

	/*
	 * Get the next thread to schedule with z_get_next_switch_handle().
	 * We pass it NULL since we haven't saved the whole thread context
	 * yet. If no scheduling is necessary then NULL is returned.
	 */
	addi sp, sp, -16
	sr a1, 0(sp)
	mv a0, zero
	call z_get_next_switch_handle
	lr a1, 0(sp)
	addi sp, sp, 16
	beqz a0, no_reschedule

reschedule:

	/*
	 * Perform context switch:
	 * a0 = new thread
	 * a1 = old thread
	 */
	call z_riscv_switch
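
	/*
	 * Note: when the outgoing thread is eventually switched back in,
	 * execution resumes right here on its own stack. Freshly created
	 * threads are presumed to enter at z_riscv_thread_start below via
	 * the switch frame set up at thread creation, which is why s0 may
	 * still be unset at this point.
	 */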

z_riscv_thread_start:
might_have_rescheduled:
	/* reload s0 with &_current_cpu as it might have changed or be unset */
	get_current_cpu s0

no_reschedule:

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	/* Restore context at SOC level */
	addi a0, sp, __z_arch_esf_t_soc_context_OFFSET
	jal ra, __soc_restore_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

#if defined(CONFIG_FPU_SHARING)
	/* FPU handling upon exception mode exit */
	mv a0, sp
	call z_riscv_fpu_exit_exc

	/* decrement _current->arch.exception_depth */
	lr t0, ___cpu_t_current_OFFSET(s0)
	lb t1, _thread_offset_to_exception_depth(t0)
	add t1, t1, -1
	sb t1, _thread_offset_to_exception_depth(t0)
fp_trap_exit:
#endif

	/* Restore MEPC and MSTATUS registers */
	lr t0, __z_arch_esf_t_mepc_OFFSET(sp)
	lr t2, __z_arch_esf_t_mstatus_OFFSET(sp)
	csrw mepc, t0
	csrw mstatus, t2

#ifdef CONFIG_USERSPACE
	/*
	 * Check if we are returning to user mode. If so then we must
	 * set is_user_mode to true and preserve our kernel mode stack for
	 * the next exception to come.
	 */
	li t1, MSTATUS_MPP
	and t0, t2, t1
	bnez t0, 1f

#ifdef CONFIG_PMP_STACK_GUARD
	/* Remove the kernel stack guard and reconfigure the PMP for user mode */
	lr a0, ___cpu_t_current_OFFSET(s0)
	call z_riscv_pmp_usermode_enable
#endif

	/* Set our per-thread usermode flag */
	li t1, 1
	lui t0, %tprel_hi(is_user_mode)
	add t0, t0, tp, %tprel_add(is_user_mode)
	sb t1, %tprel_lo(is_user_mode)(t0)

	/* preserve the stack pointer for the next exception entry */
	add t0, sp, __z_arch_esf_t_SIZEOF
	sr t0, _curr_cpu_arch_user_exc_sp(s0)
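
	/*
	 * Note: this is the same per-CPU slot the entry code reads with
	 * "lr sp, _curr_cpu_arch_user_exc_sp(s0)", so the next trap from
	 * user mode starts again from the top of this privileged stack.
	 */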

	j 2f
1:
	/*
	 * We are returning to kernel mode. Store the stack pointer to
	 * be re-loaded further down.
	 */
	addi t0, sp, __z_arch_esf_t_SIZEOF
	sr t0, __z_arch_esf_t_sp_OFFSET(sp)
2:
#endif

	/* Restore s0 (it is no longer ours) */
	lr s0, __z_arch_esf_t_s0_OFFSET(sp)

#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
	SOC_ISR_SW_UNSTACKING
#else
	/* Restore caller-saved registers from the thread stack */
	DO_CALLER_SAVED(lr)
#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */

#ifdef CONFIG_USERSPACE
	/* retrieve the saved stack pointer */
	lr sp, __z_arch_esf_t_sp_OFFSET(sp)
#else
	/* remove the esf from the stack */
	addi sp, sp, __z_arch_esf_t_SIZEOF
#endif

	mret