arch/riscv: Use arch_switch() for context swap

Enable `arch_switch()` as preparation for SMP support. This patch
doesn't try to keep support for the old-style context swap - only
switch-based swap is supported, to keep things simple.

A fair amount of refactoring was done in this patch, especially around
the code that decides what the ISR should do. On RISC-V, ECALL
instructions are used to signal several events, such as user space
system calls, forced syscalls, IRQ offload, return from syscall and
context switch. All of these are handled by the ISR, which also handles
interrupts. After the refactor, this "dispatching" step is done at the
beginning of the ISR (just after saving the general registers).
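
In C terms, the dispatch step amounts to the classification below. This is an
illustrative sketch only, not code from the patch: the real logic is the RISC-V
assembly added to isr.S, the CONFIG_USERSPACE/CONFIG_IRQ_OFFLOAD guards are
omitted, and the parameters stand in for state the assembly reads directly from
CSRs and the saved frame. The RISCV_IRQ_*, SOC_MCAUSE_* and FORCE_SYSCALL_ID
symbols are the ones introduced or used in the diff.

    /* Illustrative C equivalent of the new dispatch step (not part of the patch). */
    extern char z_riscv_do_syscall_start[];
    extern char z_riscv_do_syscall_end[];

    static int dispatch_reason(int soc_is_irq, unsigned long mcause_exp,
                               unsigned long mepc, unsigned long a7,
                               int offload_pending)
    {
            if (soc_is_irq) {
                    return RISCV_IRQ_INTERRUPT;
            }
            if (mcause_exp == SOC_MCAUSE_USER_ECALL_EXP) {
                    return RISCV_IRQ_USER_ECALL;
            }
            if (mcause_exp != SOC_MCAUSE_ECALL_EXP) {
                    return RISCV_IRQ_EXCEPTION;
            }
            /* machine-mode ECALL: several kernel-side uses share it */
            if (mepc >= (unsigned long)z_riscv_do_syscall_start &&
                mepc <= (unsigned long)z_riscv_do_syscall_end) {
                    return RISCV_IRQ_RETURN_FROM_SYSCALL;
            }
            if (a7 == FORCE_SYSCALL_ID) {
                    return RISCV_IRQ_FORCED_SYSCALL;
            }
            if (offload_pending) {
                    return RISCV_IRQ_OFFLOAD;
            }
            return RISCV_IRQ_CONTEXT_SWITCH;
    }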

As on other platforms, the thread object itself is used as the thread's
"switch handle" for the context swap.

Signed-off-by: Ederson de Souza <ederson.desouza@intel.com>
Commit: be28de692c
Author: Ederson de Souza <ederson.desouza@intel.com>, 2021-12-21 16:44:57 -08:00
Committed by: Anas Nashif
6 changed files with 246 additions and 202 deletions


@@ -107,6 +107,8 @@ config RISCV
 select HAS_DTS
 select ARCH_SUPPORTS_COREDUMP
 select ARCH_HAS_THREAD_LOCAL_STORAGE
+select USE_SWITCH
+select USE_SWITCH_SUPPORTED
 imply XIP
 help
   RISCV architecture


@@ -12,7 +12,7 @@ zephyr_library_sources(
 prep_c.c
 reboot.c
 reset.S
-swap.S
+switch.S
 thread.c
 )


@@ -222,6 +222,14 @@
 la ret, _kernel ;\
 add ret, ret, temp ;
+#define RISCV_IRQ_INTERRUPT 0x1
+#define RISCV_IRQ_USER_ECALL 0x2
+#define RISCV_IRQ_EXCEPTION 0x3
+#define RISCV_IRQ_RETURN_FROM_SYSCALL 0x4
+#define RISCV_IRQ_FORCED_SYSCALL 0x5
+#define RISCV_IRQ_OFFLOAD 0x6
+#define RISCV_IRQ_CONTEXT_SWITCH 0x7
 /* imports */
 GDATA(_sw_isr_table)
 GTEXT(__soc_is_irq)
@@ -232,9 +240,7 @@ GTEXT(__soc_save_context)
 GTEXT(__soc_restore_context)
 #endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
-GTEXT(_k_neg_eagain)
-GTEXT(_is_next_thread_current)
-GTEXT(z_get_next_ready_thread)
+GTEXT(z_get_next_switch_handle)
 #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
 GTEXT(z_thread_mark_switched_in)
@@ -300,22 +306,110 @@ SECTION_FUNC(exception.entry, __irq_wrapper)
 /*
  * Save caller-saved registers on current thread stack.
- * NOTE: need to be updated to account for floating-point registers
+ * floating-point registers should be accounted for when corresponding
+ * config variable is set
  */
 STORE_CALLER_SAVED()

+/* Let's quickly figure out why we're here: context switch, IRQ offload,
+ * user syscall, forced syscall, IRQ or returning from syscall.
+ * Save this information in a0 to guide flow after.
+ */
+
+/*
+ * Check if exception is the result of an interrupt or not.
+ * (SOC dependent). Following the RISC-V architecture spec, the MSB
+ * of the mcause register is used to indicate whether an exception
+ * is the result of an interrupt or an exception/fault. But for some
+ * SOCs (like pulpino or riscv-qemu), the MSB is never set to indicate
+ * interrupt. Hence, check for interrupt/exception via the __soc_is_irq
+ * function (that needs to be implemented by each SOC). The result is
+ * returned via register a0 (1: interrupt, 0 exception)
+ */
+jal ra, __soc_is_irq
+bnez a0, dispatch_end
+
+/* Is our exception an ECALL? From machine or user mode? */
+csrr t0, mcause
+li t1, SOC_MCAUSE_EXP_MASK
+and t0, t1, t0
+
+li t1, SOC_MCAUSE_ECALL_EXP
+beq t0, t1, dispatch_kernel_syscall
+
+#ifdef CONFIG_USERSPACE
+li t1, SOC_MCAUSE_USER_ECALL_EXP
+bne t0, t1, dispatch_exception
+li a0, RISCV_IRQ_USER_ECALL
+j dispatch_end
+dispatch_exception:
+#endif
+
+/* If nothing else, this is an exception */
+li a0, RISCV_IRQ_EXCEPTION
+j dispatch_end
+
+dispatch_kernel_syscall:
+/* Kernel syscall, it can still be context switch, IRQ offload,
+   forced syscall or returning from syscall. */
+#ifdef CONFIG_USERSPACE
+/* Check if it is a return from user syscall */
+csrr t0, mepc
+la t1, z_riscv_do_syscall_start
+bltu t0, t1, dispatch_not_return_from_syscall
+la t1, z_riscv_do_syscall_end
+bgtu t0, t1, dispatch_not_return_from_syscall
+li a0, RISCV_IRQ_RETURN_FROM_SYSCALL
+j dispatch_end
+dispatch_not_return_from_syscall:
+
+/* Could still be forced syscall. */
+li t0, FORCE_SYSCALL_ID
+bne a7, t0, dispatch_not_forced_syscall
+li a0, RISCV_IRQ_FORCED_SYSCALL
+j dispatch_end
+dispatch_not_forced_syscall:
+#endif /* CONFIG_USERSPACE */
+
+#ifdef CONFIG_IRQ_OFFLOAD
+/*
+ * Determine if the system call is the result of an IRQ offloading.
+ * Done by checking if _offload_routine is not pointing to NULL.
+ */
+la t0, _offload_routine
+RV_OP_LOADREG t1, 0x00(t0)
+beqz t1, dispatch_not_irq_offload
+li a0, RISCV_IRQ_OFFLOAD
+j dispatch_end
+dispatch_not_irq_offload:
+#endif
+
+/* Context switch be it then. */
+li a0, RISCV_IRQ_CONTEXT_SWITCH
+
+dispatch_end:

 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
-/* Assess whether floating-point registers need to be saved. */
+/* Assess whether floating-point registers need to be saved.
+ * Note that there's a catch here: if we're performing a
+ * context switch, _current is *not* the outgoing thread - that
+ * can be found via CONTAINER_OF(a1).
+ */
+li t0, RISCV_IRQ_CONTEXT_SWITCH
+beq a0, t0, store_fp_caller_context_switch
 GET_CPU(t0, t1)
 RV_OP_LOADREG t0, ___cpu_t_current_OFFSET(t0)
+j store_fp_caller_saved
+
+store_fp_caller_context_switch:
+RV_OP_LOADREG a1, __z_arch_esf_t_a1_OFFSET(sp)
+addi t0, a1, -___thread_t_switch_handle_OFFSET
+
+store_fp_caller_saved:
+/* t0 should be the thread to have its context saved */
 RV_OP_LOADREG t0, _thread_offset_to_user_options(t0)
 andi t0, t0, K_FP_REGS
 RV_OP_STOREREG t0, __z_arch_esf_t_fp_state_OFFSET(sp)
 beqz t0, skip_store_fp_caller_saved
 STORE_FP_CALLER_SAVED(sp)
 skip_store_fp_caller_saved:

 #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
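
For SoCs that do follow the RISC-V spec, the __soc_is_irq() hook reduces to
testing the most significant bit of mcause. A minimal, self-contained
illustration (not any particular SoC's implementation):

    #include <stdbool.h>

    /* The spec-defined test: the MSB of mcause distinguishes interrupts from
     * synchronous exceptions. SoCs provide __soc_is_irq() because some of
     * them do not follow this rule. */
    static bool mcause_is_interrupt(unsigned long mcause)
    {
            const unsigned long msb = 1UL << (sizeof(unsigned long) * 8 - 1);

            return (mcause & msb) != 0;
    }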
@@ -329,8 +423,14 @@ skip_store_fp_caller_saved:
 #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
 /* Handle context saving at SOC level. */
+addi sp, sp, -16
+RV_OP_STOREREG a0, 0x00(sp)
 addi a0, sp, __z_arch_esf_t_soc_context_OFFSET
 jal ra, __soc_save_context
+RV_OP_LOADREG a0, 0x00(sp)
+addi sp, sp, 16
 #endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */

 #ifdef CONFIG_USERSPACE
@@ -371,21 +471,9 @@ is_priv_sp:
 sb zero, 0x00(t0)
 #endif /* CONFIG_USERSPACE */

-/*
- * Check if exception is the result of an interrupt or not.
- * (SOC dependent). Following the RISC-V architecture spec, the MSB
- * of the mcause register is used to indicate whether an exception
- * is the result of an interrupt or an exception/fault. But for some
- * SOCs (like pulpino or riscv-qemu), the MSB is never set to indicate
- * interrupt. Hence, check for interrupt/exception via the __soc_is_irq
- * function (that needs to be implemented by each SOC). The result is
- * returned via register a0 (1: interrupt, 0 exception)
- */
-jal ra, __soc_is_irq
-
-/* If a0 != 0, jump to is_interrupt */
-addi t1, x0, 0
-bnez a0, is_interrupt
+/* Jump to is_interrupt to handle interrupts. */
+li t0, RISCV_IRQ_INTERRUPT
+beq a0, t0, is_interrupt

 #ifdef CONFIG_USERSPACE
 /* Reset IRQ flag */
@@ -393,32 +481,41 @@ is_priv_sp:
 sb zero, 0x00(t1)
 #endif /* CONFIG_USERSPACE */

-/*
- * If the exception is the result of an ECALL, check whether to
- * perform a context-switch or an IRQ offload. Otherwise call _Fault
- * to report the exception.
- */
-csrr t0, mcause
-li t2, SOC_MCAUSE_EXP_MASK
-and t0, t0, t2
-li t1, SOC_MCAUSE_ECALL_EXP
-
-/*
- * If mcause == SOC_MCAUSE_ECALL_EXP, handle system call from
- * kernel thread.
- */
-beq t0, t1, is_kernel_syscall
+li t0, RISCV_IRQ_EXCEPTION
+beq a0, t0, handle_exception

 #ifdef CONFIG_USERSPACE
-li t1, SOC_MCAUSE_USER_ECALL_EXP
-
-/*
- * If mcause == SOC_MCAUSE_USER_ECALL_EXP, handle system call from
- * user thread, otherwise handle fault.
- */
-beq t0, t1, is_user_syscall
+li t0, RISCV_IRQ_RETURN_FROM_SYSCALL
+beq a0, t0, return_from_syscall
 #endif /* CONFIG_USERSPACE */

+/* At this point, we're sure to be handling a syscall, which
+ * is a result of an ECALL instruction. Increment MEPC to
+ * to avoid triggering the same ECALL again when leaving the
+ * ISR.
+ *
+ * It's safe to always increment by 4, even with compressed
+ * instructions, because the ecall instruction is always 4 bytes.
+ */
+RV_OP_LOADREG t1, __z_arch_esf_t_mepc_OFFSET(sp)
+addi t1, t1, 4
+RV_OP_STOREREG t1, __z_arch_esf_t_mepc_OFFSET(sp)
+
+#ifdef CONFIG_USERSPACE
+li t0, RISCV_IRQ_USER_ECALL
+beq a0, t0, is_user_syscall
+#endif
+
+/* IRQ offload is handled by is_interrupt */
+li t0, RISCV_IRQ_OFFLOAD
+beq a0, t0, is_interrupt
+
+/* Both forced syscall and context switches are handled by
+ * handle_kernel_syscall.
+ */
+j handle_kernel_syscall
+
+handle_exception:
 /*
  * Call _Fault to handle exception.
  * Stack pointer is pointing to a z_arch_esf_t structure, pass it
@@ -442,7 +539,8 @@ user_fault:
 RV_OP_LOADREG t1, ___cpu_t_current_OFFSET(t0)
 RV_OP_LOADREG t0, _thread_offset_to_priv_stack_start(t1)
 RV_OP_STOREREG sp, _thread_offset_to_user_sp(t1) /* Update user SP */
-addi sp, t0, CONFIG_PRIVILEGED_STACK_SIZE
+li t2, CONFIG_PRIVILEGED_STACK_SIZE
+add sp, t0, t2
 tail _Fault

 supervisor_fault:
@@ -451,41 +549,7 @@ supervisor_fault:
 la ra, no_reschedule
 tail _Fault

-is_kernel_syscall:
-#ifdef CONFIG_USERSPACE
-/* Check if it is a return from user syscall */
-csrr t0, mepc
-la t1, z_riscv_do_syscall_start
-bltu t0, t1, not_user_syscall
-la t1, z_riscv_do_syscall_end
-bleu t0, t1, return_from_syscall
-not_user_syscall:
-#endif /* CONFIG_USERSPACE */
-
-/*
- * A syscall is the result of an ecall instruction, in which case the
- * MEPC will contain the address of the ecall instruction.
- * Increment saved MEPC by 4 to prevent triggering the same ecall
- * again upon exiting the ISR.
- *
- * It's safe to always increment by 4, even with compressed
- * instructions, because the ecall instruction is always 4 bytes.
- */
-RV_OP_LOADREG t0, __z_arch_esf_t_mepc_OFFSET(sp)
-addi t0, t0, 4
-RV_OP_STOREREG t0, __z_arch_esf_t_mepc_OFFSET(sp)
-
-#ifdef CONFIG_IRQ_OFFLOAD
-/*
- * Determine if the system call is the result of an IRQ offloading.
- * Done by checking if _offload_routine is not pointing to NULL.
- * If NULL, jump to reschedule to perform a context-switch, otherwise,
- * jump to is_interrupt to handle the IRQ offload.
- */
-la t0, _offload_routine
-RV_OP_LOADREG t1, 0x00(t0)
-bnez t1, is_interrupt
-#endif /* CONFIG_IRQ_OFFLOAD */
+handle_kernel_syscall:

 #ifdef CONFIG_PMP_STACK_GUARD
 li t0, MSTATUS_MPRV
 csrs mstatus, t0
@@ -494,6 +558,7 @@ not_user_syscall:
 csrrw sp, mscratch, sp
 csrr t0, mscratch
 addi sp, sp, -__z_arch_esf_t_SIZEOF
+
 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
 RV_OP_LOADREG t1, __z_arch_esf_t_fp_state_OFFSET(t0)
 beqz t1, skip_fp_move_kernel_syscall
@@ -510,16 +575,16 @@ skip_fp_move_kernel_syscall:
 #ifdef CONFIG_USERSPACE
 /*
  * Check for forced syscall,
- * otherwise go to reschedule to handle context-switch
+ * otherwise go to riscv_switch to handle context-switch
  */
 li t0, FORCE_SYSCALL_ID
-bne a7, t0, reschedule
+bne a7, t0, riscv_switch

 RV_OP_LOADREG a0, __z_arch_esf_t_a0_OFFSET(sp)

 /* Check for user_mode_enter function */
 la t0, arch_user_mode_enter
-bne t0, a0, reschedule
+bne t0, a0, riscv_switch

 RV_OP_LOADREG a0, __z_arch_esf_t_a1_OFFSET(sp)
 RV_OP_LOADREG a1, __z_arch_esf_t_a2_OFFSET(sp)
@@ -534,32 +599,22 @@ skip_fp_move_kernel_syscall:
 j z_riscv_user_mode_enter_syscall
 #endif /* CONFIG_USERSPACE */

 /*
- * Go to reschedule to handle context-switch
+ * Handle context-switch.
  */
-j reschedule
+riscv_switch:
+RV_OP_LOADREG a0, __z_arch_esf_t_a0_OFFSET(sp)
+RV_OP_LOADREG a1, __z_arch_esf_t_a1_OFFSET(sp)
+addi t1, a1, -___thread_t_switch_handle_OFFSET
+j do_switch

 #ifdef CONFIG_USERSPACE
 is_user_syscall:
 #ifdef CONFIG_PMP_STACK_GUARD
 GET_CPU(t0, t1)
 RV_OP_LOADREG a0, ___cpu_t_current_OFFSET(t0)
 jal ra, z_riscv_configure_stack_guard
-#endif /* CONFIG_PMP_STACK_GUARD */
-
-/*
- * A syscall is the result of an ecall instruction, in which case the
- * MEPC will contain the address of the ecall instruction.
- * Increment saved MEPC by 4 to prevent triggering the same ecall
- * again upon exiting the ISR.
- *
- * It is safe to always increment by 4, even with compressed
- * instructions, because the ecall instruction is always 4 bytes.
- */
-RV_OP_LOADREG t1, __z_arch_esf_t_mepc_OFFSET(sp)
-addi t1, t1, 4
-RV_OP_STOREREG t1, __z_arch_esf_t_mepc_OFFSET(sp)
-
-#ifdef CONFIG_PMP_STACK_GUARD
 /*
  * Copy ESF to user stack in case of rescheduling
  * directly from kernel ECALL (nested ECALL)
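
The addi t1, a1, -___thread_t_switch_handle_OFFSET above recovers the outgoing
thread from the address of its switch_handle field, i.e. the second argument of
arch_switch(). In C this is just CONTAINER_OF(), as the FPU comment earlier
also notes; an illustrative helper, assuming the pre-3.2 include paths:

    #include <kernel.h>
    #include <sys/util.h>  /* CONTAINER_OF() */

    /* Map &thread->switch_handle back to the thread itself. */
    static inline struct k_thread *outgoing_thread(void **switched_from)
    {
            return CONTAINER_OF(switched_from, struct k_thread, switch_handle);
    }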
@@ -591,7 +646,8 @@ skip_fp_copy_user_syscall:
 RV_OP_LOADREG t1, ___cpu_t_current_OFFSET(t0)
 RV_OP_LOADREG t0, _thread_offset_to_priv_stack_start(t1)
 RV_OP_STOREREG sp, _thread_offset_to_user_sp(t1) /* Update user SP */
-addi sp, t0, CONFIG_PRIVILEGED_STACK_SIZE
+li t2, CONFIG_PRIVILEGED_STACK_SIZE
+add sp, t0, t2

 /* validate syscall limit */
 li t0, K_SYSCALL_LIMIT
@@ -694,10 +750,10 @@ on_irq_stack:
 #ifdef CONFIG_IRQ_OFFLOAD
 /*
- * If we are here due to a system call, t1 register should != 0.
- * In this case, perform IRQ offloading, otherwise jump to call_irq
+ * Are we here to perform IRQ offloading?
  */
-beqz t1, call_irq
+li t0, RISCV_IRQ_OFFLOAD
+bne a0, t0, call_irq

 /*
  * Call z_irq_do_offload to handle IRQ offloading.
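
The _offload_routine check and the RISCV_IRQ_OFFLOAD tag back Zephyr's
irq_offload() facility. A typical, illustrative use from test code, assuming
the pre-3.2 include paths and the const void * parameter form of the API:

    #include <kernel.h>
    #include <irq_offload.h>

    /* Runs in interrupt context: on RISC-V, irq_offload() ends up issuing an
     * ECALL that the dispatcher tags as RISCV_IRQ_OFFLOAD and routes through
     * is_interrupt to z_irq_do_offload(). */
    static void offloaded_handler(const void *param)
    {
            ARG_UNUSED(param);
    }

    void run_in_irq_context(void)
    {
            irq_offload(offloaded_handler, NULL);
    }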
@@ -758,31 +814,51 @@ on_thread_stack:
 #ifdef CONFIG_STACK_SENTINEL
 call z_check_stack_sentinel
-GET_CPU(t1, t2)
 #endif

-#ifdef CONFIG_PREEMPT_ENABLED
-/*
- * Check if we need to perform a reschedule
- */
-/* Get pointer to _current_cpu.current */
-RV_OP_LOADREG t2, ___cpu_t_current_OFFSET(t1)
-/*
- * Check if next thread to schedule is current thread.
- * If yes do not perform a reschedule
- */
-RV_OP_LOADREG t3, _kernel_offset_to_ready_q_cache(t1)
-beq t3, t2, no_reschedule
-#else
-j no_reschedule
-#endif /* CONFIG_PREEMPT_ENABLED */
+reschedule:
+/* Get next thread to run and switch to it - or not, if the same */
+GET_CPU(t0, t1)
+RV_OP_LOADREG t1, ___cpu_t_current_OFFSET(t0)
+addi sp, sp, -16
+RV_OP_STOREREG t1, 0x00(sp)
+#ifdef CONFIG_SMP
+/* Send NULL so this function doesn't mark outgoing thread
+ * as available for pickup by another CPU. The catch: it will
+ * also return NULL. But luckily, it will update _current.
+ */
+li a0, 0
+call z_get_next_switch_handle
+GET_CPU(t0, t1)
+RV_OP_LOADREG a0, ___cpu_t_current_OFFSET(t0)
+#else
+mv a0, t1
+call z_get_next_switch_handle
+#endif
+RV_OP_LOADREG t1, 0x00(sp)
+addi sp, sp, 16
+/* From now on, t1 is the outgoing thread */
+beq a0, t1, no_reschedule
+mv a1, x0

 #ifdef CONFIG_PMP_STACK_GUARD
-RV_OP_LOADREG a0, ___cpu_t_current_OFFSET(t1)
+addi sp, sp, -32
+RV_OP_STOREREG a0, 0x00(sp)
+RV_OP_STOREREG a1, 0x08(sp)
+RV_OP_STOREREG t1, 0x10(sp)
+GET_CPU(t2, t3)
+mv a0, t1
 jal ra, z_riscv_configure_stack_guard
+RV_OP_LOADREG a0, 0x00(sp)
+RV_OP_LOADREG a1, 0x08(sp)
+RV_OP_LOADREG t1, 0x10(sp)
+addi sp, sp, 32

 /*
  * Move to saved SP and move ESF to retrieve it
  * after reschedule.
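
In C terms, the new reschedule block asks the scheduler for the next switch
handle and compares it with the outgoing thread; the beq a0, t1, no_reschedule
above is exactly that comparison. A sketch, assuming the kernel-internal
z_get_next_switch_handle() prototype and _current_cpu accessor (on this
architecture a switch handle is simply the thread pointer):

    /* Kernel-internal scheduler entry point used by switch-based arches. */
    void *z_get_next_switch_handle(void *interrupted);

    static struct k_thread *pick_next(struct k_thread *old)
    {
    #ifdef CONFIG_SMP
            /* NULL: don't publish 'old' for pickup by other CPUs yet; the
             * call still updates _current, but returns NULL. */
            (void)z_get_next_switch_handle(NULL);
            return _current_cpu->current;
    #else
            return z_get_next_switch_handle(old);
    #endif
    }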
@@ -791,13 +867,13 @@ on_thread_stack:
 csrr t0, mscratch
 addi sp, sp, -__z_arch_esf_t_SIZEOF
 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
-RV_OP_LOADREG t1, __z_arch_esf_t_fp_state_OFFSET(t0)
-beqz t1, skip_fp_move_irq
-COPY_ESF_FP(sp, t0, t1)
+RV_OP_LOADREG t2, __z_arch_esf_t_fp_state_OFFSET(t0)
+beqz t2, skip_fp_move_irq
+COPY_ESF_FP(sp, t0, t2)
 skip_fp_move_irq:
-COPY_ESF_FP_STATE(sp, t0, t1)
+COPY_ESF_FP_STATE(sp, t0, t2)
 #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
-COPY_ESF(sp, t0, t1)
+COPY_ESF(sp, t0, t2)
 addi t0, t0, __z_arch_esf_t_SIZEOF
 csrw mscratch, t0
 #endif /* CONFIG_PMP_STACK_GUARD */
@@ -805,48 +881,33 @@ skip_fp_move_irq:
 #ifdef CONFIG_USERSPACE
 /* Check if we are in user thread */
 WAS_NOT_USER(t3, t4)
-bnez t3, reschedule
+bnez t3, do_switch

 /*
  * Switch to privilege stack because we want
  * this starting point after reschedule.
  */
-RV_OP_LOADREG t3, _thread_offset_to_priv_stack_start(t2)
-RV_OP_STOREREG sp, _thread_offset_to_user_sp(t2) /* Save user SP */
+RV_OP_LOADREG t2, _thread_offset_to_priv_stack_start(t1)
+RV_OP_STOREREG sp, _thread_offset_to_user_sp(t1) /* Save user SP */
 mv t0, sp
-addi sp, t3, CONFIG_PRIVILEGED_STACK_SIZE
+li t3, CONFIG_PRIVILEGED_STACK_SIZE
+add sp, t2, t3

 /*
  * Copy Saved ESF to priv stack, that will allow us to know during
  * rescheduling if the thread was working on user mode.
  */
 addi sp, sp, -__z_arch_esf_t_SIZEOF
-COPY_ESF(sp, t0, t1)
+COPY_ESF(sp, t0, t2)
 #endif /* CONFIG_USERSPACE */

-reschedule:
-/*
- * Check if the current thread is the same as the thread on the ready Q. If
- * so, do not reschedule.
- * Note:
- * Sometimes this code is execute back-to-back before the target thread
- * has a chance to run. If this happens, the current thread and the
- * target thread will be the same.
+do_switch:
+/* Expectations:
+ * a0: handle for next thread
+ * a1: address of handle for outgoing thread or 0, if not handling arch_switch
+ * t1: k_thread for outgoing thread
  */
-GET_CPU(t0, t1)
-RV_OP_LOADREG t2, ___cpu_t_current_OFFSET(t0)
-RV_OP_LOADREG t3, _kernel_offset_to_ready_q_cache(t0)
-beq t2, t3, no_reschedule_resched
-
-#if CONFIG_INSTRUMENT_THREAD_SWITCHING
-call z_thread_mark_switched_out
-#endif
-
-/* Get reference to current CPU */
-GET_CPU(t0, t1)
-
-/* Get pointer to current thread */
-RV_OP_LOADREG t1, ___cpu_t_current_OFFSET(t0)

 #ifdef CONFIG_USERSPACE
 /*
@@ -883,25 +944,34 @@ skip_callee_saved_reg:
 li t3, CONFIG_ISR_STACK_SIZE
 add t2, t2, t3
 csrw mscratch, t2
 #endif /* CONFIG_PMP_STACK_GUARD */

-/*
- * Save stack pointer of current thread and set the default return value
- * of z_swap to _k_neg_eagain for the thread.
- */
+/* Save stack pointer of current thread. */
 RV_OP_STOREREG sp, _thread_offset_to_sp(t1)
-la t2, _k_neg_eagain
-lw t3, 0x00(t2)
-sw t3, _thread_offset_to_swap_return_value(t1)
-
-/* Get next thread to schedule. */
-RV_OP_LOADREG t1, _kernel_offset_to_ready_q_cache(t0)

 /*
- * Set _current_cpu.current to new thread loaded in t1
+ * Current thread is saved. If a1 != 0, we're coming from riscv_switch
+ * and need to update switched_from, as it's a synchronization signal
+ * that old thread is saved.
  */
-RV_OP_STOREREG t1, ___cpu_t_current_OFFSET(t0)
+beqz a1, clear_old_thread_switch_handle
+addi t2, a1, -___thread_t_switch_handle_OFFSET
+RV_OP_STOREREG t2, 0x00(a1)
+j load_new_thread
+
+clear_old_thread_switch_handle:
+#ifdef CONFIG_SMP
+/* Signal that old thread can be picked up by any CPU to be run again */
+RV_OP_STOREREG t1, ___thread_t_switch_handle_OFFSET(t1)
+#endif
+
+load_new_thread:
+/*
+ * At this point, a0 contains the new thread. Set
+ * t0 to be current CPU and t1 to be the new thread.
+ */
+GET_CPU(t0, t1)
+mv t1, a0

 /* Switch to new thread stack */
 RV_OP_LOADREG sp, _thread_offset_to_sp(t1)
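
The stores at the end of do_switch are the synchronization point: writing a
thread pointer into a switch_handle field signals that the thread's context is
fully saved. Roughly, in C (illustrative sketch, not the actual kernel code,
assuming the struct k_thread definition from <kernel.h>):

    #include <kernel.h>

    static void publish_outgoing(struct k_thread *old, void **switched_from)
    {
            if (switched_from != NULL) {
                    /* arch_switch() path: complete the handshake by filling
                     * in the handle the caller asked for. */
                    *switched_from = old;
            } else {
    #ifdef CONFIG_SMP
                    /* interrupt path: make 'old' available to any CPU again */
                    old->switch_handle = old;
    #endif
            }
    }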


@@ -10,49 +10,24 @@
 #include <arch/cpu.h>

 /* exports */
-GTEXT(arch_swap)
+GTEXT(arch_switch)
 GTEXT(z_thread_entry_wrapper)

 /* Use ABI name of registers for the sake of simplicity */

 /*
- * unsigned int arch_swap(unsigned int key)
+ * void arch_switch(void *switch_to, void **switched_from);
  *
  * Always called with interrupts locked
- * key is stored in a0 register
+ *
+ * a0 = (struct k_thread *) switch_to
+ * a1 = (struct k_thread **) address of output thread switch_handle field
  */
-SECTION_FUNC(exception.other, arch_swap)
+SECTION_FUNC(exception.other, arch_switch)

 /* Make a system call to perform context switch */
 ecall

-/*
- * when thread is rescheduled, unlock irq and return.
- * Restored register a0 contains IRQ lock state of thread.
- *
- * Prior to unlocking irq, load return value of
- * arch_swap to temp register t2 (from
- * _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN,
- * unless someone has previously called arch_thread_return_value_set(..).
- */
-la t0, _kernel
-
-/* Get pointer to _kernel.current */
-RV_OP_LOADREG t1, _kernel_offset_to_current(t0)
-
-/* Load return value of arch_swap function in temp register t2 */
-lw t2, _thread_offset_to_swap_return_value(t1)
-
-/*
- * Unlock irq, following IRQ lock state in a0 register.
- * Use atomic instruction csrrs to do so.
- */
-andi a0, a0, MSTATUS_IEN
-csrrs t0, mstatus, a0
-
-/* Set value of return register a0 to value of register t2 */
-addi a0, t2, 0
-
 /* Return */
 ret
@@ -63,7 +38,7 @@ SECTION_FUNC(exception.other, arch_swap)
 SECTION_FUNC(TEXT, z_thread_entry_wrapper)
 /*
  * z_thread_entry_wrapper is called for every new thread upon the return
- * of arch_swap or ISR. Its address, as well as its input function
+ * of arch_switch or ISR. Its address, as well as its input function
  * arguments thread_entry_t, void *, void *, void * are restored from
  * the thread stack (initialized via function _thread).
  * In this case, thread_entry_t, * void *, void * and void * are stored
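
Since arch_switch() is now just an ECALL, the interesting part is the calling
convention: per the RISC-V ABI, switch_to arrives in a0 and switched_from in
a1, which is exactly what the dispatcher and the do_switch path consume. A
hedged usage sketch; the real caller is the kernel's z_swap machinery, not
application code:

    #include <kernel.h>

    void arch_switch(void *switch_to, void **switched_from); /* prototype from the diff */

    static inline void context_switch_to(struct k_thread *next,
                                         struct k_thread *prev)
    {
            arch_switch(next->switch_handle, &prev->switch_handle);
    }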


@@ -126,6 +126,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 #endif

 thread->callee_saved.sp = (ulong_t)stack_init;
+thread->switch_handle = thread;
 }

 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)


@@ -34,11 +34,7 @@ static ALWAYS_INLINE void arch_kernel_init(void)
 #endif
 }

-static ALWAYS_INLINE void
-arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
-{
-thread->arch.swap_return_value = value;
-}
+void arch_switch(void *switch_to, void **switched_from);

 FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
 const z_arch_esf_t *esf);