riscv: decouple the Zephyr CPU number from the hart ID

Currently it is assumed that Zephyr CPU numbers match their hart IDs
one to one. This assumption was relied upon to efficiently retrieve
the current CPU's `struct _cpu` pointer.
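
For reference, with SMP enabled the old lookup boiled down to indexing
`_kernel.cpus[]` by hart ID; a condensed sketch of the `arch_curr_cpu()`
variant being removed further down in the diff:

    #include <zephyr/kernel_structs.h>
    #include "csr.h"

    /* old scheme (sketch): the Zephyr CPU number is assumed to equal the hart ID */
    static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
    {
        /* linear hartid enumeration space assumed */
        return &_kernel.cpus[csr_read(mhartid)];
    }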

Systems are starting to appear where individual CPUs are dedicated to
different uses, so this assumption may no longer hold.

Let's completely decouple the hart ID from the Zephyr CPU number by
stuffing each CPU's `struct _cpu` pointer in its respective scratch
register instead. `arch_curr_cpu()` becomes more efficient as well.
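
As the diff below shows, the per-CPU lookup then reduces to a single CSR
read; a condensed view of the new `arch_curr_cpu()`:

    #include <zephyr/kernel_structs.h>
    #include "csr.h"

    static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
    {
    #if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
        /* mscratch holds this CPU's &_kernel.cpus[i], set up at init time */
        return (_cpu_t *)csr_read(mscratch);
    #else
        /* no SMP and no userspace: cpus[0] is the only entry */
        return &_kernel.cpus[0];
    #endif
    }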

The scratch register was previously used to store user space's
exception stack pointer; that pointer now lives in `struct _cpu_arch`
instead, which entailed some minor cleanup and rationalization of the
user space entry code.
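
The relocated per-CPU state is the small structure introduced by the new
<zephyr/arch/riscv/structs.h> (last hunks of the diff); the tmp0/tmp1
slots let the exception entry code temporarily spill t0 and t1 while it
figures out which stack to use:

    /* Per CPU architecture specifics */
    struct _cpu_arch {
    #ifdef CONFIG_USERSPACE
        unsigned long user_exc_sp;   /* privileged stack for user-mode exceptions */
        unsigned long user_exc_tmp0; /* temporary spill slots for t0/t1 on entry */
        unsigned long user_exc_tmp1;
    #endif
    };

On the C side the switch is a one-liner: `arch_user_mode_enter()` now
records the privileged stack with
`arch_curr_cpu()->arch.user_exc_sp = top_of_priv_stack;` where it used
to write mscratch directly.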

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
Nicolas Pitre authored on 2023-01-06 17:46:21 -05:00; committed by Carles Cufí
commit 26d7bd47a0
9 changed files with 106 additions and 75 deletions


@@ -63,15 +63,13 @@
RV_I( op a7, __z_arch_esf_t_a7_OFFSET(sp) );\
RV_E( op ra, __z_arch_esf_t_ra_OFFSET(sp) )
#ifdef CONFIG_SMP
#define GET_CURRENT_CPU(dst, tmp) \
csrr tmp, mhartid ;\
la dst, _kernel + ___kernel_t_cpus_OFFSET ;\
shiftmul_add dst, tmp, ___cpu_t_SIZEOF
.macro get_current_cpu dst
#if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
csrr \dst, mscratch
#else
#define GET_CURRENT_CPU(dst, tmp) \
la dst, _kernel + ___kernel_t_cpus_OFFSET
la \dst, _kernel + ___kernel_t_cpus_OFFSET
#endif
.endm
/* imports */
GDATA(_sw_isr_table)
@@ -129,17 +127,43 @@ GTEXT(_isr_wrapper)
SECTION_FUNC(exception.entry, _isr_wrapper)
#ifdef CONFIG_USERSPACE
/*
* The scratch register contains either the privileged stack pointer
* to use when interrupting a user mode thread, or 0 when interrupting
* kernel mode in which case the current stack should be used.
*/
csrrw sp, mscratch, sp
bnez sp, 1f
/* retrieve address of _current_cpu preserving s0 */
csrrw s0, mscratch, s0
/* restore privileged stack pointer and zero the scratch reg */
csrrw sp, mscratch, sp
/* preserve t0 and t1 temporarily */
sr t0, _curr_cpu_arch_user_exc_tmp0(s0)
sr t1, _curr_cpu_arch_user_exc_tmp1(s0)
/* determine if we come from user space */
csrr t0, mstatus
li t1, MSTATUS_MPP
and t0, t0, t1
bnez t0, 1f
/* in user space we were: switch to our privileged stack */
mv t0, sp
lr sp, _curr_cpu_arch_user_exc_sp(s0)
/* Save user stack value. Coming from user space, we know this
* can't overflow the privileged stack. The esf will be allocated
* later but it is safe to store our saved user sp here. */
sr t0, (-__z_arch_esf_t_SIZEOF + __z_arch_esf_t_sp_OFFSET)(sp)
/* Make sure tls pointer is sane */
lr t0, ___cpu_t_current_OFFSET(s0)
lr tp, _thread_offset_to_tls(t0)
/* Clear our per-thread usermode flag */
lui t0, %tprel_hi(is_user_mode)
add t0, t0, tp, %tprel_add(is_user_mode)
sb zero, %tprel_lo(is_user_mode)(t0)
1:
/* retrieve original t0/t1 values */
lr t0, _curr_cpu_arch_user_exc_tmp0(s0)
lr t1, _curr_cpu_arch_user_exc_tmp1(s0)
/* retrieve original s0 and restore _current_cpu in mscratch */
csrrw s0, mscratch, s0
#endif
#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
@@ -152,32 +176,7 @@ SECTION_FUNC(exception.entry, _isr_wrapper)
/* Save s0 in the esf and load it with &_current_cpu. */
sr s0, __z_arch_esf_t_s0_OFFSET(sp)
GET_CURRENT_CPU(s0, t0)
#ifdef CONFIG_USERSPACE
/*
* The scratch register now contains either the user mode stack
* pointer, or 0 if entered from kernel mode. Retrieve that value
* and zero the scratch register as we are in kernel mode now.
*/
csrrw t0, mscratch, zero
bnez t0, 1f
/* came from kernel mode: adjust stack value */
add t0, sp, __z_arch_esf_t_SIZEOF
1:
/* save stack value to be restored later */
sr t0, __z_arch_esf_t_sp_OFFSET(sp)
/* Make sure tls pointer is sane */
lr t0, ___cpu_t_current_OFFSET(s0)
lr tp, _thread_offset_to_tls(t0)
/* Clear our per-thread usermode flag */
lui t0, %tprel_hi(is_user_mode)
add t0, t0, tp, %tprel_add(is_user_mode)
sb zero, %tprel_lo(is_user_mode)(t0)
#endif
get_current_cpu s0
/* Save MEPC register */
csrr t0, mepc
@@ -531,7 +530,7 @@ z_riscv_thread_start:
might_have_rescheduled:
#ifdef CONFIG_SMP
/* reload s0 with &_current_cpu as it might have changed */
GET_CURRENT_CPU(s0, t0)
get_current_cpu s0
#endif
no_reschedule:
@@ -572,8 +571,8 @@ no_fp: /* make sure this is reflected in the restored mstatus */
#ifdef CONFIG_USERSPACE
/*
* Check if we are returning to user mode. If so then we must
* set is_user_mode to true and load the scratch register with
* the stack pointer to be used with the next exception to come.
* set is_user_mode to true and preserve our kernel mode stack for
* the next exception to come.
*/
li t1, MSTATUS_MPP
and t0, t2, t1
@@ -591,10 +590,19 @@ no_fp: /* make sure this is reflected in the restored mstatus */
add t0, t0, tp, %tprel_add(is_user_mode)
sb t1, %tprel_lo(is_user_mode)(t0)
/* load scratch reg with stack pointer for next exception entry */
/* preserve stack pointer for next exception entry */
add t0, sp, __z_arch_esf_t_SIZEOF
csrw mscratch, t0
sr t0, _curr_cpu_arch_user_exc_sp(s0)
j 2f
1:
/*
* We are returning to kernel mode. Store the stack pointer to
* be re-loaded further down.
*/
addi t0, sp, __z_arch_esf_t_SIZEOF
sr t0, __z_arch_esf_t_sp_OFFSET(sp)
2:
#endif
/* Restore s0 (it is no longer ours) */


@@ -121,4 +121,10 @@ GEN_SOC_OFFSET_SYMS();
GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, sizeof(z_arch_esf_t));
#ifdef CONFIG_USERSPACE
GEN_OFFSET_SYM(_cpu_arch_t, user_exc_sp);
GEN_OFFSET_SYM(_cpu_arch_t, user_exc_tmp0);
GEN_OFFSET_SYM(_cpu_arch_t, user_exc_tmp1);
#endif
GEN_ABS_SYM_END


@@ -33,6 +33,7 @@ void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
void z_riscv_secondary_cpu_init(int cpu_num)
{
csr_write(mscratch, &_kernel.cpus[cpu_num]);
#ifdef CONFIG_THREAD_LOCAL_STORAGE
__asm__("mv tp, %0" : : "r" (z_idle_threads[cpu_num].tls));
#endif


@@ -80,9 +80,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
/* Clear user thread context */
z_riscv_pmp_usermode_init(thread);
thread->arch.priv_stack_start = 0;
/* the unwound stack pointer upon exiting exception */
stack_init->sp = (unsigned long)(stack_init + 1);
#endif /* CONFIG_USERSPACE */
/* Assign thread entry point and mstatus.MPRV mode. */
@@ -242,8 +239,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
z_riscv_pmp_usermode_prepare(_current);
z_riscv_pmp_usermode_enable(_current);
/* exception stack has to be in mscratch */
csr_write(mscratch, top_of_priv_stack);
/* preserve stack pointer for next exception entry */
arch_curr_cpu()->arch.user_exc_sp = top_of_priv_stack;
is_user_mode = true;


@@ -29,8 +29,8 @@ static ALWAYS_INLINE void arch_kernel_init(void)
#ifdef CONFIG_THREAD_LOCAL_STORAGE
__asm__ volatile ("li tp, 0");
#endif
#ifdef CONFIG_USERSPACE
csr_write(mscratch, 0);
#if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
csr_write(mscratch, &_kernel.cpus[0]);
#endif
#ifdef CONFIG_RISCV_PMP
z_riscv_pmp_init();


@@ -9,14 +9,6 @@
#include <offsets.h>
/* kernel */
/* nothing for now */
/* end - kernel */
/* threads */
#define _thread_offset_to_sp \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET)
@@ -109,12 +101,22 @@
#endif /* defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) */
#ifdef CONFIG_USERSPACE
#define _thread_offset_to_priv_stack_start \
(___thread_t_arch_OFFSET + ___thread_arch_t_priv_stack_start_OFFSET)
#define _thread_offset_to_user_sp \
(___thread_t_arch_OFFSET + ___thread_arch_t_user_sp_OFFSET)
#define _curr_cpu_arch_user_exc_sp \
(___cpu_t_arch_OFFSET + ___cpu_arch_t_user_exc_sp_OFFSET)
#define _curr_cpu_arch_user_exc_tmp0 \
(___cpu_t_arch_OFFSET + ___cpu_arch_t_user_exc_tmp0_OFFSET)
#define _curr_cpu_arch_user_exc_tmp1 \
(___cpu_t_arch_OFFSET + ___cpu_arch_t_user_exc_tmp1_OFFSET)
#endif
/* end - threads */
#endif /* ZEPHYR_ARCH_RISCV_INCLUDE_OFFSETS_SHORT_ARCH_H_ */


@@ -10,24 +10,20 @@
#ifndef _ASMLANGUAGE
#include <zephyr/kernel_structs.h>
#include "csr.h"
static ALWAYS_INLINE uint32_t arch_proc_id(void)
{
uint32_t hartid;
#ifdef CONFIG_SMP
__asm__ volatile("csrr %0, mhartid" : "=r" (hartid));
#else
hartid = 0;
#endif
return hartid;
return csr_read(mhartid);
}
static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
{
/* linear hartid enumeration space assumed */
return &_kernel.cpus[arch_proc_id()];
#if defined(CONFIG_SMP) || defined(CONFIG_USERSPACE)
return (_cpu_t *)csr_read(mscratch);
#else
return &_kernel.cpus[0];
#endif
}
static ALWAYS_INLINE unsigned int arch_num_cpus(void)


@@ -0,0 +1,19 @@
/*
* Copyright (c) BayLibre SAS
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_RISCV_STRUCTS_H_
#define ZEPHYR_INCLUDE_RISCV_STRUCTS_H_
/* Per CPU architecture specifics */
struct _cpu_arch {
#ifdef CONFIG_USERSPACE
unsigned long user_exc_sp;
unsigned long user_exc_tmp0;
unsigned long user_exc_tmp1;
#endif
};
#endif /* ZEPHYR_INCLUDE_RISCV_STRUCTS_H_ */


@@ -25,6 +25,8 @@
#if defined(CONFIG_ARM64)
#include <zephyr/arch/arm64/structs.h>
#elif defined(CONFIG_RISCV)
#include <zephyr/arch/riscv/structs.h>
#else
/* Default definitions when no architecture specific definitions exist. */