arm64: hold curr_cpu instance in tpidrro_el0
Let's fully exploit tpidrro_el0 by storing in it the current CPU's struct _cpu instance alongside the userspace mode flag bit. This greatly simplifies the code needed to get at the cpu structure, and it paves the way to much simpler multi-cluster support, as there is no longer any need to decode MPIDR all the time.

The same code is used in the !SMP case as there are benefits there too, such as avoiding the literal pool, and it looks cleaner.

The tpidrro_el0 value is no longer stored in the exception stack frame. Instead, we simply restore the user mode flag based on the SPSR value. This way, more flag bits could be used independently in the future.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
parent 2bed37e534 · commit 88477906f0
13 changed files with 55 additions and 64 deletions
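For orientation before the diff: the scheme packs the 8-byte-aligned struct _cpu pointer into the upper bits of tpidrro_el0 and keeps the user-mode flag in bit 0. The short host-side C sketch below is illustrative only; the fake_cpu type, variable names and printout are made up, and only the TPIDRROEL0_* mask values come from the new header added by this commit. It shows how such a register value is packed and how arch_curr_cpu() / arch_is_user_context() style accessors unpack it:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Masks mirroring the new include/arch/arm64/tpidrro_el0.h */
#define TPIDRROEL0_IN_EL0    0x0000000000000001ULL  /* "currently in EL0" flag    */
#define TPIDRROEL0_CURR_CPU  0x0000fffffffffff8ULL  /* 8-byte-aligned CPU pointer */

struct fake_cpu {            /* hypothetical stand-in for Zephyr's struct _cpu */
        uint64_t nested;
        int id;
};

int main(void)
{
        static struct fake_cpu cpu0 = { .nested = 0, .id = 0 };
        uint64_t tpidrro_el0;

        /* Boot-time setup (see the z_arm64_prep_c hunk): store the pointer with
         * the flag clear.  The pointer must fit the CURR_CPU field, i.e. be
         * 8-byte aligned and below 2^48 (an assumption checked here). */
        assert(((uint64_t)(uintptr_t)&cpu0 & ~TPIDRROEL0_CURR_CPU) == 0);
        tpidrro_el0 = (uintptr_t)&cpu0;

        /* Entering user mode only toggles the low bit; the pointer is untouched. */
        tpidrro_el0 |= TPIDRROEL0_IN_EL0;

        /* arch_curr_cpu(): mask off the flag bit(s) to recover the pointer. */
        struct fake_cpu *curr =
                (struct fake_cpu *)(uintptr_t)(tpidrro_el0 & TPIDRROEL0_CURR_CPU);

        /* arch_is_user_context(): test the flag bit only. */
        int in_el0 = (tpidrro_el0 & TPIDRROEL0_IN_EL0) != 0;

        printf("curr cpu id=%d, user context=%d\n", curr->id, in_el0);
        return 0;
}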
@@ -7,9 +7,9 @@
 #ifndef _MACRO_PRIV_INC_
 #define _MACRO_PRIV_INC_
 
-#ifdef _ASMLANGUAGE
+#include <arch/arm64/tpidrro_el0.h>
 
-GDATA(_kernel)
+#ifdef _ASMLANGUAGE
 
 /*
  * Get CPU id
@@ -25,12 +25,9 @@ GDATA(_kernel)
  * Get CPU pointer
  */
 
-.macro get_cpu xreg0, xreg1
-        get_cpu_id \xreg1
-        mov     \xreg0, #___cpu_t_SIZEOF
-        mul     \xreg0, \xreg0, \xreg1
-        ldr     \xreg1, =(_kernel + ___kernel_t_cpus_OFFSET)
-        add     \xreg0, \xreg0, \xreg1
+.macro get_cpu xreg0
+        mrs     \xreg0, tpidrro_el0
+        and     \xreg0, \xreg0, #TPIDRROEL0_CURR_CPU
 .endm
 
 /*
@@ -38,17 +35,10 @@ GDATA(_kernel)
  */
 
 .macro inc_nest_counter xreg0, xreg1
-#ifdef CONFIG_SMP
-        get_cpu \xreg0, \xreg1
+        get_cpu \xreg0
         ldr     \xreg1, [\xreg0, #___cpu_t_nested_OFFSET]
         add     \xreg1, \xreg1, #1
         str     \xreg1, [\xreg0, #___cpu_t_nested_OFFSET]
-#else
-        ldr     \xreg0, =_kernel
-        ldr     \xreg1, [\xreg0, #_kernel_offset_to_nested]
-        add     \xreg1, \xreg1, #1
-        str     \xreg1, [\xreg0, #_kernel_offset_to_nested]
-#endif
 .endm
 
 /*
@@ -56,17 +46,10 @@ GDATA(_kernel)
  */
 
 .macro dec_nest_counter xreg0, xreg1
-#ifdef CONFIG_SMP
-        get_cpu \xreg0, \xreg1
+        get_cpu \xreg0
         ldr     \xreg1, [\xreg0, #___cpu_t_nested_OFFSET]
         subs    \xreg1, \xreg1, #1
         str     \xreg1, [\xreg0, #___cpu_t_nested_OFFSET]
-#else
-        ldr     \xreg0, =_kernel
-        ldr     \xreg1, [\xreg0, #_kernel_offset_to_nested]
-        subs    \xreg1, \xreg1, #1
-        str     \xreg1, [\xreg0, #_kernel_offset_to_nested]
-#endif
 .endm
 
 #endif /* _ASMLANGUAGE */
@@ -44,9 +44,6 @@ GEN_NAMED_OFFSET_SYM(_callee_saved_t, x29, x29_sp);
 GEN_ABSOLUTE_SYM(___callee_saved_t_SIZEOF, sizeof(struct _callee_saved));
 
 GEN_NAMED_OFFSET_SYM(_esf_t, spsr, spsr_elr);
-#ifdef CONFIG_USERSPACE
-GEN_NAMED_OFFSET_SYM(_esf_t, tpidrro_el0, tpidrro_el0);
-#endif
 GEN_NAMED_OFFSET_SYM(_esf_t, x18, x18_x30);
 GEN_NAMED_OFFSET_SYM(_esf_t, x16, x16_x17);
 GEN_NAMED_OFFSET_SYM(_esf_t, x14, x14_x15);
@@ -45,6 +45,9 @@ static inline void z_arm64_bss_zero(void)
  */
 void z_arm64_prep_c(void)
 {
+        /* Initialize tpidrro_el0 with our struct _cpu instance address */
+        write_tpidrro_el0((uintptr_t)&_kernel.cpus[0]);
+
         z_arm64_bss_zero();
 #ifdef CONFIG_XIP
         z_data_copy();
@@ -65,6 +65,9 @@ void z_arm64_secondary_start(void)
         arch_cpustart_t fn;
         int cpu_num = MPIDR_TO_CORE(GET_MPIDR());
 
+        /* Initialize tpidrro_el0 with our struct _cpu instance address */
+        write_tpidrro_el0((uintptr_t)&_kernel.cpus[cpu_num]);
+
         z_arm64_mmu_init();
 
 #ifdef CONFIG_SMP
@@ -53,7 +53,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
                 pInitCtx->elr = (uint64_t)z_thread_entry;
         }
 
-        pInitCtx->tpidrro_el0 = 0x0;
         thread->arch.priv_stack_start = 0;
 #else
         pInitCtx->elr = (uint64_t)z_thread_entry;
@@ -104,9 +103,6 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
         pInitCtx->spsr = DAIF_FIQ_BIT | SPSR_MODE_EL0T;
         pInitCtx->elr = (uint64_t)z_thread_entry;
 
-        /* The thread will be in user context */
-        pInitCtx->tpidrro_el0 = 0x1;
-
         pInitCtx->x0 = (uint64_t)user_entry;
         pInitCtx->x1 = (uint64_t)p1;
         pInitCtx->x2 = (uint64_t)p2;
@@ -110,13 +110,8 @@ valid_syscall_id:
         ldr     x9, [x9, x8, lsl #3]
 
         /* Recover the privileged stack */
-#ifdef CONFIG_SMP
-        get_cpu x10, x8
+        get_cpu x10
         ldr     x10, [x10, #___cpu_t_current_OFFSET]
-#else
-        ldr     x10, =_kernel
-        ldr     x10, [x10, #_kernel_offset_to_current]
-#endif
         ldr     x10, [x10, #_thread_offset_to_priv_stack_start]
         add     x10, x10, #CONFIG_PRIVILEGED_STACK_SIZE
 
@@ -10,8 +10,9 @@
 
 #include <toolchain.h>
 #include <linker/sections.h>
+#include <offsets.h>
 #include <arch/cpu.h>
-#include <offsets_short.h>
+#include <arch/arm64/tpidrro_el0.h>
 #include "vector_table.h"
 
 _ASM_FILE_PROLOGUE
@@ -56,12 +57,10 @@ _ASM_FILE_PROLOGUE
         stp     \xreg0, \xreg1, [sp, ___esf_t_spsr_elr_OFFSET]
 
 #ifdef CONFIG_USERSPACE
-        /* Save the current kernel/user mode in the context */
+        /* Clear usermode flag */
         mrs     \xreg0, tpidrro_el0
-        str     \xreg0, [sp, ___esf_t_tpidrro_el0_OFFSET]
-
-        /* Switch TPIDRRO_EL0 to kernel mode */
-        msr     tpidrro_el0, xzr
+        bic     \xreg0, \xreg0, #TPIDRROEL0_IN_EL0
+        msr     tpidrro_el0, \xreg0
 #endif
 
 .endm
@@ -219,8 +218,12 @@ SECTION_FUNC(TEXT, z_arm64_exit_exc)
 
 #ifdef CONFIG_USERSPACE
         /* Restore the kernel/user mode flag */
-        ldr     x0, [sp, ___esf_t_tpidrro_el0_OFFSET]
+        tst     x0, #SPSR_MODE_MASK     /* EL0 == 0 */
+        bne     1f
+        mrs     x0, tpidrro_el0
+        orr     x0, x0, #TPIDRROEL0_IN_EL0
         msr     tpidrro_el0, x0
+1:
 #endif
 
         ldp     x0, x1, [sp, ___esf_t_x0_x1_OFFSET]
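The exit path above appears to rely on x0 holding the SPSR value restored just before this block (those surrounding instructions are not part of the hunk), so the user-mode flag is recomputed from the SPSR mode field instead of being reloaded from the exception frame. A rough, illustrative C rendering of that branch, reusing the SPSR_MODE_MASK and TPIDRROEL0_IN_EL0 constants introduced elsewhere in this commit (the helper name is made up):

#include <stdint.h>

/* Constants from the cpu.h and tpidrro_el0.h hunks of this commit. */
#define SPSR_MODE_MASK      0xfULL  /* M[3:0]; 0 means EL0t          */
#define TPIDRROEL0_IN_EL0   0x1ULL  /* user-mode flag in tpidrro_el0 */

/* Compute the tpidrro_el0 value in effect after exception return, given the
 * SPSR being restored and the current register value (whose flag bit was
 * cleared on exception entry).  If we are not returning to EL0, the register
 * is left unchanged, matching the "bne 1f" skip in the assembly. */
static inline uint64_t tpidrro_el0_on_exit(uint64_t spsr, uint64_t tpidrro_el0)
{
        if ((spsr & SPSR_MODE_MASK) == 0) {     /* returning to EL0 */
                tpidrro_el0 |= TPIDRROEL0_IN_EL0;
        }
        return tpidrro_el0;
}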
@@ -10,21 +10,12 @@
 #ifndef _ASMLANGUAGE
 
 #include <kernel_structs.h>
-#include <arch/cpu.h>
 #include <arch/arm64/lib_helpers.h>
+#include <arch/arm64/tpidrro_el0.h>
 
 static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
 {
-#ifdef CONFIG_SMP
-        uint64_t core;
-
-        /* Note: Only support one Cluster */
-        core = MPIDR_TO_CORE(GET_MPIDR());
-
-        return &_kernel.cpus[core];
-#else
-        return &_kernel.cpus[0];
-#endif /* CONFIG_SMP */
+        return (_cpu_t *)(read_tpidrro_el0() & TPIDRROEL0_CURR_CPU);
 }
 
 #endif /* !_ASMLANGUAGE */
@@ -32,6 +32,7 @@
 #define SPSR_MODE_EL1H          (0x5)
 #define SPSR_MODE_EL2T          (0x8)
 #define SPSR_MODE_EL2H          (0x9)
+#define SPSR_MODE_MASK          (0xf)
 
 
 #define SCTLR_EL3_RES1  (BIT(29) | BIT(28) | BIT(23) | \
@@ -47,9 +47,6 @@ struct __esf {
         uint64_t x30;
         uint64_t spsr;
         uint64_t elr;
-#ifdef CONFIG_USERSPACE
-        uint64_t tpidrro_el0;
-#endif
 } __aligned(16);
 
 typedef struct __esf z_arch_esf_t;
@@ -66,6 +66,7 @@ MAKE_REG_HELPER(hcr_el2);
 MAKE_REG_HELPER(id_aa64pfr0_el1);
 MAKE_REG_HELPER(id_aa64mmfr0_el1);
 MAKE_REG_HELPER(scr_el3);
+MAKE_REG_HELPER(tpidrro_el0);
 
 MAKE_REG_HELPER_EL123(actlr)
 MAKE_REG_HELPER_EL123(cpacr)
@@ -26,7 +26,8 @@
 
 #include <zephyr/types.h>
 #include <stdbool.h>
-#include <arch/arm64/cpu.h>
+#include <arch/arm64/lib_helpers.h>
+#include <arch/arm64/tpidrro_el0.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -165,11 +166,7 @@ static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
 
 static inline bool arch_is_user_context(void)
 {
-        uint64_t tpidrro_el0;
-
-        __asm__ volatile("mrs %0, tpidrro_el0" : "=r" (tpidrro_el0));
-
-        return (tpidrro_el0 != 0x0);
+        return (read_tpidrro_el0() & TPIDRROEL0_IN_EL0) != 0;
 }
 
 #ifdef __cplusplus
include/arch/arm64/tpidrro_el0.h (new file, 24 lines)
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2021 BayLibre SAS
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/**
+ * @file
+ * @brief tpidrro_el0 bits allocation
+ *
+ * Among other things, the tpidrro_el0 holds the address for the current
+ * CPU's struct _cpu instance. But such a pointer is at least 8-bytes
+ * aligned, and the address space is 48 bits max. That leaves plenty of
+ * free bits for other purposes.
+ */
+
+#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_TPIDRRO_EL0_H_
+#define ZEPHYR_INCLUDE_ARCH_ARM64_TPIDRRO_EL0_H_
+
+#define TPIDRROEL0_IN_EL0       0x0000000000000001
+
+#define TPIDRROEL0_CURR_CPU     0x0000fffffffffff8
+
+#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_TPIDRRO_EL0_H_ */
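The new header documents its assumptions (8-byte pointer alignment, 48-bit address space) but does not enforce them. Purely as an illustration and not part of the commit, compile-time checks along the following lines would capture the constraints the bit allocation relies on:

#include <stdint.h>

/* Values mirroring include/arch/arm64/tpidrro_el0.h */
#define TPIDRROEL0_IN_EL0    0x0000000000000001ULL
#define TPIDRROEL0_CURR_CPU  0x0000fffffffffff8ULL

/* The flag bit and the pointer field must not overlap... */
_Static_assert((TPIDRROEL0_IN_EL0 & TPIDRROEL0_CURR_CPU) == 0,
               "flag bit collides with the CPU pointer field");

/* ...the pointer field must exclude the three low bits implied by the
 * 8-byte alignment of struct _cpu, leaving them free for flags... */
_Static_assert((TPIDRROEL0_CURR_CPU & 0x7) == 0,
               "CPU pointer field must not cover the alignment bits");

/* ...and it tops out at bit 47, matching a 48-bit virtual address space. */
_Static_assert(TPIDRROEL0_CURR_CPU < (1ULL << 48),
               "CPU pointer field exceeds the 48-bit address space");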