arm64: implement exception depth count

Add the exception depth count to tpidrro_el0 and make it available
through the arch_exception_depth() accessor.

The IN_EL0 flag is now updated unconditionally even if userspace is
not configured. Doing otherwise made the code rather hairy and
I doubt the overhead is measurable.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
Author:    Nicolas Pitre <npitre@baylibre.com>
Date:      2021-04-13 00:42:00 -04:00
Committer: Carles Cufí
Commit:    a82fff04ff
10 changed files with 53 additions and 13 deletions

@@ -226,6 +226,8 @@ void z_arm64_fatal_error(unsigned int reason, z_arch_esf_t *esf)
if (dump_far)
LOG_ERR("FAR_ELn: 0x%016llx", far);
LOG_ERR("TPIDRRO: 0x%016llx", read_tpidrro_el0());
#endif /* CONFIG_EXCEPTION_DEBUG */
if (is_recoverable(esf, esr, far, elr))

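As a made-up illustration (the value here is hypothetical), the new dump line can be decoded with the tpidrro_el0 masks introduced later in this commit:

    TPIDRRO: 0x0200fffff8003148
      bits 63:56 = 0x02 -> exception depth of 2
      bits 47:3         -> current _cpu_t pointer (TPIDRROEL0_CURR_CPU)
      bit  0     = 0    -> IN_EL0 clear, i.e. the fault came from kernel mode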
@@ -30,6 +30,8 @@
#include <kernel_arch_data.h>
#include <kernel_offsets.h>
GEN_OFFSET_SYM(_thread_arch_t, exception_depth);
GEN_NAMED_OFFSET_SYM(_callee_saved_t, x19, x19_x20);
GEN_NAMED_OFFSET_SYM(_callee_saved_t, x21, x21_x22);
GEN_NAMED_OFFSET_SYM(_callee_saved_t, x23, x23_x24);

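For readers unfamiliar with Zephyr's offset machinery: GEN_OFFSET_SYM emits an absolute symbol so that assembly code can address a C struct member by name. Conceptually (a sketch, not the actual generator output), the new symbol behaves like:

    /* ___thread_arch_t_exception_depth_OFFSET is effectively */
    offsetof(struct _thread_arch, exception_depth)

The _thread_offset_to_exception_depth macro added further down builds on this symbol.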
@@ -47,7 +47,18 @@ SECTION_FUNC(TEXT, z_arm64_context_switch)
/* Save the current SP_ELx */
mov x4, sp
stp x4, xzr, [x2, ___callee_saved_t_sp_elx_OFFSET]
str x4, [x2, ___callee_saved_t_sp_elx_OFFSET]
/* save current thread's exception depth */
mrs x4, tpidrro_el0
lsr x2, x4, #TPIDRROEL0_EXC_SHIFT
strb w2, [x1, #_thread_offset_to_exception_depth]
/* retrieve next thread's exception depth */
ldrb w2, [x0, #_thread_offset_to_exception_depth]
bic x4, x4, #TPIDRROEL0_EXC_DEPTH
orr x4, x4, x2, lsl #TPIDRROEL0_EXC_SHIFT
msr tpidrro_el0, x4
#ifdef CONFIG_SMP
/* save old thread into switch handle which is required by
@@ -82,7 +93,7 @@ SECTION_FUNC(TEXT, z_arm64_context_switch)
msr sp_el0, x1
/* Restore SP_EL1 */
ldp x1, xzr, [x2, ___callee_saved_t_sp_elx_OFFSET]
ldr x1, [x2, ___callee_saved_t_sp_elx_OFFSET]
mov sp, x1
#ifdef CONFIG_USERSPACE

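A C rendering of the depth handling added above, with prev/next standing for the outgoing and incoming threads (illustrative only; the real code is the assembly in this hunk):

    /* the depth count lives in the top byte of tpidrro_el0 */
    prev->arch.exception_depth = tpidrro >> TPIDRROEL0_EXC_SHIFT;
    tpidrro &= ~TPIDRROEL0_EXC_DEPTH;
    tpidrro |= (uint64_t)next->arch.exception_depth << TPIDRROEL0_EXC_SHIFT;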
@@ -108,6 +108,9 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
/* Keep using SP_EL1 */
pInitCtx->spsr = SPSR_MODE_EL1H | DAIF_FIQ_BIT;
/* thread birth happens through the exception return path */
thread->arch.exception_depth = 1;
/*
* We are saving SP_EL1 to pop out entry and parameters when going
* through z_arm64_exit_exc(). For user threads the definitive location

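Why 1 rather than 0: a fresh thread starts executing by "returning" through the exception exit path, which decrements the count. The lifecycle looks like this (sketch):

    arch_new_thread():    depth = 1   /* born inside a pretend exception */
    z_arm64_exit_exc():   depth -= 1  /* fake eret: depth is now 0 */
    later IRQ/exception:  depth += 1, then -= 1 on return, staying balanced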
@@ -131,11 +131,14 @@ valid_syscall_id:
*
* We leverage z_arm64_exit_exc() to pop out the entry function and parameters
* from ESF and fake a return from exception to move from EL1 to EL0. The fake
* ESF is built in arch_user_mode_enter() before jumping here
* ESF is built in arch_user_mode_enter() before jumping here.
*/
GTEXT(z_arm64_userspace_enter)
SECTION_FUNC(TEXT, z_arm64_userspace_enter)
msr DAIFSET, #DAIFSET_IRQ_BIT
/*
* When a kernel thread is moved to user mode it doesn't have any
* SP_EL0 set yet. We set it here for the first time pointing to the
@@ -150,4 +153,12 @@ SECTION_FUNC(TEXT, z_arm64_userspace_enter)
* location for when the next exception will come.
*/
mov sp, x0
/* we have to fake our exception depth count too */
mrs x0, tpidrro_el0
mov x1, #TPIDRROEL0_EXC_UNIT
bic x0, x0, #TPIDRROEL0_EXC_DEPTH
add x0, x0, x1
msr tpidrro_el0, x0
b z_arm64_exit_exc

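In C terms, the added instructions force the depth count to exactly one EXC_UNIT, so the decrement performed by z_arm64_exit_exc() brings it back to 0 once the thread lands in EL0 (illustrative sketch):

    tpidrro = (tpidrro & ~TPIDRROEL0_EXC_DEPTH) + TPIDRROEL0_EXC_UNIT;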
@@ -51,12 +51,12 @@ _ASM_FILE_PROLOGUE
mrs \xreg1, elr_el1
stp \xreg0, \xreg1, [sp, ___esf_t_spsr_elr_OFFSET]
#ifdef CONFIG_USERSPACE
/* Clear usermode flag */
/* Clear usermode flag and increment exception depth */
mrs \xreg0, tpidrro_el0
mov \xreg1, #TPIDRROEL0_EXC_UNIT
bic \xreg0, \xreg0, #TPIDRROEL0_IN_EL0
add \xreg0, \xreg0, \xreg1
msr tpidrro_el0, \xreg0
#endif
.endm
@@ -213,15 +213,14 @@ SECTION_FUNC(TEXT, z_arm64_exit_exc)
msr spsr_el1, x0
msr elr_el1, x1
#ifdef CONFIG_USERSPACE
/* Restore the kernel/user mode flag */
/* Restore the kernel/user mode flag and decrement exception depth */
tst x0, #SPSR_MODE_MASK /* EL0 == 0 */
bne 1f
mrs x0, tpidrro_el0
orr x0, x0, #TPIDRROEL0_IN_EL0
mov x1, #TPIDRROEL0_EXC_UNIT
orr x2, x0, #TPIDRROEL0_IN_EL0
csel x0, x2, x0, eq
sub x0, x0, x1
msr tpidrro_el0, x0
1:
#endif
ldp x0, x1, [sp, ___esf_t_x0_x1_OFFSET]
ldp x2, x3, [sp, ___esf_t_x2_x3_OFFSET]

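The csel sequence above is a branchless version of the following C logic, where spsr holds the value about to be restored into SPSR_EL1 (a sketch, ignoring the actual register allocation):

    uint64_t v = tpidrro;
    if ((spsr & SPSR_MODE_MASK) == 0)   /* returning to EL0? */
        v |= TPIDRROEL0_IN_EL0;         /* re-enter user mode */
    v -= TPIDRROEL0_EXC_UNIT;           /* one nesting level less */
    tpidrro = v;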
@@ -9,4 +9,7 @@
#include <offsets.h>
#define _thread_offset_to_exception_depth \
(___thread_t_arch_OFFSET + ___thread_arch_t_exception_depth_OFFSET)
#endif /* ZEPHYR_ARCH_ARM64_INCLUDE_OFFSETS_SHORT_ARCH_H_ */

@@ -18,5 +18,10 @@ static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
return (_cpu_t *)(read_tpidrro_el0() & TPIDRROEL0_CURR_CPU);
}
static ALWAYS_INLINE int arch_exception_depth(void)
{
return (read_tpidrro_el0() & TPIDRROEL0_EXC_DEPTH) / TPIDRROEL0_EXC_UNIT;
}
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_ARCH_INLINES_H */

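A hypothetical caller of the new accessor (not part of this commit, shown only to illustrate the intended use):

    /* e.g. refuse to run from exception context; __ASSERT comes from
     * <sys/__assert.h> */
    __ASSERT(arch_exception_depth() == 0,
             "must not be called from exception context");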
@@ -36,7 +36,6 @@ struct _callee_saved {
uint64_t x29;
uint64_t sp_el0;
uint64_t sp_elx;
uint64_t xzr;
};
typedef struct _callee_saved _callee_saved_t;
@@ -45,6 +44,7 @@ struct _thread_arch {
#ifdef CONFIG_USERSPACE
struct arm_mmu_ptables *ptables;
#endif
uint8_t exception_depth;
};
typedef struct _thread_arch _thread_arch_t;

@@ -21,4 +21,8 @@
#define TPIDRROEL0_CURR_CPU 0x0000fffffffffff8
#define TPIDRROEL0_EXC_DEPTH 0xff00000000000000
#define TPIDRROEL0_EXC_UNIT 0x0100000000000000
#define TPIDRROEL0_EXC_SHIFT 56
#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_TPIDRRO_EL0_H_ */
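Putting the masks together, the tpidrro_el0 layout implied by these definitions (IN_EL0, defined a few lines above this hunk, is assumed to be bit 0):

    [63:56] EXC_DEPTH  exception nesting count, one EXC_UNIT per level
    [47:3]  CURR_CPU   pointer to the current _cpu_t (8-byte aligned)
    [0]     IN_EL0     set while the CPU executes in user mode at EL0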