zephyr/arch/arm64/core/switch.S

/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
* Thread context switching for ARM64 Cortex-A (AArch64)
*
* This module implements the routines necessary for thread context switching
* on ARM64 Cortex-A (AArch64)
*/
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/syscall.h>
#include "macro_priv.inc"
_ASM_FILE_PROLOGUE
/*
* Routine to handle context switches
*
* This function is directly called either by _isr_wrapper() in case of
* preemption, or arch_switch() in case of cooperative switching.
*
* void z_arm64_context_switch(struct k_thread *new, struct k_thread *old);
*/
GTEXT(z_arm64_context_switch)
SECTION_FUNC(TEXT, z_arm64_context_switch)
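/*
 * On entry (AAPCS64): x0 = new thread, x1 = old thread. The callee-saved
 * area addressed through the _thread_offset_to_callee_saved_* offsets
 * (see offsets_short.h) is assumed to hold consecutive pairs:
 * x19/x20 ... x27/x28, x29/sp_el0, sp_elx/lr.
 */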
#ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK
/* Save the current SP_EL0 */
mrs x4, sp_el0
#endif
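/* Save the callee-saved registers x19-x28 of the outgoing thread */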
stp x19, x20, [x1, #_thread_offset_to_callee_saved_x19_x20]
stp x21, x22, [x1, #_thread_offset_to_callee_saved_x21_x22]
stp x23, x24, [x1, #_thread_offset_to_callee_saved_x23_x24]
stp x25, x26, [x1, #_thread_offset_to_callee_saved_x25_x26]
stp x27, x28, [x1, #_thread_offset_to_callee_saved_x27_x28]
#ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK
stp x29, x4, [x1, #_thread_offset_to_callee_saved_x29_sp_el0]
#else
str x29, [x1, #_thread_offset_to_callee_saved_x29_sp_el0]
#endif
/* Save the current SP_ELx and return address */
mov x4, sp
stp x4, lr, [x1, #_thread_offset_to_callee_saved_sp_elx_lr]
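/*
 * tpidrro_el0 is assumed to pack the per-CPU pointer in its low bits and
 * the exception depth in the bits above TPIDRROEL0_EXC_SHIFT; only the
 * depth field is swapped below.
 */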
/* save current thread's exception depth */
mrs x4, tpidrro_el0
lsr x2, x4, #TPIDRROEL0_EXC_SHIFT
strb w2, [x1, #_thread_offset_to_exception_depth]
/* retrieve next thread's exception depth */
ldrb w2, [x0, #_thread_offset_to_exception_depth]
bic x4, x4, #TPIDRROEL0_EXC_DEPTH
orr x4, x4, x2, lsl #TPIDRROEL0_EXC_SHIFT
msr tpidrro_el0, x4
#ifdef CONFIG_FPU_SHARING
/*
* Do this after tpidrro_el0 is updated with the new exception
* depth value, and before old->switch_handle is updated (making
* it available for grab by another CPU) as we still use its stack.
*/
stp x0, x1, [sp, #-16]!
bl z_arm64_fpu_thread_context_switch
ldp x0, x1, [sp], #16
#endif
/* Save the old thread into its switch handle, which is required by
 * z_sched_switch_spin()
 */
str x1, [x1, #___thread_t_switch_handle_OFFSET]
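/* From this point onwards the old thread may be picked up by another CPU */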
#ifdef CONFIG_THREAD_LOCAL_STORAGE
/* Grab the TLS pointer */
ldr x2, [x0, #_thread_offset_to_tls]
/* Store in the "Thread ID" register.
* This register is used as a base pointer to all
* thread variables with offsets added by toolchain.
*/
msr tpidr_el0, x2
#endif
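/* Restore the callee-saved registers of the incoming thread */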
ldp x19, x20, [x0, #_thread_offset_to_callee_saved_x19_x20]
ldp x21, x22, [x0, #_thread_offset_to_callee_saved_x21_x22]
ldp x23, x24, [x0, #_thread_offset_to_callee_saved_x23_x24]
ldp x25, x26, [x0, #_thread_offset_to_callee_saved_x25_x26]
ldp x27, x28, [x0, #_thread_offset_to_callee_saved_x27_x28]
#ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK
ldp x29, x4, [x0, #_thread_offset_to_callee_saved_x29_sp_el0]
/* Restore SP_EL0 */
msr sp_el0, x4
#else
ldr x29, [x0, #_thread_offset_to_callee_saved_x29_sp_el0]
#endif
/* Restore SP_EL1 and return address */
ldp x4, lr, [x0, #_thread_offset_to_callee_saved_sp_elx_lr]
mov sp, x4
#if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK)
/* arch_curr_cpu()->arch.current_stack_limit = thread->arch.stack_limit */
get_cpu x4
ldr x2, [x0, #_thread_offset_to_stack_limit]
str x2, [x4, #_cpu_offset_to_current_stack_limit]
#endif
#if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION)
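/*
 * Switch to the incoming thread's memory domain / stack protection setup.
 * x0 (the incoming thread) is assumed to be the argument; lr is preserved
 * across the call.
 */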
str lr, [sp, #-16]!
bl z_arm64_swap_mem_domains
ldr lr, [sp], #16
#endif
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
str lr, [sp, #-16]!
bl z_thread_mark_switched_in
ldr lr, [sp], #16
#endif
/* Return to arch_switch() or _isr_wrapper() */
ret
/*
* Synchronous exceptions handler
*
* The service call (SVC) is used in the following occasions:
* - Cooperative context switching
* - IRQ offloading
*/
GTEXT(z_arm64_sync_exc)
SECTION_FUNC(TEXT, z_arm64_sync_exc)
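/* Read ESR_EL1 and extract the exception class (EC, bits [31:26]) */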
mrs x0, esr_el1
lsr x1, x0, #26
#ifdef CONFIG_FPU_SHARING
cmp x1, #0x07 /* Access to SIMD or floating-point */
bne 1f
mov x0, sp
bl z_arm64_fpu_trap
b z_arm64_exit_exc_fpu_done
1:
#endif
cmp x1, #0x15 /* 0x15 = SVC */
bne inv
/* Demux the SVC call */
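/*
 * The SVC immediate is carried in the ESR ISS field; Zephyr's SVC call
 * numbers are assumed to fit in the low 8 bits.
 */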
and x1, x0, #0xff
cmp x1, #_SVC_CALL_RUNTIME_EXCEPT
beq oops
#ifdef CONFIG_USERSPACE
cmp x1, #_SVC_CALL_SYSTEM_CALL
beq z_arm64_do_syscall
#endif
#ifdef CONFIG_IRQ_OFFLOAD
cmp x1, #_SVC_CALL_IRQ_OFFLOAD
beq offload
b inv
offload:
/*
* Retrieve provided routine and argument from the stack.
* Routine pointer is in saved x0, argument in saved x1
* so we load them with x1/x0 (reversed).
*/
ldp x1, x0, [sp, ___esf_t_x0_x1_OFFSET]
/* ++_current_cpu->nested to be checked by arch_is_in_isr() */
get_cpu x2
ldr w3, [x2, #___cpu_t_nested_OFFSET]
add w4, w3, #1
str w4, [x2, #___cpu_t_nested_OFFSET]
/* If not nested: switch to IRQ stack and save current sp on it. */
cbnz w3, 1f
ldr x3, [x2, #___cpu_t_irq_stack_OFFSET]
mov x4, sp
mov sp, x3
str x4, [sp, #-16]!
#if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK)
/* Update the current stack limit with the IRQ stack limit */
sub x3, x3, #CONFIG_ISR_STACK_SIZE
str x3, [x2, #_cpu_offset_to_current_stack_limit]
#endif
1:
/* Execute provided routine (argument is in x0 already). */
blr x1
/* Exit through regular IRQ exit path */
b z_arm64_irq_done
#endif
b inv
oops:
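/* sp points at the ESF built on exception entry; pass it to the handler */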
mov x0, sp
b z_arm64_do_kernel_oops
inv:
mov x0, #0 /* K_ERR_CPU_EXCEPTION */
mov x1, sp
bl z_arm64_fatal_error
/* Return here only in case of recoverable error */
b z_arm64_exit_exc