/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * Thread context switching for ARM64 Cortex-A (AArch64)
 *
 * This module implements the routines necessary for thread context switching
 * on ARM64 Cortex-A (AArch64)
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <offsets_short.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/syscall.h>
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

/*
 * Routine to handle context switches
 *
 * This function is directly called either by _isr_wrapper() in case of
 * preemption, or by arch_switch() in case of cooperative switching.
 *
 * void z_arm64_context_switch(struct k_thread *new, struct k_thread *old);
 */
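
/*
 * Per the AAPCS64 calling convention x0 carries the incoming thread ("new")
 * and x1 the outgoing thread ("old"). Only the callee-saved registers
 * (x19-x28, x29, SP and LR) need to be stored into the outgoing thread's
 * k_thread area: the rest of the context either already sits in the
 * exception stack frame (preemption) or is not expected to survive a
 * function call (cooperative switch through arch_switch()).
 */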

GTEXT(z_arm64_context_switch)
SECTION_FUNC(TEXT, z_arm64_context_switch)
#ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	/* Save the current SP_EL0 */
	mrs	x4, sp_el0
#endif
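
	/* Save the outgoing thread's (x1) callee-saved registers x19-x28 */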
	stp	x19, x20, [x1, #_thread_offset_to_callee_saved_x19_x20]
	stp	x21, x22, [x1, #_thread_offset_to_callee_saved_x21_x22]
	stp	x23, x24, [x1, #_thread_offset_to_callee_saved_x23_x24]
	stp	x25, x26, [x1, #_thread_offset_to_callee_saved_x25_x26]
	stp	x27, x28, [x1, #_thread_offset_to_callee_saved_x27_x28]
#ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	stp	x29, x4, [x1, #_thread_offset_to_callee_saved_x29_sp_el0]
#else
	str	x29, [x1, #_thread_offset_to_callee_saved_x29_sp_el0]
#endif

	/* Save the current SP_ELx and return address */
	mov	x4, sp
	stp	x4, lr, [x1, #_thread_offset_to_callee_saved_sp_elx_lr]

	/* Save current thread's exception depth */
	mrs	x4, tpidrro_el0
	lsr	x2, x4, #TPIDRROEL0_EXC_SHIFT
	strb	w2, [x1, #_thread_offset_to_exception_depth]

	/* Retrieve next thread's exception depth */
	ldrb	w2, [x0, #_thread_offset_to_exception_depth]
	bic	x4, x4, #TPIDRROEL0_EXC_DEPTH
	orr	x4, x4, x2, lsl #TPIDRROEL0_EXC_SHIFT
	msr	tpidrro_el0, x4
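
	/*
	 * Roughly equivalent C for the sequence above (field names are
	 * illustrative, not the actual struct members):
	 *   old->exception_depth = tpidrro_el0 >> TPIDRROEL0_EXC_SHIFT;
	 *   tpidrro_el0 = (tpidrro_el0 & ~TPIDRROEL0_EXC_DEPTH) |
	 *                 (new->exception_depth << TPIDRROEL0_EXC_SHIFT);
	 */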

#ifdef CONFIG_FPU_SHARING
	/*
	 * Do this after tpidrro_el0 is updated with the new exception
	 * depth value, and before old->switch_handle is updated (making
	 * it available to be grabbed by another CPU) as we still use its stack.
	 */
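
	/* x0 (new) and x1 (old) are caller-saved, so preserve them across the call */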
	stp	x0, x1, [sp, #-16]!
	bl	z_arm64_fpu_thread_context_switch
	ldp	x0, x1, [sp], #16
#endif

	/* Save the old thread into its switch handle, as required by
	 * z_sched_switch_spin()
	 */
	str	x1, [x1, #___thread_t_switch_handle_OFFSET]
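
	/*
	 * Writing a non-NULL value here marks the old thread's context as
	 * fully saved, so z_sched_switch_spin() on another CPU may pick the
	 * old thread up again.
	 */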

#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Grab the TLS pointer */
	ldr	x2, [x0, #_thread_offset_to_tls]

	/* Store it in the "Thread ID" register.
	 * This register is used as a base pointer to all
	 * thread variables with offsets added by the toolchain.
	 */
	msr	tpidr_el0, x2
#endif

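	/* Restore the incoming thread's (x0) callee-saved registers x19-x28 */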
	ldp	x19, x20, [x0, #_thread_offset_to_callee_saved_x19_x20]
	ldp	x21, x22, [x0, #_thread_offset_to_callee_saved_x21_x22]
	ldp	x23, x24, [x0, #_thread_offset_to_callee_saved_x23_x24]
	ldp	x25, x26, [x0, #_thread_offset_to_callee_saved_x25_x26]
	ldp	x27, x28, [x0, #_thread_offset_to_callee_saved_x27_x28]
#ifndef CONFIG_ARM64_SAFE_EXCEPTION_STACK
	ldp	x29, x4, [x0, #_thread_offset_to_callee_saved_x29_sp_el0]

	/* Restore SP_EL0 */
	msr	sp_el0, x4
#else
	ldr	x29, [x0, #_thread_offset_to_callee_saved_x29_sp_el0]
#endif

	/* Restore SP_EL1 and return address */
	ldp	x4, lr, [x0, #_thread_offset_to_callee_saved_sp_elx_lr]
	mov	sp, x4

#if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK)
	/* arch_curr_cpu()->arch.current_stack_limit = thread->arch.stack_limit */
	get_cpu	x4
	ldr	x2, [x0, #_thread_offset_to_stack_limit]
	str	x2, [x4, #_cpu_offset_to_current_stack_limit]
#endif

#if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION)
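	/*
	 * lr still holds the address this routine must return to with the
	 * final ret, so preserve it across the following call.
	 */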
	str	lr, [sp, #-16]!
	bl	z_arm64_swap_mem_domains
	ldr	lr, [sp], #16
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	str	lr, [sp, #-16]!
	bl	z_thread_mark_switched_in
	ldr	lr, [sp], #16
#endif

	/* Return to arch_switch() or _isr_wrapper() */
	ret

/*
 * Synchronous exceptions handler
 *
 * The service call (SVC) is used on the following occasions:
 * - Cooperative context switching
 * - IRQ offloading
 */
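
/*
 * As the demux below shows, the SVC immediate is also used for runtime
 * exceptions (kernel oops) and, with CONFIG_USERSPACE, for system calls;
 * trapped FP/SIMD accesses are handled here as well when CONFIG_FPU_SHARING
 * is enabled.
 */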

GTEXT(z_arm64_sync_exc)
SECTION_FUNC(TEXT, z_arm64_sync_exc)

	mrs	x0, esr_el1
	lsr	x1, x0, #26
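
	/*
	 * ESR_EL1 bits [31:26] hold the exception class (EC), so x1 now
	 * contains the EC value compared against below.
	 */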

#ifdef CONFIG_FPU_SHARING
	cmp	x1, #0x07 /* Access to SIMD or floating-point */
	bne	1f
	mov	x0, sp
	bl	z_arm64_fpu_trap
	b	z_arm64_exit_exc_fpu_done
1:
#endif

	cmp	x1, #0x15 /* 0x15 = SVC */
	bne	inv

	/* Demux the SVC call */
	and	x1, x0, #0xff
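
	/*
	 * For an SVC taken from AArch64 the ESR ISS field carries the SVC
	 * immediate; only the low byte is kept here, on the assumption that
	 * the _SVC_CALL_* identifiers fit in 8 bits.
	 */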

	cmp	x1, #_SVC_CALL_RUNTIME_EXCEPT
	beq	oops

#ifdef CONFIG_USERSPACE
	cmp	x1, #_SVC_CALL_SYSTEM_CALL
	beq	z_arm64_do_syscall
#endif

#ifdef CONFIG_IRQ_OFFLOAD
	cmp	x1, #_SVC_CALL_IRQ_OFFLOAD
	beq	offload
	b	inv
offload:
	/*
	 * Retrieve provided routine and argument from the stack.
	 * Routine pointer is in saved x0, argument in saved x1
	 * so we load them with x1/x0 (reversed).
	 */
	ldp	x1, x0, [sp, ___esf_t_x0_x1_OFFSET]

	/* ++_current_cpu->nested to be checked by arch_is_in_isr() */
	get_cpu	x2
	ldr	w3, [x2, #___cpu_t_nested_OFFSET]
	add	w4, w3, #1
	str	w4, [x2, #___cpu_t_nested_OFFSET]

	/* If not nested: switch to IRQ stack and save current sp on it. */
	cbnz	w3, 1f
	ldr	x3, [x2, #___cpu_t_irq_stack_OFFSET]
	mov	x4, sp
	mov	sp, x3
	str	x4, [sp, #-16]!
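	/*
	 * The saved thread SP is expected to be popped again on the regular
	 * IRQ exit path once the nesting count drops back to zero.
	 */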
#if defined(CONFIG_ARM64_SAFE_EXCEPTION_STACK)
	/* Update the stack limit with the IRQ stack limit */
	sub	x3, x3, #CONFIG_ISR_STACK_SIZE
	str	x3, [x2, #_cpu_offset_to_current_stack_limit]
#endif
1:
	/* Execute provided routine (argument is in x0 already). */
	blr	x1

	/* Exit through regular IRQ exit path */
	b	z_arm64_irq_done
#endif
	b	inv

oops:
	mov	x0, sp
	b	z_arm64_do_kernel_oops

inv:
	mov	x0, #0 /* K_ERR_CPU_EXCEPTION */
	mov	x1, sp
	bl	z_arm64_fatal_error

	/* Return here only in case of recoverable error */
	b	z_arm64_exit_exc