arm64: switch to the IRQ stack during ISR execution

Avoid executing ISRs on the thread stack, as it might not be sized
for that. Besides, we already have IRQ stacks set up for us.

The non-nested IRQ context is still (and has to be) saved on the thread
stack as the thread could be preempted.
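
In C-like pseudocode, the resulting entry/exit discipline looks
roughly like this. This is an illustrative sketch only: the real code
is the assembly below, which runs before any C environment is usable.
The nested and irq_stack fields are the per-CPU names visible in the
diff; current_sp(), set_sp(), push() and pop() are hypothetical
helpers.

    #include <stdint.h>

    /* Hypothetical helpers standing in for SP manipulation that can
     * only be done in assembly. */
    extern uintptr_t current_sp(void);
    extern void set_sp(uintptr_t sp);
    extern void push(uintptr_t v);
    extern uintptr_t pop(void);

    /* Minimal stand-in for Zephyr's per-CPU data. */
    struct cpu {
        uint32_t nested;
        uintptr_t irq_stack;
    };
    extern struct cpu *const _current_cpu;

    void isr_enter(void)
    {
        if (++_current_cpu->nested == 1) {
            /* Outermost interrupt: the interrupted context was
             * already saved on the thread stack, so the thread can
             * still be preempted; only handler execution moves to
             * this CPU's IRQ stack. */
            uintptr_t thread_sp = current_sp();
            set_sp(_current_cpu->irq_stack);
            push(thread_sp);
        }
    }

    void isr_exit(void)
    {
        if (--_current_cpu->nested == 0) {
            set_sp(pop());  /* back to the task's stack */
        }
    }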

The irq_offload case is never nested and is always invoked with the
sched_lock held, so it can be simplified a bit.
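
Since the offload path is guaranteed non-nested, the counter updates
become plain stores of 1 and 0. A hedged sketch of the equivalent
control flow, reusing the stand-ins above (switch_to_irq_stack() and
restore_thread_sp() are hypothetical; z_irq_do_offload() is the real
Zephyr routine):

    extern void z_irq_do_offload(void);     /* real Zephyr routine */
    extern void switch_to_irq_stack(void);  /* hypothetical helper */
    extern void restore_thread_sp(void);    /* hypothetical helper */

    void sync_exc_offload_path(void)
    {
        _current_cpu->nested = 1;   /* seen by arch_is_in_isr() */
        switch_to_irq_stack();
        z_irq_do_offload();
        _current_cpu->nested = 0;
        restore_thread_sp();
    }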

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
commit 34d425fbe5
Author:    Nicolas Pitre <npitre@baylibre.com>
Date:      2022-02-09 16:54:37 -05:00
Committer: Anas Nashif

2 changed files with 24 additions and 7 deletions

arch/arm64/core/isr_wrapper.S

@@ -38,6 +38,14 @@ SECTION_FUNC(TEXT, _isr_wrapper)
 	add	w1, w1, #1
 	str	w1, [x0, #___cpu_t_nested_OFFSET]
 
+	/* If not nested: switch to IRQ stack and save current sp on it. */
+	cmp	w1, #1
+	bne	1f
+	ldr	x1, [x0, #___cpu_t_irq_stack_OFFSET]
+	mov	x2, sp
+	mov	sp, x1
+	str	x2, [sp, #-16]!
+1:
 #ifdef CONFIG_SCHED_THREAD_USAGE
 	bl	z_sched_usage_stop
 #endif
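
Note: only 8 bytes are stored, but the thread SP is pushed with
str x2, [sp, #-16]! because AArch64 requires sp to stay 16-byte
aligned whenever it is used as the base of a memory access (enforced
when the SCTLR_ELx.SA stack alignment check is enabled).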
@@ -107,6 +115,10 @@ spurious_continue:
 	str	w1, [x0, #___cpu_t_nested_OFFSET]
 	bne	exit
 
+	/* No more nested: retrieve the task's stack. */
+	ldr	x0, [sp]
+	mov	sp, x0
+
 	/*
 	 * z_arch_get_next_switch_handle() is returning:
 	 *
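
On the exit side, the task's SP is restored as soon as the nested
count drops back to zero, so the remainder of the exit path, including
the call to z_arch_get_next_switch_handle() and any resulting context
switch, runs on the thread stack again.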

arch/arm64/core/vector_table.S

@@ -147,19 +147,24 @@ SECTION_FUNC(TEXT, z_arm64_sync_exc)
 	beq	offload
 	b	inv
 offload:
-	/* ++_current_cpu->nested to be checked by arch_is_in_isr() */
+	/* _current_cpu->nested=1, to be checked by arch_is_in_isr() */
 	get_cpu	x0
-	ldr	w1, [x0, #___cpu_t_nested_OFFSET]
-	add	w1, w1, #1
+	mov	w1, #1
 	str	w1, [x0, #___cpu_t_nested_OFFSET]
 
+	/* switch to IRQ stack and save current sp on it. */
+	ldr	x1, [x0, #___cpu_t_irq_stack_OFFSET]
+	mov	x2, sp
+	mov	sp, x1
+	str	x2, [sp, #-16]!
 	bl	z_irq_do_offload
 
-	/* --_current_cpu->nested */
+	/* _current_cpu->nested=0 */
 	get_cpu	x0
-	ldr	w1, [x0, #___cpu_t_nested_OFFSET]
-	sub	w1, w1, #1
-	str	w1, [x0, #___cpu_t_nested_OFFSET]
+	str	wzr, [x0, #___cpu_t_nested_OFFSET]
+	/* restore original stack pointer. */
+	ldr	x1, [sp]
+	mov	sp, x1
 	b	z_arm64_exit_exc
 #endif
 	b	inv
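
For context: both paths maintain the nested counter because, on arm64,
Zephyr derives ISR context from it. A rough sketch of what
arch_is_in_isr() amounts to (based on the comment in the diff; consult
the tree for the exact definition):

    #include <stdbool.h>

    static inline bool arch_is_in_isr(void)
    {
        /* nonzero while a real IRQ or an offload is being handled */
        return arch_curr_cpu()->nested != 0U;
    }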