diff --git a/arch/arm64/core/isr_wrapper.S b/arch/arm64/core/isr_wrapper.S index 867f5156b28..ba7c00414b0 100644 --- a/arch/arm64/core/isr_wrapper.S +++ b/arch/arm64/core/isr_wrapper.S @@ -119,9 +119,28 @@ spurious_continue: * x0: 1st thread in the ready queue * x1: _current thread */ + +#ifdef CONFIG_SMP + /* + * 2 possibilities here: + * - x0 != NULL (implies x0 != x1): we need to context switch and set + * the switch_handle in the context switch code + * - x0 == NULL: no context switch + */ + cmp x0, #0x0 + bne switch + + /* + * No context switch. Restore x0 from x1 (they are the same thread). + * See also comments to z_arch_get_next_switch_handle() + */ + mov x0, x1 + b exit +switch: +#else cmp x0, x1 beq exit - +#endif /* Switch thread */ bl z_arm64_context_switch diff --git a/arch/arm64/core/thread.c b/arch/arm64/core/thread.c index b311baf0437..395ad00dc8e 100644 --- a/arch/arm64/core/thread.c +++ b/arch/arm64/core/thread.c @@ -124,9 +124,28 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, void *z_arch_get_next_switch_handle(struct k_thread **old_thread) { + /* + * When returning from this function we will have the current thread + * on the stack to be popped into x1 and the next thread in x0 returned + * from z_get_next_switch_handle() (see isr_wrapper.S) + */ *old_thread = _current; +#ifdef CONFIG_SMP + /* + * XXX: see threads in #41840 and #40795 + * + * The scheduler API requires a complete switch handle here, but arm64 + * optimizes things such that the callee-save registers are still + * unsaved here (they get written out in z_arm64_context_switch() + * below). So pass a NULL instead, which the scheduler will store into + * the thread switch_handle field. The resulting thread won't be + * switched into until we write that ourselves. + */ + return z_get_next_switch_handle(NULL); +#else return z_get_next_switch_handle(*old_thread); +#endif } #ifdef CONFIG_USERSPACE