diff --git a/arch/arm64/core/isr_wrapper.S b/arch/arm64/core/isr_wrapper.S
index bfce26bcc34..3aeed90a9d0 100644
--- a/arch/arm64/core/isr_wrapper.S
+++ b/arch/arm64/core/isr_wrapper.S
@@ -117,49 +117,28 @@ z_arm64_irq_done:
 	bne	exit

 	/* No more nested: retrieve the task's stack. */
-	ldr	x0, [sp]
-	mov	sp, x0
+	ldr	x1, [sp]
+	mov	sp, x1
+
+	/* retrieve pointer to the current thread */
+	ldr	x1, [x0, #___cpu_t_current_OFFSET]

 	/*
-	 * z_arch_get_next_switch_handle() is returning:
-	 *
-	 * - The next thread to schedule in x0
-	 * - The current thread in x1. This value is returned using the
-	 *   **old_thread parameter, so we need to make space on the stack for
-	 *   that.
+	 * Get next thread to schedule with z_get_next_switch_handle().
+	 * We pass it a NULL as we didn't save the whole thread context yet.
+	 * If no scheduling is necessary then NULL will be returned.
 	 */
-	sub	sp, sp, #16
-	mov	x0, sp
-	bl	z_arch_get_next_switch_handle
-	ldp	x1, xzr, [sp], #16
+	str	x1, [sp, #-16]!
+	mov	x0, xzr
+	bl	z_get_next_switch_handle
+	ldr	x1, [sp], #16
+	cbz	x0, exit

 	/*
-	 * x0: 1st thread in the ready queue
-	 * x1: _current thread
+	 * Switch thread
+	 * x0: new thread
+	 * x1: old thread
 	 */
-
-#ifdef CONFIG_SMP
-	/*
-	 * 2 possibilities here:
-	 * - x0 != NULL (implies x0 != x1): we need to context switch and set
-	 *   the switch_handle in the context switch code
-	 * - x0 == NULL: no context switch
-	 */
-	cmp	x0, #0x0
-	bne	switch
-
-	/*
-	 * No context switch. Restore x0 from x1 (they are the same thread).
-	 * See also comments to z_arch_get_next_switch_handle()
-	 */
-	mov	x0, x1
-	b	exit
-switch:
-#else
-	cmp	x0, x1
-	beq	exit
-#endif
-	/* Switch thread */
 	bl	z_arm64_context_switch

exit:
diff --git a/arch/arm64/core/switch.S b/arch/arm64/core/switch.S
index 81df04e1387..d6e47375f8c 100644
--- a/arch/arm64/core/switch.S
+++ b/arch/arm64/core/switch.S
@@ -68,12 +68,10 @@ SECTION_FUNC(TEXT, z_arm64_context_switch)
 	ldp	x0, x1, [sp], #16
 #endif

-#ifdef CONFIG_SMP
 	/* save old thread into switch handle which is required by
 	 * wait_for_switch
 	 */
 	str	x1, [x1, #___thread_t_switch_handle_OFFSET]
-#endif

 #ifdef CONFIG_THREAD_LOCAL_STORAGE
 	/* Grab the TLS pointer */
diff --git a/arch/arm64/core/thread.c b/arch/arm64/core/thread.c
index 51a9df089ed..1c1aaf50d10 100644
--- a/arch/arm64/core/thread.c
+++ b/arch/arm64/core/thread.c
@@ -120,32 +120,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	thread->switch_handle = thread;
 }

-void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
-{
-	/*
-	 * When returning from this function we will have the current thread
-	 * onto the stack to be popped in x1 and the next thread in x0 returned
-	 * from z_get_next_switch_handle() (see isr_wrapper.S)
-	 */
-	*old_thread = _current;
-
-#ifdef CONFIG_SMP
-	/*
-	 * XXX: see thread in #41840 and #40795
-	 *
-	 * The scheduler API requires a complete switch handle here, but arm64
-	 * optimizes things such that the callee-save registers are still
-	 * unsaved here (they get written out in z_arm64_context_switch()
-	 * below). So pass a NULL instead, which the scheduler will store into
-	 * the thread switch_handle field. The resulting thread won't be
-	 * switched into until we write that ourselves.
-	 */
-	return z_get_next_switch_handle(NULL);
-#else
-	return z_get_next_switch_handle(*old_thread);
-#endif
-}
-
 #ifdef CONFIG_USERSPACE
 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 					void *p1, void *p2, void *p3)
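
For reference, below is a minimal C sketch of the switch-handle handshake this
change relies on. Only z_get_next_switch_handle(), the switch_handle field, and
the wait_for_switch spin come from the diff above; everything else (struct
thread layout, irq_exit_reschedule(), publish_switch_handle(), the stubs in
main()) is invented here for illustration and is not the Zephyr implementation.
The protocol: the outgoing CPU passes NULL to the scheduler, which stores it
into the old thread's switch_handle, so other CPUs spin until the old context
is fully saved and the real handle is published at the end of the context
switch.

/*
 * Illustrative sketch only -- not the Zephyr implementation.
 * Hypothetical names: struct thread, irq_exit_reschedule(),
 * publish_switch_handle(), z_get_next_switch_handle_stub().
 */
#include <stdatomic.h>
#include <stddef.h>

struct thread {
	_Atomic(void *) switch_handle;	/* NULL while context is unsaved */
	/* callee-saved registers, stack pointer, ... */
};

/*
 * Stub standing in for the scheduler: storing the interim NULL into the
 * outgoing thread's switch_handle is the part that matters here -- it
 * marks the thread "in flight" so no other CPU resumes it too early.
 */
static struct thread *z_get_next_switch_handle_stub(struct thread *old,
						    void *interim)
{
	atomic_store(&old->switch_handle, interim);	/* interim == NULL */
	return NULL;	/* or the next runnable thread */
}

/*
 * switch.S equivalent: publish the real handle only after the old
 * thread's registers have actually been saved.
 */
static void publish_switch_handle(struct thread *old)
{
	atomic_store_explicit(&old->switch_handle, old, memory_order_release);
}

/* Incoming CPU side: spin until the owner has finished saving. */
static void wait_for_switch(struct thread *next)
{
	while (atomic_load_explicit(&next->switch_handle,
				    memory_order_acquire) == NULL) {
		/* busy-wait */
	}
}

/* isr_wrapper.S equivalent: reschedule on the way out of an interrupt. */
int main(void)
{
	struct thread t = { .switch_handle = &t };
	struct thread *next = z_get_next_switch_handle_stub(&t, NULL);

	if (next != NULL) {	/* mirrors: cbz x0, exit */
		/* ... save t's callee-saved context here ... */
		publish_switch_handle(&t);
		wait_for_switch(next);
		/* ... restore next's context and resume it ... */
	}
	return 0;
}

This also shows why the diff can drop the CONFIG_SMP conditionals: with the
NULL-interim convention, the "no switch needed" case is simply a NULL return
(the cbz x0, exit above), so uniprocessor and SMP builds follow the same path.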