arm64: simplify the code around the call to z_get_next_switch_handle()

Remove the special SMP workaround and the extra wrapper.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>

Parent commit: c9e3e0d956
This commit:  47e4a4487f
3 changed files with 16 additions and 65 deletions
|
@@ -117,49 +117,28 @@ z_arm64_irq_done:  (arch/arm64/core/isr_wrapper.S — reconstructed from side-by-side render; blank-line placement approximate)
 	bne	exit
 
 	/* No more nested: retrieve the task's stack. */
-	ldr	x0, [sp]
-	mov	sp, x0
+	ldr	x1, [sp]
+	mov	sp, x1
 
+	/* retrieve pointer to the current thread */
+	ldr	x1, [x0, #___cpu_t_current_OFFSET]
+
 	/*
-	 * z_arch_get_next_switch_handle() is returning:
-	 *
-	 * - The next thread to schedule in x0
-	 * - The current thread in x1. This value is returned using the
-	 *   **old_thread parameter, so we need to make space on the stack for
-	 *   that.
+	 * Get next thread to schedule with z_get_next_switch_handle().
+	 * We pass it a NULL as we didn't save the whole thread context yet.
+	 * If no scheduling is necessary then NULL will be returned.
 	 */
-	sub	sp, sp, #16
-	mov	x0, sp
-	bl	z_arch_get_next_switch_handle
-	ldp	x1, xzr, [sp], #16
+	str	x1, [sp, #-16]!
+	mov	x0, xzr
+	bl	z_get_next_switch_handle
+	ldr	x1, [sp], #16
+	cbz	x0, exit
 
 	/*
-	 * x0: 1st thread in the ready queue
-	 * x1: _current thread
+	 * Switch thread
+	 * x0: new thread
+	 * x1: old thread
 	 */
 
-#ifdef CONFIG_SMP
-	/*
-	 * 2 possibilities here:
-	 * - x0 != NULL (implies x0 != x1): we need to context switch and set
-	 *   the switch_handle in the context switch code
-	 * - x0 == NULL: no context switch
-	 */
-	cmp	x0, #0x0
-	bne	switch
-
-	/*
-	 * No context switch. Restore x0 from x1 (they are the same thread).
-	 * See also comments to z_arch_get_next_switch_handle()
-	 */
-	mov	x0, x1
-	b	exit
-
-switch:
-#else
-	cmp	x0, x1
-	beq	exit
-#endif
-	/* Switch thread */
 	bl	z_arm64_context_switch
 
 exit:
@@ -68,12 +68,10 @@ SECTION_FUNC(TEXT, z_arm64_context_switch)  (arch/arm64/core/switch.S — reconstructed from side-by-side render)
 	ldp	x0, x1, [sp], #16
 #endif
 
-#ifdef CONFIG_SMP
 	/* save old thread into switch handle which is required by
 	 * wait_for_switch
 	 */
 	str	x1, [x1, #___thread_t_switch_handle_OFFSET]
-#endif
 
 #ifdef CONFIG_THREAD_LOCAL_STORAGE
 	/* Grab the TLS pointer */
@@ -120,32 +120,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,  (arch/arm64/core/thread.c — reconstructed from side-by-side render)
 	thread->switch_handle = thread;
 }
 
-void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
-{
-	/*
-	 * When returning from this function we will have the current thread
-	 * onto the stack to be popped in x1 and the next thread in x0 returned
-	 * from z_get_next_switch_handle() (see isr_wrapper.S)
-	 */
-	*old_thread = _current;
-
-#ifdef CONFIG_SMP
-	/*
-	 * XXX: see thread in #41840 and #40795
-	 *
-	 * The scheduler API requires a complete switch handle here, but arm64
-	 * optimizes things such that the callee-save registers are still
-	 * unsaved here (they get written out in z_arm64_context_switch()
-	 * below). So pass a NULL instead, which the scheduler will store into
-	 * the thread switch_handle field. The resulting thread won't be
-	 * switched into until we write that ourselves.
-	 */
-	return z_get_next_switch_handle(NULL);
-#else
-	return z_get_next_switch_handle(*old_thread);
-#endif
-}
-
 #ifdef CONFIG_USERSPACE
 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 					void *p1, void *p2, void *p3)
Loading…
Add table
Add a link
Reference in a new issue