kernel: Reset the switch_handle only in the arch code

Avoid setting the switch_handle in the z_get_next_switch_handle() code
when the context is not yet fully saved, to avoid a race against other
cores waiting on wait_for_switch().

See issue #40795 and discussion in #41840.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
commit a74dac89ba (parent 54271d23e8)
2 changed files with 39 additions and 1 deletion
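For context, here is a minimal C sketch of the waiter side of the race the commit message describes: another core that has picked the outgoing thread from the ready queue spins until that thread's switch_handle becomes non-NULL, so the handle must only be published once the context is fully saved. The struct and function bodies below are simplified stand-ins, not the Zephyr sources; only the names switch_handle and wait_for_switch() come from the kernel.

    #include <stdatomic.h>
    #include <stddef.h>

    /* Simplified stand-in for struct k_thread; only the field that matters here. */
    struct thread_sketch {
            _Atomic(void *) switch_handle;  /* NULL until the context is fully saved */
    };

    /*
     * Waiter side: a CPU that picked this thread from the ready queue spins
     * until the handle is published (cf. wait_for_switch() in the kernel).
     * If the handle were published before the registers were saved, the
     * thread could be resumed with a half-saved context.
     */
    void wait_for_switch_sketch(struct thread_sketch *incoming)
    {
            while (atomic_load_explicit(&incoming->switch_handle,
                                        memory_order_acquire) == NULL) {
                    /* busy-wait: the thread is not yet safe to resume */
            }
    }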
@@ -119,9 +119,28 @@ spurious_continue:
 	 * x0: 1st thread in the ready queue
 	 * x1: _current thread
 	 */
 
+#ifdef CONFIG_SMP
+	/*
+	 * 2 possibilities here:
+	 *  - x0 != NULL (implies x0 != x1): we need to context switch and set
+	 *    the switch_handle in the context switch code
+	 *  - x0 == NULL: no context switch
+	 */
+	cmp	x0, #0x0
+	bne	switch
+
+	/*
+	 * No context switch. Restore x0 from x1 (they are the same thread).
+	 * See also comments to z_arch_get_next_switch_handle()
+	 */
+	mov	x0, x1
+	b	exit
+switch:
+#else
 	cmp	x0, x1
 	beq	exit
-
+#endif
 	/* Switch thread */
 	bl	z_arm64_context_switch
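For readers less familiar with AArch64 assembly, the following plain-C rendering of the branch added above may help. It is illustration only: next plays the role of x0, current of x1, and the prototype of z_arm64_context_switch() is assumed for the sake of the sketch.

    #include <stddef.h>

    struct k_thread;  /* opaque in this sketch */

    /* Assumed prototype for the assembly routine, for illustration only. */
    extern void z_arm64_context_switch(struct k_thread *new_thread,
                                       struct k_thread *old_thread);

    void isr_exit_reschedule_sketch(struct k_thread *next, struct k_thread *current)
    {
    #ifdef CONFIG_SMP
            if (next == NULL) {
                    /* No context switch: keep running 'current'
                     * (the assembly restores x0 from x1 and branches to exit). */
                    return;
            }

            /* next != NULL implies next != current: switch; the outgoing
             * thread's switch_handle is set inside the context switch code. */
            z_arm64_context_switch(next, current);
    #else
            if (next == current) {
                    return;  /* same thread, nothing to do */
            }
            z_arm64_context_switch(next, current);
    #endif
    }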
@@ -124,9 +124,28 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 
 void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
 {
+	/*
+	 * When returning from this function we will have the current thread
+	 * onto the stack to be popped in x1 and the next thread in x0 returned
+	 * from z_get_next_switch_handle() (see isr_wrapper.S)
+	 */
 	*old_thread = _current;
 
+#ifdef CONFIG_SMP
+	/*
+	 * XXX: see thread in #41840 and #40795
+	 *
+	 * The scheduler API requires a complete switch handle here, but arm64
+	 * optimizes things such that the callee-save registers are still
+	 * unsaved here (they get written out in z_arm64_context_switch()
+	 * below). So pass a NULL instead, which the scheduler will store into
+	 * the thread switch_handle field. The resulting thread won't be
+	 * switched into until we write that ourselves.
+	 */
+	return z_get_next_switch_handle(NULL);
+#else
 	return z_get_next_switch_handle(*old_thread);
+#endif
 }
 
 #ifdef CONFIG_USERSPACE
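And a matching sketch of the publisher side that the comment above ("until we write that ourselves") refers to: the arch switch code saves the outgoing thread's callee-saved registers first and only then stores the switch handle, so the waiter sketch earlier never resumes a thread whose registers are still unsaved. The struct and the helper save_callee_saved_registers() are hypothetical; on arm64 this work is done in assembly by z_arm64_context_switch().

    #include <stdatomic.h>
    #include <stddef.h>

    /* Same simplified stand-in as in the earlier sketch. */
    struct thread_sketch {
            _Atomic(void *) switch_handle;
    };

    /* Hypothetical helper standing in for the register-save assembly. */
    void save_callee_saved_registers(struct thread_sketch *old);

    /*
     * Publisher side: save the outgoing context completely, then publish the
     * handle with release ordering so other cores see a consistent context.
     */
    void context_switch_publish_sketch(struct thread_sketch *old, void *old_handle)
    {
            save_callee_saved_registers(old);

            atomic_store_explicit(&old->switch_handle, old_handle,
                                  memory_order_release);

            /* ... then restore the incoming thread's context and return into it ... */
    }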