From deca2301f6f3b022f03f7ff72028569b959a76de Mon Sep 17 00:00:00 2001 From: Andy Ross Date: Wed, 24 Feb 2021 14:47:35 -0800 Subject: [PATCH] kernel/swap: Move arch_cohere_stacks() back under the lock Commit 6b84ab383050 ("kernel/sched: Adjust locking in z_swap()") moved the call to arch_cohere_stacks() out of the scheduler lock while doing some reorganizing. On further reflection, this is incorrect. When done outside the lock, the two arch_cohere_stacks() calls will race against each other. Signed-off-by: Andy Ross --- kernel/include/kswap.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h index 81bfd89281e..75f298cdbec 100644 --- a/kernel/include/kswap.h +++ b/kernel/include/kswap.h @@ -106,6 +106,8 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key, z_spin_lock_set_owner(&sched_spinlock); #endif + arch_cohere_stacks(old_thread, NULL, new_thread); + #ifdef CONFIG_SMP /* Add _current back to the run queue HERE. After * wait_for_switch() we are guaranteed to reach the @@ -121,7 +123,6 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key, new_thread->switch_handle = NULL; } k_spin_release(&sched_spinlock); - arch_cohere_stacks(old_thread, NULL, new_thread); arch_switch(newsh, &old_thread->switch_handle); } else { k_spin_release(&sched_spinlock);