From eefd3daa810a7ee4bb7d50fde2586fd2e0f60997 Mon Sep 17 00:00:00 2001
From: Andy Ross
Date: Thu, 6 Feb 2020 13:39:52 -0800
Subject: [PATCH] kernel/smp: arch/x86_64: Address race with CPU migration

Use of the _current_cpu pointer cannot be done safely in a preemptible
context.  If a thread is preempted and migrates to another CPU, the
old CPU record will be wrong.

Add a validation assert to the expression that catches incorrect
usages, and fix up the spots where it was wrong (most important being
a few uses of _current outside of locks, and the arch_is_in_isr()
implementation).

Note that the resulting _current expression now requires locking and
is going to be somewhat slower.  Longer term it's going to be better
to augment the arch API to allow SMP architectures to implement a
faster "get current thread pointer" action than this default.

Note also that this change means that "_current" is no longer
expressible as an lvalue (long ago, it was just a static variable), so
the places where it gets assigned now assign to _current_cpu->current
instead.

Signed-off-by: Andy Ross
---
 arch/arc/core/arc_smp.c             |  2 +-
 arch/x86/include/kernel_arch_func.h | 12 +++++++++++-
 include/kernel_structs.h            | 12 ++++++++++--
 kernel/include/kswap.h              |  3 +--
 kernel/init.c                       |  2 +-
 kernel/sched.c                      | 23 ++++++++++++++++++-----
 kernel/smp.c                        |  9 +++++++++
 7 files changed, 51 insertions(+), 12 deletions(-)

diff --git a/arch/arc/core/arc_smp.c b/arch/arc/core/arc_smp.c
index 0efcbad185a..01c55799e2f 100644
--- a/arch/arc/core/arc_smp.c
+++ b/arch/arc/core/arc_smp.c
@@ -119,7 +119,7 @@ u64_t z_arc_smp_switch_in_isr(void)
 		_current_cpu->swap_ok = 0;
 		((struct k_thread *)new_thread)->base.cpu =
 			arch_curr_cpu()->id;
-		_current = (struct k_thread *) new_thread;
+		_current_cpu->current = (struct k_thread *) new_thread;
 		ret = new_thread | ((u64_t)(old_thread) << 32);
 	}
 
diff --git a/arch/x86/include/kernel_arch_func.h b/arch/x86/include/kernel_arch_func.h
index f05fc592273..6cc7d620b32 100644
--- a/arch/x86/include/kernel_arch_func.h
+++ b/arch/x86/include/kernel_arch_func.h
@@ -19,7 +19,17 @@
 static inline bool arch_is_in_isr(void)
 {
 #ifdef CONFIG_SMP
-	return arch_curr_cpu()->nested != 0;
+	/* On SMP, there is a race vs. the current CPU changing if we
+	 * are preempted.  Need to mask interrupts while inspecting
+	 * (note deliberate lack of gcc size suffix on the
+	 * instructions, we need to work with both architectures here)
+	 */
+	bool ret;
+
+	__asm__ volatile ("pushf; cli");
+	ret = arch_curr_cpu()->nested != 0;
+	__asm__ volatile ("popf");
+	return ret;
 #else
 	return _kernel.nested != 0U;
 #endif
diff --git a/include/kernel_structs.h b/include/kernel_structs.h
index 6a4cf8a1d7f..198876bb804 100644
--- a/include/kernel_structs.h
+++ b/include/kernel_structs.h
@@ -195,8 +195,16 @@ typedef struct z_kernel _kernel_t;
 extern struct z_kernel _kernel;
 
 #ifdef CONFIG_SMP
-#define _current_cpu (arch_curr_cpu())
-#define _current (arch_curr_cpu()->current)
+
+/* True if the current context can be preempted and migrated to
+ * another SMP CPU.
+ */
+bool z_smp_cpu_mobile(void);
+
+#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
+			arch_curr_cpu(); })
+#define _current k_current_get()
+
 #else
 #define _current_cpu (&_kernel.cpus[0])
 #define _current _kernel.current
diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h
index 311df0f2f06..fd082ebe657 100644
--- a/kernel/include/kswap.h
+++ b/kernel/include/kswap.h
@@ -113,11 +113,10 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 			z_smp_release_global_lock(new_thread);
 		}
 #endif
-		_current = new_thread;
+		_current_cpu->current = new_thread;
 		wait_for_switch(new_thread);
 		arch_switch(new_thread->switch_handle,
 			    &old_thread->switch_handle);
-
 		sys_trace_thread_switched_in();
 	}
 
diff --git a/kernel/init.c b/kernel/init.c
index aba8cb5c1e3..a6ef06c1c04 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -512,7 +512,7 @@ FUNC_NORETURN void z_cstart(void)
 # endif
 	};
 
-	_current = &dummy_thread;
+	_current_cpu->current = &dummy_thread;
 #endif
 
 #ifdef CONFIG_USERSPACE
diff --git a/kernel/sched.c b/kernel/sched.c
index 8572d6aa24a..1e94514baf1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -720,10 +720,10 @@ void k_sched_lock(void)
 void k_sched_unlock(void)
 {
 #ifdef CONFIG_PREEMPT_ENABLED
-	__ASSERT(_current->base.sched_locked != 0, "");
-	__ASSERT(!arch_is_in_isr(), "");
-
 	LOCKED(&sched_spinlock) {
+		__ASSERT(_current->base.sched_locked != 0, "");
+		__ASSERT(!arch_is_in_isr(), "");
+
 		++_current->base.sched_locked;
 		update_cache(0);
 	}
@@ -751,7 +751,7 @@ struct k_thread *z_get_next_ready_thread(void)
 /* Just a wrapper around _current = xxx with tracing */
 static inline void set_current(struct k_thread *new_thread)
 {
-	_current = new_thread;
+	_current_cpu->current = new_thread;
 }
 
 #ifdef CONFIG_USE_SWITCH
@@ -1270,7 +1270,20 @@ static inline void z_vrfy_k_wakeup(k_tid_t thread)
 
 k_tid_t z_impl_k_current_get(void)
 {
-	return _current;
+#ifdef CONFIG_SMP
+	/* In SMP, _current is a field read from _current_cpu, which
+	 * can race with preemption before it is read.  We must lock
+	 * local interrupts when reading it.
+	 */
+	unsigned int k = arch_irq_lock();
+#endif
+
+	k_tid_t ret = _current_cpu->current;
+
+#ifdef CONFIG_SMP
+	arch_irq_unlock(k);
+#endif
+	return ret;
 }
 
 #ifdef CONFIG_USERSPACE
diff --git a/kernel/smp.c b/kernel/smp.c
index ed17fa5ac06..3647ea583ad 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -111,4 +111,13 @@ void z_smp_init(void)
 	(void)atomic_set(&start_flag, 1);
 }
 
+bool z_smp_cpu_mobile(void)
+{
+	unsigned int k = arch_irq_lock();
+	bool pinned = arch_is_in_isr() || !arch_irq_unlocked(k);
+
+	arch_irq_unlock(k);
+	return !pinned;
+}
+
 #endif /* CONFIG_SMP */
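
Note for reviewers (illustration only, not part of the patch): a minimal sketch of the
usage pattern that the new __ASSERT_NO_MSG(!z_smp_cpu_mobile()) inside _current_cpu is
meant to enforce on SMP builds with asserts enabled.  The lock (example_lock) and the
helper (example_report_cpu) are invented names for the example; _current_cpu, the
per-CPU id field, k_spin_lock()/k_spin_unlock() and printk() are existing kernel APIs
used elsewhere in this patch or tree.

#include <kernel.h>
#include <kernel_structs.h>
#include <sys/printk.h>

static struct k_spinlock example_lock;	/* hypothetical lock, for the example only */

/* Hypothetical helper, not part of the patch. */
void example_report_cpu(void)
{
	/* UNSAFE on SMP: nothing prevents preemption between evaluating
	 * arch_curr_cpu() and using the result, so the thread can migrate
	 * and the value would describe the wrong CPU.  With this patch the
	 * assertion inside _current_cpu fires on a line like:
	 *
	 *	printk("on CPU %d\n", _current_cpu->id);
	 */

	/* Safe: k_spin_lock() masks local interrupts, so the thread cannot
	 * migrate while the per-CPU pointer is in use and z_smp_cpu_mobile()
	 * reports false, satisfying the assertion.
	 */
	k_spinlock_key_t key = k_spin_lock(&example_lock);

	printk("running on CPU %d\n", _current_cpu->id);

	k_spin_unlock(&example_lock, key);
}

The same reasoning is why the k_sched_unlock() asserts above move inside the
LOCKED(&sched_spinlock) block, and why z_impl_k_current_get() now brackets its read of
_current_cpu->current with arch_irq_lock()/arch_irq_unlock().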