kernel/smp: arch/x86_64: Address race with CPU migration

The _current_cpu pointer cannot be used safely from a preemptible
context.  If a thread is preempted and migrates to another CPU, the
old CPU record it was using will be wrong.
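
To make the failure mode concrete, here is a minimal sketch (illustrative
only: the function name is hypothetical, the type and field are the ones
used in the diff below):

/* Illustrative sketch: preemptible code that caches the current CPU
 * pointer can end up touching the wrong CPU's record after migration.
 */
void hypothetical_preemptible_code(void)
{
	struct _cpu *cpu = arch_curr_cpu();	/* running on, say, CPU 0 */

	/* ...preempted here, rescheduled on CPU 1... */

	cpu->swap_ok = 0;	/* still CPU 0's record: wrong CPU */
}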

Add a validation assert to the expression so that incorrect usages
are caught, and fix up the spots where it was wrong (the most
important being a few uses of _current outside of locks, and the
arch_is_in_isr() implementation).

Note that the resulting _current expression now requires locking and
is going to be somewhat slower.  Longer term, it will be better to
augment the arch API so that SMP architectures can implement a faster
"get current thread pointer" operation than this default.
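
A hedged sketch of what that augmentation could look like (the Kconfig
symbol and the arch_fast_current_get() hook are hypothetical names, not
part of this patch; only the k_current_get() default below is):

#ifdef CONFIG_ARCH_HAS_CUSTOM_CURRENT_GET	/* hypothetical option */
/* An architecture that keeps the current thread pointer in a dedicated
 * or per-CPU register could return it in a single atomic load, with no
 * interrupt locking at all.
 */
#define _current arch_fast_current_get()	/* hypothetical arch hook */
#else
#define _current k_current_get()		/* default used by this patch */
#endif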

Note also that this change means that "_current" is no longer
expressible as an lvalue (long ago, it was just a static variable), so
the places where it gets assigned now assign to _current_cpu->current
instead.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
commit eefd3daa81
Andy Ross, 2020-02-06 13:39:52 -08:00 (committed by Anas Nashif)
7 changed files with 51 additions and 12 deletions


@@ -119,7 +119,7 @@ u64_t z_arc_smp_switch_in_isr(void)
_current_cpu->swap_ok = 0;
((struct k_thread *)new_thread)->base.cpu =
arch_curr_cpu()->id;
_current = (struct k_thread *) new_thread;
_current_cpu->current = (struct k_thread *) new_thread;
ret = new_thread | ((u64_t)(old_thread) << 32);
}


@@ -19,7 +19,17 @@
static inline bool arch_is_in_isr(void)
{
#ifdef CONFIG_SMP
return arch_curr_cpu()->nested != 0;
/* On SMP, there is a race vs. the current CPU changing if we
* are preempted. Need to mask interrupts while inspecting
* (note deliberate lack of gcc size suffix on the
* instructions, we need to work with both architectures here)
*/
bool ret;
__asm__ volatile ("pushf; cli");
ret = arch_curr_cpu()->nested != 0;
__asm__ volatile ("popf");
return ret;
#else
return _kernel.nested != 0U;
#endif
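
For readers unfamiliar with the flags juggling above: pushf saves
EFLAGS/RFLAGS (including the interrupt flag), cli masks interrupts while
the nested count is read, and popf restores the previous state.
Functionally that is equivalent to this sketch (illustrative only, the
function name is hypothetical):

static inline bool arch_is_in_isr_sketch(void)
{
	unsigned int key = arch_irq_lock();	/* mask local interrupts */
	bool ret = arch_curr_cpu()->nested != 0;

	arch_irq_unlock(key);		/* restore previous interrupt state */
	return ret;
}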


@@ -195,8 +195,16 @@ typedef struct z_kernel _kernel_t;
extern struct z_kernel _kernel;
#ifdef CONFIG_SMP
#define _current_cpu (arch_curr_cpu())
#define _current (arch_curr_cpu()->current)
/* True if the current context can be preempted and migrated to
* another SMP CPU.
*/
bool z_smp_cpu_mobile(void);
#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
arch_curr_cpu(); })
#define _current k_current_get()
#else
#define _current_cpu (&_kernel.cpus[0])
#define _current _kernel.current


@@ -113,11 +113,10 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
z_smp_release_global_lock(new_thread);
}
#endif
_current = new_thread;
_current_cpu->current = new_thread;
wait_for_switch(new_thread);
arch_switch(new_thread->switch_handle,
&old_thread->switch_handle);
sys_trace_thread_switched_in();
}


@@ -512,7 +512,7 @@ FUNC_NORETURN void z_cstart(void)
# endif
};
_current = &dummy_thread;
_current_cpu->current = &dummy_thread;
#endif
#ifdef CONFIG_USERSPACE


@@ -720,10 +720,10 @@ void k_sched_lock(void)
void k_sched_unlock(void)
{
#ifdef CONFIG_PREEMPT_ENABLED
__ASSERT(_current->base.sched_locked != 0, "");
__ASSERT(!arch_is_in_isr(), "");
LOCKED(&sched_spinlock) {
__ASSERT(_current->base.sched_locked != 0, "");
__ASSERT(!arch_is_in_isr(), "");
++_current->base.sched_locked;
update_cache(0);
}
@@ -751,7 +751,7 @@ struct k_thread *z_get_next_ready_thread(void)
/* Just a wrapper around _current = xxx with tracing */
static inline void set_current(struct k_thread *new_thread)
{
_current = new_thread;
_current_cpu->current = new_thread;
}
#ifdef CONFIG_USE_SWITCH
@@ -1270,7 +1270,20 @@ static inline void z_vrfy_k_wakeup(k_tid_t thread)
k_tid_t z_impl_k_current_get(void)
{
return _current;
#ifdef CONFIG_SMP
/* In SMP, _current is a field read from _current_cpu, which
* can race with preemption before it is read. We must lock
* local interrupts when reading it.
*/
unsigned int k = arch_irq_lock();
#endif
k_tid_t ret = _current_cpu->current;
#ifdef CONFIG_SMP
arch_irq_unlock(k);
#endif
return ret;
}
#ifdef CONFIG_USERSPACE


@@ -111,4 +111,13 @@ void z_smp_init(void)
(void)atomic_set(&start_flag, 1);
}
bool z_smp_cpu_mobile(void)
{
unsigned int k = arch_irq_lock();
bool pinned = arch_is_in_isr() || !arch_irq_unlocked(k);
arch_irq_unlock(k);
return !pinned;
}
#endif /* CONFIG_SMP */
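
To illustrate what the new assertion accepts and rejects, a hedged sketch
(the callers and the spinlock are hypothetical): holding a spinlock masks
local interrupts, so z_smp_cpu_mobile() returns false and the _current_cpu
access passes, while the same access from plain preemptible thread context
trips the assert.

static struct k_spinlock hypothetical_lock;

void hypothetical_safe_use(void)
{
	k_spinlock_key_t key = k_spin_lock(&hypothetical_lock);

	/* IRQs masked: context cannot migrate, assertion passes */
	_current_cpu->swap_ok = 0;

	k_spin_unlock(&hypothetical_lock, key);
}

void hypothetical_unsafe_use(void)
{
	/* Preemptible: z_smp_cpu_mobile() is true, so (with asserts
	 * enabled) the __ASSERT_NO_MSG() in _current_cpu fires.
	 */
	_current_cpu->swap_ok = 0;
}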