kernel/smp: arch/x86_64: Address race with CPU migration
Use of the _current_cpu pointer cannot be done safely in a preemptible
context. If a thread is preempted and migrates to another CPU, the old
CPU record will be wrong.

Add a validation assert to the expression that catches incorrect
usages, and fix up the spots where it was wrong (most important being
a few uses of _current outside of locks, and the arch_is_in_isr()
implementation).

Note that the resulting _current expression now requires locking and
is going to be somewhat slower. Longer term it's going to be better to
augment the arch API to allow SMP architectures to implement a faster
"get current thread pointer" action than this default.

Note also that this change means that "_current" is no longer
expressible as an lvalue (long ago, it was just a static variable), so
the places where it gets assigned now assign to _current_cpu->current
instead.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
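As an illustrative sketch of the race described above (the function name and the field touched are hypothetical, chosen only to show the pattern; this is not code from the commit):

/* Hypothetical sketch: a preemptible caller caches the per-CPU pointer
 * returned by arch_curr_cpu() (what _current_cpu used to expand to),
 * is then migrated, and ends up touching the wrong CPU's record.
 */
void some_preemptible_function(void)
{
	struct _cpu *cpu = arch_curr_cpu();  /* resolved on, say, CPU 0 */

	/* <-- preemption here can migrate this thread to CPU 1 */

	cpu->nested++;                       /* still updates CPU 0's record: stale */
}

And, per the note about "_current" no longer being an lvalue, code that used to assign the current thread directly now goes through the per-CPU record instead (new_thread here is a hypothetical struct k_thread pointer, and the caller is assumed to be in a context where _current_cpu is valid, e.g. interrupts locked):

	/* before: _current = new_thread;    -- no longer an lvalue */
	_current_cpu->current = new_thread;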
parent e11262c13e
commit eefd3daa81

7 changed files with 51 additions and 12 deletions
@@ -195,8 +195,16 @@ typedef struct z_kernel _kernel_t;
 extern struct z_kernel _kernel;
 
 #ifdef CONFIG_SMP
-#define _current_cpu (arch_curr_cpu())
-#define _current (arch_curr_cpu()->current)
+
+/* True if the current context can be preempted and migrated to
+ * another SMP CPU.
+ */
+bool z_smp_cpu_mobile(void);
+
+#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
+			arch_curr_cpu(); })
+#define _current k_current_get()
+
 #else
 #define _current_cpu (&_kernel.cpus[0])
 #define _current _kernel.current
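The hunk above only declares z_smp_cpu_mobile(); its definition lives in a part of the commit not shown here. A minimal sketch of what such a predicate could look like, assuming the standard Zephyr arch_irq_lock() / arch_irq_unlocked() / arch_is_in_isr() primitives, is:

/* Sketch only, not necessarily the commit's exact definition: the
 * current context is "mobile" (can be preempted and migrated to
 * another CPU) when interrupts are unlocked and we are not in an ISR.
 */
bool z_smp_cpu_mobile(void)
{
	unsigned int k = arch_irq_lock();
	bool ret = arch_irq_unlocked(k) && !arch_is_in_isr();

	arch_irq_unlock(k);
	return ret;
}

With a predicate like this, the new _current_cpu macro asserts that the caller is pinned to its CPU (interrupts locked or running in an ISR) before trusting the value returned by arch_curr_cpu(), which is exactly the misuse the commit message sets out to catch.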