kernel: Rework SMP irq_lock() compatibility layer
This was wrong in two ways, one subtle and one awful.

The subtle problem was that the IRQ lock isn't actually globally
recursive: it gets reset when you context switch (i.e. a _Swap()
implicitly releases and reacquires it).  So the recursive count I was
keeping needs to be per-thread or else we risk deadlock any time we
swap away from a thread holding the lock.

And because part of my brain apparently knew this, there was an
"optimization" in the code that tested the current count vs. zero
outside the lock, on the argument that if it was non-zero we must
already hold the lock.  Which would be true of a per-thread counter,
but NOT a global one: the other CPU may be holding that lock, and
this test will tell you *you* do.  The upshot is that a recursive
irq_lock() would almost always SUCCEED INCORRECTLY when there was
lock contention.  That this didn't break more things is amazing to me.

The rework is actually simpler than the original, thankfully.  Though
there are some further subtleties:

* The lock state implied by irq_lock() allows the lock to be
  implicitly released on context switch (i.e. you can _Swap() with
  the lock held at a recursion level higher than 1, which needs to
  allow other processes to run).  So return paths into threads from
  _Swap() and interrupt/exception exit need to check and restore the
  global lock state, spinning as needed.

* The idle loop design specifies a k_cpu_idle() function which is, on
  common architectures, expected to enable interrupts (for obvious
  reasons), but there is no place to put non-arch code to wire it
  into the global lock accounting.  So on SMP, even CPU0 needs to use
  the "dumb" spinning idle loop.

Finally this patch contains a simple bugfix too, found by inspection:
the interrupt return code used when CONFIG_SWITCH is enabled wasn't
correctly setting the active flag on the threads, opening up the
potential for a race that might result in a thread being scheduled on
two CPUs simultaneously.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent eb258706e0
commit 15c400774e

6 changed files with 78 additions and 42 deletions
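The core of the bug and the fix can be condensed into a standalone
sketch.  This is not the patch code (see the kernel/smp.c hunk below
for that): it uses C11 atomics and a stub thread type in place of the
kernel's primitives, and broken_lock()/fixed_lock() are illustrative
names only.

	#include <stdatomic.h>

	struct thread {
		unsigned char global_lock_count; /* per-thread recursion depth */
	};

	static atomic_int global_lock;       /* 0 = free, 1 = held */
	static volatile int recursive_count; /* broken: shared by all CPUs */

	#define KEY_RECURSIVE 0xffffffff

	/* Broken: recursive_count may be counting the OTHER CPU's hold,
	 * so this "fast path" can succeed without owning anything.
	 */
	unsigned int broken_lock(void)
	{
		if (recursive_count) {        /* racy test outside the lock */
			recursive_count++;
			return KEY_RECURSIVE; /* caller falsely believes it
					       * holds the lock */
		}
		/* ... take the real spinlock, set recursive_count = 1 ... */
		return 0;
	}

	/* Fixed: a nonzero count is per-thread, so it really does mean
	 * this thread holds the lock; only depth 0 -> 1 spins on the
	 * shared flag.
	 */
	unsigned int fixed_lock(struct thread *cur)
	{
		unsigned int key = 0;   /* stands in for _arch_irq_lock() */
		int expected = 0;

		if (!cur->global_lock_count) {
			while (!atomic_compare_exchange_strong(&global_lock,
							       &expected, 1)) {
				expected = 0; /* CAS failed; spin until free */
			}
		}
		cur->global_lock_count++;
		return key;
	}

The broken path hands back a "recursive" sentinel key without ever
touching the real lock whenever the shared counter is nonzero, even
if another CPU is the one holding it; the fixed path keys recursion
off the current thread's own counter, so only the outermost
acquisition contends for the shared flag.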
include/kernel.h

@@ -420,6 +420,9 @@ struct _thread_base {
 	/* CPU index on which thread was last run */
 	u8_t cpu;
+
+	/* Recursive count of irq_lock() calls */
+	u8_t global_lock_count;
 #endif
 
 	/* data returned by APIs */
kernel/idle.c

@@ -63,6 +63,7 @@ static void set_kernel_idle_time_in_ticks(s32_t ticks)
 #define set_kernel_idle_time_in_ticks(x) do { } while (0)
 #endif
 
+#ifndef CONFIG_SMP
 static void sys_power_save_idle(s32_t ticks)
 {
 #ifdef CONFIG_TICKLESS_KERNEL
@@ -122,6 +123,7 @@ static void sys_power_save_idle(s32_t ticks)
 	k_cpu_idle();
 #endif
 }
+#endif
 
 void _sys_power_save_idle_exit(s32_t ticks)
 {
@@ -165,23 +167,20 @@ void idle(void *unused1, void *unused2, void *unused3)
 #endif
 
 #ifdef CONFIG_SMP
-	/* Simplified idle for non-default SMP CPUs pending driver
-	 * support. The busy waiting is needed to prevent lock
-	 * contention. Long term we need to wake up idle CPUs with an
-	 * IPI.
+	/* Simplified idle for SMP CPUs pending driver support.  The
+	 * busy waiting is needed to prevent lock contention.  Long
+	 * term we need to wake up idle CPUs with an IPI.
 	 */
-	if (_arch_curr_cpu()->id > 0) {
-		while (1) {
-			k_busy_wait(100);
-			k_yield();
-		}
+	while (1) {
+		k_busy_wait(100);
+		k_yield();
 	}
-#endif
-
+#else
 	for (;;) {
 		(void)irq_lock();
 		sys_power_save_idle(_get_next_timeout_expiry());
 
 		IDLE_YIELD_IF_COOP();
 	}
+#endif
 }
kernel/include/kswap.h

@@ -23,6 +23,16 @@ extern void _check_stack_sentinel(void);
 
 extern void _sys_k_event_logger_context_switch(void);
 
+/* In SMP, the irq_lock() is a spinlock which is implicitly released
+ * and reacquired on context switch to preserve the existing
+ * semantics.  This means that whenever we are about to return to a
+ * thread (via either _Swap() or interrupt/exception return!) we need
+ * to restore the lock state to whatever the thread's counter
+ * expects.
+ */
+void _smp_reacquire_global_lock(struct k_thread *thread);
+void _smp_release_global_lock(struct k_thread *thread);
+
 /* context switching and scheduling-related routines */
 #ifdef CONFIG_USE_SWITCH
 
@@ -54,6 +64,8 @@ static inline unsigned int _Swap(unsigned int key)
 	new_thread->base.active = 1;
 
 	new_thread->base.cpu = _arch_curr_cpu()->id;
+
+	_smp_release_global_lock(new_thread);
 #endif
 
 	_current = new_thread;
kernel/init.c

@@ -425,6 +425,8 @@ FUNC_NORETURN void _Cstart(void)
 	 */
 	char dummy_thread_memory[sizeof(struct k_thread)];
 	struct k_thread *dummy_thread = (struct k_thread *)&dummy_thread_memory;
+
+	memset(dummy_thread_memory, 0, sizeof(dummy_thread_memory));
 #endif
 
 	/*
kernel/sched.c

@@ -644,14 +644,24 @@ void *_get_next_switch_handle(void *interrupted)
 	}
 
 	int key = irq_lock();
+	struct k_thread *new_thread = _get_next_ready_thread();
 
-	_current->switch_handle = interrupted;
-	_current = _get_next_ready_thread();
-
-	void *ret = _current->switch_handle;
+#ifdef CONFIG_SMP
+	_current->base.active = 0;
+	new_thread->base.active = 1;
+#endif
 
 	irq_unlock(key);
 
+	_current->switch_handle = interrupted;
+	_current = new_thread;
+
+	void *ret = new_thread->switch_handle;
+
+#ifdef CONFIG_SMP
+	_smp_reacquire_global_lock(_current);
+#endif
+
 	_check_stack_sentinel();
 
 	return ret;
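The active-flag half of this hunk closes the cross-CPU race described
in the commit message.  A hypothetical interleaving (illustrative,
not from the patch) with the flag updates left outside the locked
region:

	CPU0: key = irq_lock();
	CPU0: new_thread = _get_next_ready_thread();  /* picks thread T */
	CPU0: irq_unlock(key);
	          CPU1: key = irq_lock();
	          CPU1: new_thread = _get_next_ready_thread();
	          /* T is not yet marked active, so CPU1 picks T too:
	           * one thread now scheduled on two CPUs */
	          CPU1: irq_unlock(key);

Setting _current->base.active = 0 and new_thread->base.active = 1
inside the irq_lock()/irq_unlock() window makes the pick and the flag
update atomic with respect to the other CPU's scheduler pass.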
kernel/smp.c
@@ -10,47 +10,57 @@
 #include <kswap.h>
 #include <kernel_internal.h>
 
-static struct k_spinlock global_spinlock;
-
-static volatile int recursive_count;
-
-/* FIXME: this value of key works on all known architectures as an
- * "invalid state" that will never be legitimately returned from
- * _arch_irq_lock().  But we should force the architecture code to
- * define something for us.
- */
-#define KEY_RECURSIVE 0xffffffff
+#ifdef CONFIG_SMP
+static atomic_t global_lock;
 
 unsigned int _smp_global_lock(void)
 {
-	/* OK to test this outside the lock.  If it's non-zero, then
-	 * we hold the lock by definition
-	 */
-	if (recursive_count) {
-		recursive_count++;
+	int key = _arch_irq_lock();
 
-		return KEY_RECURSIVE;
+	if (!_current->base.global_lock_count) {
+		while (!atomic_cas(&global_lock, 0, 1)) {
+		}
 	}
 
-	unsigned int k = k_spin_lock(&global_spinlock).key;
+	_current->base.global_lock_count++;
 
-	recursive_count = 1;
-	return k;
+	return key;
 }
 
 void _smp_global_unlock(unsigned int key)
 {
-	if (key == KEY_RECURSIVE) {
-		recursive_count--;
-		return;
+	if (_current->base.global_lock_count) {
+		_current->base.global_lock_count--;
+
+		if (!_current->base.global_lock_count) {
+			atomic_clear(&global_lock);
+		}
 	}
 
-	k_spinlock_key_t sk = { .key = key };
-
-	recursive_count = 0;
-	k_spin_unlock(&global_spinlock, sk);
+	_arch_irq_unlock(key);
 }
 
+void _smp_reacquire_global_lock(struct k_thread *thread)
+{
+	if (thread->base.global_lock_count) {
+		_arch_irq_lock();
+
+		while (!atomic_cas(&global_lock, 0, 1)) {
+		}
+	}
+}
+
+
+/* Called from within _Swap(), so assumes lock already held */
+void _smp_release_global_lock(struct k_thread *thread)
+{
+	if (!thread->base.global_lock_count) {
+		atomic_clear(&global_lock);
+	}
+}
+
+#endif
+
 extern k_thread_stack_t _interrupt_stack1[];
 extern k_thread_stack_t _interrupt_stack2[];
 extern k_thread_stack_t _interrupt_stack3[];
@@ -73,8 +83,8 @@ static void smp_init_top(int key, void *arg)
 		.base.thread_state = _THREAD_DUMMY,
 	};
 
-	int k = irq_lock();
 	_arch_curr_cpu()->current = &dummy_thread;
+	int k = irq_lock();
 	smp_timer_init();
 	_Swap(k);
 
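To make the reworked semantics concrete, here is a usage-level sketch
(not from the patch; the nesting depth and the k_sleep() call are
arbitrary) of how the per-thread accounting behaves on SMP:

	/* Nested locking under the reworked compatibility layer. */
	void nested_critical_sections(void)
	{
		unsigned int outer = irq_lock(); /* depth 0 -> 1: spins for
						  * global_lock */
		unsigned int inner = irq_lock(); /* depth 1 -> 2: no spinning */

		/* Blocking at depth 2 is legal: _Swap() calls
		 * _smp_release_global_lock() so other CPUs can make
		 * progress, and the return path (or interrupt/exception
		 * exit) calls _smp_reacquire_global_lock() before this
		 * thread resumes with its lock state restored.
		 */
		k_sleep(1);

		irq_unlock(inner); /* depth 2 -> 1: lock still held */
		irq_unlock(outer); /* depth 1 -> 0: clears global_lock */
	}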