kernel/sched: Adjust locking in z_swap()

Swap was originally written to take the scheduler lock only while selecting a
new thread, but it is useful to be able to rely on scheduler atomicity later
in the process (in particular, so that the assignment to cpu.current is seen
atomically).  Rework the code so that swap takes the lock itself and holds it
until just before the call to arch_switch().

Note that the local interrupt mask has always had to be held across the swap,
so extending the lock here has no effect at all on latency on uniprocessor
setups, and even on SMP it affects only average latency, not the worst case.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Andy Ross, 2021-02-18 10:15:23 -08:00 (committed by Anas Nashif)
commit 6b84ab3830
3 changed files with 31 additions and 32 deletions
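
As a rough illustration of the locking order described in the commit message
(a hypothetical, self-contained sketch; swap_model, pick_next, sched_lock and
friends are stand-ins for the primitives in the diff below, not Zephyr APIs):

#include <stdio.h>

struct thread { const char *name; };

static struct thread t_main = { "main" };
static struct thread t_next = { "worker" };
static struct thread *cpu_current = &t_main;        /* cf. _current_cpu->current */

static void sched_lock(void)   { puts("lock   sched_spinlock"); }
static void sched_unlock(void) { puts("unlock sched_spinlock"); }
static struct thread *pick_next(void) { return &t_next; }          /* cf. z_swap_next_thread() */
static void requeue(struct thread *t) { printf("requeue %s\n", t->name); } /* cf. z_requeue_current() */
static void context_switch(struct thread *from, struct thread *to)
{
        printf("switch %s -> %s\n", from->name, to->name);          /* cf. arch_switch() */
}

static void swap_model(struct thread *old_thread)
{
        sched_lock();                            /* scheduler state is atomic from here on */

        struct thread *new_thread = pick_next();

        if (new_thread != old_thread) {
                cpu_current = new_thread;        /* assignment now covered by the lock */
                requeue(old_thread);             /* outgoing thread back on the run queue */
                sched_unlock();                  /* dropped just before the switch itself */
                context_switch(old_thread, new_thread);
        } else {
                sched_unlock();                  /* nothing to do; just release */
        }
}

int main(void)
{
        swap_model(cpu_current);
        return 0;
}

The sketch only shows the ordering: the scheduler lock now covers thread
selection, the cpu.current assignment and the requeue of the outgoing thread,
and is released only immediately before the switch, exactly as the kswap.h
hunks below arrange.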

kernel/include/ksched.h

@@ -67,6 +67,7 @@ void z_ready_thread(struct k_thread *thread);
 void z_thread_single_abort(struct k_thread *thread);
 FUNC_NORETURN void z_self_abort(void);
 void z_requeue_current(struct k_thread *curr);
+struct k_thread *z_swap_next_thread(void);

 static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, k_timeout_t timeout)
 {
@@ -78,17 +79,6 @@ static inline void z_reschedule_unlocked(void)
         (void) z_reschedule_irqlock(arch_irq_lock());
 }

-/* find which one is the next thread to run */
-/* must be called with interrupts locked */
-#ifdef CONFIG_SMP
-extern struct k_thread *z_get_next_ready_thread(void);
-#else
-static ALWAYS_INLINE struct k_thread *z_get_next_ready_thread(void)
-{
-        return _kernel.ready_q.cache;
-}
-#endif
 static inline bool z_is_idle_thread_entry(void *entry_point)
 {
         return entry_point == idle;

kernel/include/kswap.h

@@ -16,6 +16,8 @@ extern void z_check_stack_sentinel(void);
 #define z_check_stack_sentinel() /**/
 #endif

+extern struct k_spinlock sched_spinlock;
+
 /* In SMP, the irq_lock() is a spinlock which is implicitly released
  * and reacquired on context switch to preserve the existing
  * semantics. This means that whenever we are about to return to a
@@ -68,11 +70,18 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
         z_check_stack_sentinel();

-        if (is_spinlock && lock != NULL) {
+        /* We always take the scheduler spinlock if we don't already
+         * have it. We "release" other spinlocks here. But we never
+         * drop the interrupt lock.
+         */
+        if (is_spinlock && lock != NULL && lock != &sched_spinlock) {
                 k_spin_release(lock);
         }
+        if (!is_spinlock || lock != &sched_spinlock) {
+                (void) k_spin_lock(&sched_spinlock);
+        }

-        new_thread = z_get_next_ready_thread();
+        new_thread = z_swap_next_thread();

         if (new_thread != old_thread) {
 #ifdef CONFIG_TIMESLICING
@@ -91,9 +100,12 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 #endif
                 z_thread_mark_switched_out();
                 wait_for_switch(new_thread);
-                arch_cohere_stacks(old_thread, NULL, new_thread);
                 _current_cpu->current = new_thread;

+#ifdef CONFIG_SPIN_VALIDATE
+                z_spin_lock_set_owner(&sched_spinlock);
+#endif

 #ifdef CONFIG_SMP
                 /* Add _current back to the run queue HERE. After
                  * wait_for_switch() we are guaranteed to reach the
@@ -102,14 +114,17 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
                  */
                 z_requeue_current(old_thread);
 #endif
                 void *newsh = new_thread->switch_handle;

                 if (IS_ENABLED(CONFIG_SMP)) {
                         /* Active threads MUST have a null here */
                         new_thread->switch_handle = NULL;
                 }
+                k_spin_release(&sched_spinlock);
+                arch_cohere_stacks(old_thread, NULL, new_thread);
                 arch_switch(newsh, &old_thread->switch_handle);
+        } else {
+                k_spin_release(&sched_spinlock);
         }

         if (is_spinlock) {
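
As a rough model of the hand-off above (hypothetical flag variables standing
in for the real locks, not Zephyr code): the caller's spinlock is "released"
first, sched_spinlock is taken next, and the interrupt mask stays asserted for
the entire sequence, which is why holding the scheduler lock longer adds no
uniprocessor interrupt latency.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_masked;      /* models the CPU interrupt mask     */
static bool caller_lock_held; /* models the caller's own spinlock  */
static bool sched_lock_held;  /* models sched_spinlock             */

static void do_swap_model(void)
{
        assert(irqs_masked);                      /* held across the whole swap    */

        if (caller_lock_held) {
                caller_lock_held = false;         /* "release" the caller's lock   */
        }
        sched_lock_held = true;                   /* take the scheduler lock       */

        puts("pick next thread, set cpu.current, requeue the old thread");

        sched_lock_held = false;                  /* dropped just before switching */
        assert(irqs_masked);                      /* still masked at the switch    */
        puts("context switch");
}

int main(void)
{
        irqs_masked = true;       /* the caller's interrupt-masking lock key      */
        caller_lock_held = true;  /* the caller arrives holding its own spinlock  */
        do_swap_model();
        return 0;
}

On SMP the only cost is that sched_spinlock is now contended for slightly
longer, which affects average rather than worst-case latency, as the commit
message notes.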

kernel/sched.c

@@ -56,7 +56,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 /* the only struct z_kernel instance */
 struct z_kernel _kernel;

-static struct k_spinlock sched_spinlock;
+struct k_spinlock sched_spinlock;

 static void update_cache(int);
@@ -217,10 +217,8 @@ static ALWAYS_INLINE void dequeue_thread(void *pq,
  */
 void z_requeue_current(struct k_thread *curr)
 {
-        LOCKED(&sched_spinlock) {
-                if (z_is_thread_queued(curr)) {
-                        _priq_run_add(&_kernel.ready_q.runq, curr);
-                }
+        if (z_is_thread_queued(curr)) {
+                _priq_run_add(&_kernel.ready_q.runq, curr);
         }
 }
 #endif
@@ -914,7 +912,7 @@ static inline bool need_swap(void)
         struct k_thread *new_thread;

         /* Check if the next ready thread is the same as the current thread */
-        new_thread = z_get_next_ready_thread();
+        new_thread = _kernel.ready_q.cache;
         return new_thread != _current;
 #endif
 }
@@ -962,18 +960,14 @@ void k_sched_unlock(void)
 #endif
 }

-#ifdef CONFIG_SMP
-struct k_thread *z_get_next_ready_thread(void)
+struct k_thread *z_swap_next_thread(void)
 {
-        struct k_thread *ret = 0;
-
-        LOCKED(&sched_spinlock) {
-                ret = next_up();
-        }
-        return ret;
-}
+#ifdef CONFIG_SMP
+        return next_up();
+#else
+        return _kernel.ready_q.cache;
 #endif
+}

 /* Just a wrapper around _current = xxx with tracing */
 static inline void set_current(struct k_thread *new_thread)
@@ -1039,7 +1033,7 @@ void *z_get_next_switch_handle(void *interrupted)
         return ret;
 #else
         _current->switch_handle = interrupted;
-        set_current(z_get_next_ready_thread());
+        set_current(_kernel.ready_q.cache);
         return _current->switch_handle;
 #endif
 }