kernel/sched: Refactor, unify management of QUEUED state

The QUEUED state flag was managed separately from run queue
insertion/deletion, and the logic (while AFAICT perfectly correct) was
tangled in a few places trying to keep the two in sync.  Put the
management of both behind a queue_thread()/dequeue_thread() API for
clarity.  The ALWAYS_INLINE usage appears sufficient to get the
compiler to condense the resulting multiple assignments.  No behavior
change.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Author: Andy Ross, 2021-02-07 13:03:09 -08:00
Committed by: Anas Nashif
Commit: 91946ef21c
2 changed files with 29 additions and 34 deletions
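
For readers skimming the diff below: the whole change reduces to one
pattern, shown here as a self-contained sketch.  The struct toy_thread
type and the singly linked run queue are simplified stand-ins invented
for illustration (the real scheduler operates on struct k_thread and a
build-time-selected priority-queue backend); only the shape of
queue_thread()/dequeue_thread() mirrors the patch.

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for the kernel types, invented for illustration. */
struct toy_thread {
	unsigned int thread_state;
	struct toy_thread *next;	/* singly linked toy run queue */
};

#define THREAD_QUEUED 0x01U

/* The core idea of the patch: the QUEUED bit is set or cleared inside
 * the same helper that inserts or removes the thread, so the flag can
 * never drift out of sync with actual queue membership.
 */
static inline void queue_thread(struct toy_thread **pq,
				struct toy_thread *thread)
{
	thread->thread_state |= THREAD_QUEUED;
	thread->next = *pq;		/* push onto the toy queue */
	*pq = thread;
}

static inline void dequeue_thread(struct toy_thread **pq,
				  struct toy_thread *thread)
{
	thread->thread_state &= ~THREAD_QUEUED;
	for (struct toy_thread **pp = pq; *pp != NULL; pp = &(*pp)->next) {
		if (*pp == thread) {
			*pp = thread->next;	/* unlink from the queue */
			break;
		}
	}
}

int main(void)
{
	struct toy_thread *runq = NULL;
	struct toy_thread t = { 0 };

	queue_thread(&runq, &t);
	printf("queued: %u\n", t.thread_state & THREAD_QUEUED);	/* 1 */
	dequeue_thread(&runq, &t);
	printf("queued: %u\n", t.thread_state & THREAD_QUEUED);	/* 0 */
	return 0;
}

The point of the helper pair is that no call site can update the flag
without also updating the queue (or vice versa), which is exactly the
tangle the commit message describes.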

--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h

@@ -189,16 +189,6 @@ static inline void z_reset_thread_states(struct k_thread *thread,
 	thread->base.thread_state &= ~states;
 }
 
-static inline void z_mark_thread_as_queued(struct k_thread *thread)
-{
-	z_set_thread_states(thread, _THREAD_QUEUED);
-}
-
-static inline void z_mark_thread_as_not_queued(struct k_thread *thread)
-{
-	z_reset_thread_states(thread, _THREAD_QUEUED);
-}
-
 static inline bool z_is_under_prio_ceiling(int prio)
 {
 	return prio >= CONFIG_PRIORITY_CEILING;

--- a/kernel/sched.c
+++ b/kernel/sched.c

@@ -181,6 +181,20 @@ static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
 }
 #endif
 
+static ALWAYS_INLINE void queue_thread(void *pq,
+				       struct k_thread *thread)
+{
+	thread->base.thread_state |= _THREAD_QUEUED;
+	_priq_run_add(pq, thread);
+}
+
+static ALWAYS_INLINE void dequeue_thread(void *pq,
+					 struct k_thread *thread)
+{
+	thread->base.thread_state &= ~_THREAD_QUEUED;
+	_priq_run_remove(pq, thread);
+}
+
 static ALWAYS_INLINE struct k_thread *next_up(void)
 {
 	struct k_thread *thread;
@@ -262,15 +276,13 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
 	/* Put _current back into the queue */
 	if (thread != _current && active &&
 	    !z_is_idle_thread_object(_current) && !queued) {
-		_priq_run_add(&_kernel.ready_q.runq, _current);
-		z_mark_thread_as_queued(_current);
+		queue_thread(&_kernel.ready_q.runq, _current);
 	}
 
 	/* Take the new _current out of the queue */
 	if (z_is_thread_queued(thread)) {
-		_priq_run_remove(&_kernel.ready_q.runq, thread);
+		dequeue_thread(&_kernel.ready_q.runq, thread);
 	}
-	z_mark_thread_as_not_queued(thread);
 
 	return thread;
 #endif
@@ -279,10 +291,9 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
 
 static void move_thread_to_end_of_prio_q(struct k_thread *thread)
 {
 	if (z_is_thread_queued(thread)) {
-		_priq_run_remove(&_kernel.ready_q.runq, thread);
+		dequeue_thread(&_kernel.ready_q.runq, thread);
 	}
-	_priq_run_add(&_kernel.ready_q.runq, thread);
-	z_mark_thread_as_queued(thread);
+	queue_thread(&_kernel.ready_q.runq, thread);
 	update_cache(thread == _current);
 }
@@ -422,8 +433,7 @@ static void ready_thread(struct k_thread *thread)
 	 */
 	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
 		sys_trace_thread_ready(thread);
-		_priq_run_add(&_kernel.ready_q.runq, thread);
-		z_mark_thread_as_queued(thread);
+		queue_thread(&_kernel.ready_q.runq, thread);
 		update_cache(0);
 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
 		arch_sched_ipi();
@@ -465,8 +475,7 @@ void z_impl_k_thread_suspend(struct k_thread *thread)
 
 	LOCKED(&sched_spinlock) {
 		if (z_is_thread_queued(thread)) {
-			_priq_run_remove(&_kernel.ready_q.runq, thread);
-			z_mark_thread_as_not_queued(thread);
+			dequeue_thread(&_kernel.ready_q.runq, thread);
 		}
 		z_mark_thread_as_suspended(thread);
 		update_cache(thread == _current);
@@ -552,9 +561,8 @@ void z_thread_single_abort(struct k_thread *thread)
 
 		if (z_is_thread_ready(thread)) {
 			if (z_is_thread_queued(thread)) {
-				_priq_run_remove(&_kernel.ready_q.runq,
-						 thread);
-				z_mark_thread_as_not_queued(thread);
+				dequeue_thread(&_kernel.ready_q.runq,
+					       thread);
 			}
 			update_cache(thread == _current);
 		} else {
@@ -637,8 +645,7 @@ void z_thread_single_abort(struct k_thread *thread)
 static void unready_thread(struct k_thread *thread)
 {
 	if (z_is_thread_queued(thread)) {
-		_priq_run_remove(&_kernel.ready_q.runq, thread);
-		z_mark_thread_as_not_queued(thread);
+		dequeue_thread(&_kernel.ready_q.runq, thread);
 	}
 	update_cache(thread == _current);
 }
@@ -816,9 +823,9 @@ bool z_set_prio(struct k_thread *thread, int prio)
 	if (need_sched) {
 		/* Don't requeue on SMP if it's the running thread */
 		if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
-			_priq_run_remove(&_kernel.ready_q.runq, thread);
+			dequeue_thread(&_kernel.ready_q.runq, thread);
 			thread->base.prio = prio;
-			_priq_run_add(&_kernel.ready_q.runq, thread);
+			queue_thread(&_kernel.ready_q.runq, thread);
 		} else {
 			thread->base.prio = prio;
 		}
@@ -1229,8 +1236,8 @@ void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
 	LOCKED(&sched_spinlock) {
 		thread->base.prio_deadline = k_cycle_get_32() + deadline;
 		if (z_is_thread_queued(thread)) {
-			_priq_run_remove(&_kernel.ready_q.runq, thread);
-			_priq_run_add(&_kernel.ready_q.runq, thread);
+			dequeue_thread(&_kernel.ready_q.runq, thread);
+			queue_thread(&_kernel.ready_q.runq, thread);
 		}
 	}
 }
@@ -1259,11 +1266,10 @@ void z_impl_k_yield(void)
 	LOCKED(&sched_spinlock) {
 		if (!IS_ENABLED(CONFIG_SMP) ||
 		    z_is_thread_queued(_current)) {
-			_priq_run_remove(&_kernel.ready_q.runq,
-					 _current);
+			dequeue_thread(&_kernel.ready_q.runq,
+				       _current);
 		}
-		_priq_run_add(&_kernel.ready_q.runq, _current);
-		z_mark_thread_as_queued(_current);
+		queue_thread(&_kernel.ready_q.runq, _current);
 		update_cache(1);
 	}
 }
@@ -1436,8 +1442,7 @@ void z_sched_abort(struct k_thread *thread)
 			thread->base.thread_state |= _THREAD_DEAD;
 			k_spin_unlock(&sched_spinlock, key);
 		} else if (z_is_thread_queued(thread)) {
-			_priq_run_remove(&_kernel.ready_q.runq, thread);
-			z_mark_thread_as_not_queued(thread);
+			dequeue_thread(&_kernel.ready_q.runq, thread);
 			thread->base.thread_state |= _THREAD_DEAD;
 			k_spin_unlock(&sched_spinlock, key);
 		} else {