diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 20ea4616a46..d348517f818 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -189,16 +189,6 @@ static inline void z_reset_thread_states(struct k_thread *thread,
 	thread->base.thread_state &= ~states;
 }
 
-static inline void z_mark_thread_as_queued(struct k_thread *thread)
-{
-	z_set_thread_states(thread, _THREAD_QUEUED);
-}
-
-static inline void z_mark_thread_as_not_queued(struct k_thread *thread)
-{
-	z_reset_thread_states(thread, _THREAD_QUEUED);
-}
-
 static inline bool z_is_under_prio_ceiling(int prio)
 {
 	return prio >= CONFIG_PRIORITY_CEILING;
diff --git a/kernel/sched.c b/kernel/sched.c
index 28e2f52bbcf..9bd1bfba5d8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -181,6 +181,20 @@ static ALWAYS_INLINE struct k_thread *_priq_dumb_mask_best(sys_dlist_t *pq)
 }
 #endif
 
+static ALWAYS_INLINE void queue_thread(void *pq,
+				       struct k_thread *thread)
+{
+	thread->base.thread_state |= _THREAD_QUEUED;
+	_priq_run_add(pq, thread);
+}
+
+static ALWAYS_INLINE void dequeue_thread(void *pq,
+					 struct k_thread *thread)
+{
+	thread->base.thread_state &= ~_THREAD_QUEUED;
+	_priq_run_remove(pq, thread);
+}
+
 static ALWAYS_INLINE struct k_thread *next_up(void)
 {
 	struct k_thread *thread;
@@ -262,15 +276,13 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
 	/* Put _current back into the queue */
 	if (thread != _current && active &&
 	    !z_is_idle_thread_object(_current) && !queued) {
-		_priq_run_add(&_kernel.ready_q.runq, _current);
-		z_mark_thread_as_queued(_current);
+		queue_thread(&_kernel.ready_q.runq, _current);
 	}
 
 	/* Take the new _current out of the queue */
 	if (z_is_thread_queued(thread)) {
-		_priq_run_remove(&_kernel.ready_q.runq, thread);
+		dequeue_thread(&_kernel.ready_q.runq, thread);
 	}
-	z_mark_thread_as_not_queued(thread);
 
 	return thread;
 #endif
@@ -279,10 +291,9 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
 static void move_thread_to_end_of_prio_q(struct k_thread *thread)
 {
 	if (z_is_thread_queued(thread)) {
-		_priq_run_remove(&_kernel.ready_q.runq, thread);
+		dequeue_thread(&_kernel.ready_q.runq, thread);
 	}
-	_priq_run_add(&_kernel.ready_q.runq, thread);
-	z_mark_thread_as_queued(thread);
+	queue_thread(&_kernel.ready_q.runq, thread);
 	update_cache(thread == _current);
 }
 
@@ -422,8 +433,7 @@ static void ready_thread(struct k_thread *thread)
 	 */
 	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
 		sys_trace_thread_ready(thread);
-		_priq_run_add(&_kernel.ready_q.runq, thread);
-		z_mark_thread_as_queued(thread);
+		queue_thread(&_kernel.ready_q.runq, thread);
 		update_cache(0);
 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
 		arch_sched_ipi();
@@ -465,8 +475,7 @@ void z_impl_k_thread_suspend(struct k_thread *thread)
 
 	LOCKED(&sched_spinlock) {
 		if (z_is_thread_queued(thread)) {
-			_priq_run_remove(&_kernel.ready_q.runq, thread);
-			z_mark_thread_as_not_queued(thread);
+			dequeue_thread(&_kernel.ready_q.runq, thread);
 		}
 		z_mark_thread_as_suspended(thread);
 		update_cache(thread == _current);
@@ -552,9 +561,8 @@ void z_thread_single_abort(struct k_thread *thread)
 
 		if (z_is_thread_ready(thread)) {
 			if (z_is_thread_queued(thread)) {
-				_priq_run_remove(&_kernel.ready_q.runq,
+				dequeue_thread(&_kernel.ready_q.runq,
 						 thread);
-				z_mark_thread_as_not_queued(thread);
 			}
 			update_cache(thread == _current);
 		} else {
@@ -637,8 +645,7 @@ void z_thread_single_abort(struct k_thread *thread)
 static void unready_thread(struct k_thread *thread)
 {
 	if (z_is_thread_queued(thread)) {
-		_priq_run_remove(&_kernel.ready_q.runq, thread);
-		z_mark_thread_as_not_queued(thread);
+		dequeue_thread(&_kernel.ready_q.runq, thread);
 	}
 	update_cache(thread == _current);
 }
@@ -816,9 +823,9 @@ bool z_set_prio(struct k_thread *thread, int prio)
 	if (need_sched) {
 		/* Don't requeue on SMP if it's the running thread */
 		if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
-			_priq_run_remove(&_kernel.ready_q.runq, thread);
+			dequeue_thread(&_kernel.ready_q.runq, thread);
 			thread->base.prio = prio;
-			_priq_run_add(&_kernel.ready_q.runq, thread);
+			queue_thread(&_kernel.ready_q.runq, thread);
 		} else {
 			thread->base.prio = prio;
 		}
@@ -1229,8 +1236,8 @@ void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
 	LOCKED(&sched_spinlock) {
 		thread->base.prio_deadline = k_cycle_get_32() + deadline;
 		if (z_is_thread_queued(thread)) {
-			_priq_run_remove(&_kernel.ready_q.runq, thread);
-			_priq_run_add(&_kernel.ready_q.runq, thread);
+			dequeue_thread(&_kernel.ready_q.runq, thread);
+			queue_thread(&_kernel.ready_q.runq, thread);
 		}
 	}
 }
@@ -1259,11 +1266,10 @@ void z_impl_k_yield(void)
 	LOCKED(&sched_spinlock) {
 		if (!IS_ENABLED(CONFIG_SMP) ||
 		    z_is_thread_queued(_current)) {
-			_priq_run_remove(&_kernel.ready_q.runq,
+			dequeue_thread(&_kernel.ready_q.runq,
 					 _current);
 		}
-		_priq_run_add(&_kernel.ready_q.runq, _current);
-		z_mark_thread_as_queued(_current);
+		queue_thread(&_kernel.ready_q.runq, _current);
 		update_cache(1);
 	}
 }
@@ -1436,8 +1442,7 @@ void z_sched_abort(struct k_thread *thread)
 			thread->base.thread_state |= _THREAD_DEAD;
 			k_spin_unlock(&sched_spinlock, key);
 		} else if (z_is_thread_queued(thread)) {
-			_priq_run_remove(&_kernel.ready_q.runq, thread);
-			z_mark_thread_as_not_queued(thread);
+			dequeue_thread(&_kernel.ready_q.runq, thread);
 			thread->base.thread_state |= _THREAD_DEAD;
 			k_spin_unlock(&sched_spinlock, key);
 		} else {
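
The diff above replaces every paired call to _priq_run_add()/_priq_run_remove() plus z_mark_thread_as_queued()/z_mark_thread_as_not_queued() with the single helpers queue_thread()/dequeue_thread(), so the _THREAD_QUEUED flag cannot drift out of sync with actual run-queue membership. The following standalone C sketch (not Zephyr code, not part of the patch) illustrates that invariant with simplified stand-ins: fake_thread, fake_runq, and THREAD_QUEUED stand in for struct k_thread, _kernel.ready_q.runq, and _THREAD_QUEUED, and the naive linked list stands in for the real priority queue.

/*
 * Minimal sketch of the "flag and queue op live in one helper" pattern.
 * Everything here is illustrative; only the shape matches the patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_QUEUED (1u << 0)        /* stand-in for _THREAD_QUEUED */

struct fake_thread {
	uint32_t thread_state;
	struct fake_thread *next;       /* intrusive run-queue link */
	int prio;
};

struct fake_runq {
	struct fake_thread *head;
};

/* Analogue of queue_thread(): set the flag, then insert. */
static void queue_thread(struct fake_runq *rq, struct fake_thread *t)
{
	t->thread_state |= THREAD_QUEUED;
	t->next = rq->head;
	rq->head = t;
}

/* Analogue of dequeue_thread(): clear the flag, then remove. */
static void dequeue_thread(struct fake_runq *rq, struct fake_thread *t)
{
	t->thread_state &= ~THREAD_QUEUED;
	for (struct fake_thread **pp = &rq->head; *pp != NULL; pp = &(*pp)->next) {
		if (*pp == t) {
			*pp = t->next;
			t->next = NULL;
			break;
		}
	}
}

/* Analogue of z_is_thread_queued(): callers only ever test the flag. */
static bool is_thread_queued(const struct fake_thread *t)
{
	return (t->thread_state & THREAD_QUEUED) != 0u;
}

int main(void)
{
	struct fake_runq rq = { .head = NULL };
	struct fake_thread t = { .thread_state = 0u, .next = NULL, .prio = 5 };

	queue_thread(&rq, &t);
	printf("queued: %d\n", is_thread_queued(&t));   /* prints 1 */

	dequeue_thread(&rq, &t);
	printf("queued: %d\n", is_thread_queued(&t));   /* prints 0 */

	return 0;
}

As in the patch, call sites only read the queued state and never toggle it directly; the flag update and the queue operation always happen together inside the helper.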