kernel/sched: Simplify de/queue_thread()

Pure refactoring.  For historical reasons queue_thread() and
dequeue_thread() took a first argument (a pointer to the run queue) that
was always the same: &_kernel.ready_q.runq.  Eliminate it and name the
queue directly inside the helpers, as sketched below.
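
A minimal stand-alone sketch of the shape of the change (toy code for
illustration only, not the kernel sources; the real helpers appear in
the diff below):

	/* Toy model of the refactor: the run-queue argument was always
	 * the same global, so the helper can name it directly and every
	 * caller drops the parameter.
	 */
	#include <stdio.h>

	struct thread { int prio; struct thread *next; };
	static struct { struct thread *runq; } ready_q;

	/* before: static void queue_thread(struct thread **pq, struct thread *t) */
	static void queue_thread(struct thread *t)
	{
		t->next = ready_q.runq;	/* was: t->next = *pq; */
		ready_q.runq = t;
	}

	int main(void)
	{
		struct thread t = { .prio = 5 };
		queue_thread(&t);	/* was: queue_thread(&ready_q.runq, &t); */
		printf("queued prio %d\n", ready_q.runq->prio);
		return 0;
	}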

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
commit c230fb3580
Author:    Andy Ross <andrew.j.ross@intel.com>
Date:      2021-09-23 16:41:30 -07:00
Committed: Christopher Friedt


@@ -195,12 +195,11 @@ static inline bool should_queue_thread(struct k_thread *th)
 	return !IS_ENABLED(CONFIG_SMP) || th != _current;
 }
 
-static ALWAYS_INLINE void queue_thread(void *pq,
-				       struct k_thread *thread)
+static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
 {
 	thread->base.thread_state |= _THREAD_QUEUED;
 	if (should_queue_thread(thread)) {
-		_priq_run_add(pq, thread);
+		_priq_run_add(&_kernel.ready_q.runq, thread);
 	}
 #ifdef CONFIG_SMP
 	if (thread == _current) {
@@ -210,12 +209,11 @@
 #endif
 }
 
-static ALWAYS_INLINE void dequeue_thread(void *pq,
-					 struct k_thread *thread)
+static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
 {
 	thread->base.thread_state &= ~_THREAD_QUEUED;
 	if (should_queue_thread(thread)) {
-		_priq_run_remove(pq, thread);
+		_priq_run_remove(&_kernel.ready_q.runq, thread);
 	}
 }
 
@@ -309,12 +307,12 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
 	/* Put _current back into the queue */
 	if (thread != _current && active &&
 	    !z_is_idle_thread_object(_current) && !queued) {
-		queue_thread(&_kernel.ready_q.runq, _current);
+		queue_thread(_current);
 	}
 
 	/* Take the new _current out of the queue */
 	if (z_is_thread_queued(thread)) {
-		dequeue_thread(&_kernel.ready_q.runq, thread);
+		dequeue_thread(thread);
 	}
 
 	_current_cpu->swap_ok = false;
@@ -325,9 +323,9 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
 static void move_thread_to_end_of_prio_q(struct k_thread *thread)
 {
 	if (z_is_thread_queued(thread)) {
-		dequeue_thread(&_kernel.ready_q.runq, thread);
+		dequeue_thread(thread);
 	}
-	queue_thread(&_kernel.ready_q.runq, thread);
+	queue_thread(thread);
 	update_cache(thread == _current);
 }
 
@@ -493,7 +491,7 @@ static void ready_thread(struct k_thread *thread)
 	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
 		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);
 
-		queue_thread(&_kernel.ready_q.runq, thread);
+		queue_thread(thread);
 		update_cache(0);
 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
 		arch_sched_ipi();
@@ -539,7 +537,7 @@ void z_impl_k_thread_suspend(struct k_thread *thread)
 
 	LOCKED(&sched_spinlock) {
 		if (z_is_thread_queued(thread)) {
-			dequeue_thread(&_kernel.ready_q.runq, thread);
+			dequeue_thread(thread);
 		}
 		z_mark_thread_as_suspended(thread);
 		update_cache(thread == _current);
@@ -600,7 +598,7 @@ static _wait_q_t *pended_on_thread(struct k_thread *thread)
 static void unready_thread(struct k_thread *thread)
 {
 	if (z_is_thread_queued(thread)) {
-		dequeue_thread(&_kernel.ready_q.runq, thread);
+		dequeue_thread(thread);
 	}
 	update_cache(thread == _current);
 }
@@ -763,9 +761,9 @@ bool z_set_prio(struct k_thread *thread, int prio)
 		if (need_sched) {
 			/* Don't requeue on SMP if it's the running thread */
 			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
-				dequeue_thread(&_kernel.ready_q.runq, thread);
+				dequeue_thread(thread);
 				thread->base.prio = prio;
-				queue_thread(&_kernel.ready_q.runq, thread);
+				queue_thread(thread);
 			} else {
 				thread->base.prio = prio;
 			}
@@ -1158,8 +1156,8 @@ void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
 	LOCKED(&sched_spinlock) {
 		thread->base.prio_deadline = k_cycle_get_32() + deadline;
 		if (z_is_thread_queued(thread)) {
-			dequeue_thread(&_kernel.ready_q.runq, thread);
-			queue_thread(&_kernel.ready_q.runq, thread);
+			dequeue_thread(thread);
+			queue_thread(thread);
 		}
 	}
 }
@@ -1190,10 +1188,9 @@ void z_impl_k_yield(void)
 
 	if (!IS_ENABLED(CONFIG_SMP) ||
 	    z_is_thread_queued(_current)) {
-		dequeue_thread(&_kernel.ready_q.runq,
-			       _current);
+		dequeue_thread(_current);
 	}
-	queue_thread(&_kernel.ready_q.runq, _current);
+	queue_thread(_current);
 	update_cache(1);
 	z_swap(&sched_spinlock, key);
 }
@@ -1473,7 +1470,7 @@ static void end_thread(struct k_thread *thread)
 		thread->base.thread_state |= _THREAD_DEAD;
 		thread->base.thread_state &= ~_THREAD_ABORTING;
 		if (z_is_thread_queued(thread)) {
-			dequeue_thread(&_kernel.ready_q.runq, thread);
+			dequeue_thread(thread);
 		}
 		if (thread->base.pended_on != NULL) {
 			unpend_thread_no_timeout(thread);