kernel: Move run queue initialization

Move the initialization of the priority q for running out of sched.c to
remove one more ifdef from sched.c. No change in functionality but
better matches the rest of sched.c and priority_q.h such that the
ifdeffery needed is done in priority_q.h.

Signed-off-by: Tom Burdick <thomas.burdick@intel.com>
This commit is contained in:
Tom Burdick 2024-10-25 12:45:38 -05:00 committed by Anas Nashif
commit 2b5012a5d9
2 changed files with 25 additions and 14 deletions

View file

@ -17,6 +17,7 @@ bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
/* Dumb Scheduling */
#if defined(CONFIG_SCHED_DUMB)
#define _priq_run_init z_priq_dumb_init
#define _priq_run_add z_priq_dumb_add
#define _priq_run_remove z_priq_dumb_remove
# if defined(CONFIG_SCHED_CPU_MASK)
@ -26,6 +27,7 @@ bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
# endif /* CONFIG_SCHED_CPU_MASK */
/* Scalable Scheduling */
#elif defined(CONFIG_SCHED_SCALABLE)
#define _priq_run_init z_priq_rb_init
#define _priq_run_add z_priq_rb_add
#define _priq_run_remove z_priq_rb_remove
#define _priq_run_best z_priq_rb_best
@ -37,7 +39,7 @@ bool z_priq_rb_lessthan(struct rbnode *a, struct rbnode *b);
#else
#define NBITS 32
#endif /* CONFIG_64BIT */
#define _priq_run_init z_priq_mq_init
#define _priq_run_add z_priq_mq_add
#define _priq_run_remove z_priq_mq_remove
#define _priq_run_best z_priq_mq_best
@ -57,6 +59,11 @@ static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq, struct k_thread
#define _priq_wait_best z_priq_dumb_best
#endif
/* Initialize the "dumb" scheduler run queue: a plain doubly-linked
 * list, made empty here via sys_dlist_init().
 */
static ALWAYS_INLINE void z_priq_dumb_init(sys_dlist_t *pq)
{
	sys_dlist_init(pq);
}
static ALWAYS_INLINE void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
{
ARG_UNUSED(pq);
@ -75,6 +82,15 @@ static ALWAYS_INLINE struct k_thread *z_priq_dumb_best(sys_dlist_t *pq)
return thread;
}
/* Initialize the scalable (red/black tree) run queue.
 *
 * The compound-literal assignment deliberately zeroes the entire
 * struct (not just .tree), then installs the thread-priority
 * comparator that orders the tree.
 */
static ALWAYS_INLINE void z_priq_rb_init(struct _priq_rb *pq)
{
	*pq = (struct _priq_rb) {
		.tree = {
			.lessthan_fn = z_priq_rb_lessthan,
		}
	};
}
static ALWAYS_INLINE void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
{
struct k_thread *t;
@ -163,6 +179,13 @@ static ALWAYS_INLINE struct prio_info get_prio_info(int8_t old_prio)
return ret;
}
static ALWAYS_INLINE void z_priq_mq_init(struct _priq_mq *q)
{
for (int i = 0; i < ARRAY_SIZE(q->queues); i++) {
sys_dlist_init(&q->queues[i]);
}
}
static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq,
struct k_thread *thread)
{

View file

@ -968,19 +968,7 @@ int z_unpend_all(_wait_q_t *wait_q)
/* Initialize a ready queue's priority run queue.
 *
 * The per-backend #ifdef ladder that used to live here duplicated the
 * work now done by _priq_run_init() — priority_q.h maps that name to
 * the dumb/scalable/multiq initializer per CONFIG_SCHED_* — so keeping
 * both would initialize the queue twice. The old MULTIQ branch also
 * sized its loop from the global _kernel.ready_q while initializing
 * the ready_q argument, which was wrong for any other queue.
 */
void init_ready_q(struct _ready_q *ready_q)
{
	_priq_run_init(&ready_q->runq);
}
void z_sched_init(void)