diff --git a/doc/kernel/services/scheduling/index.rst b/doc/kernel/services/scheduling/index.rst
index 38837619a1f..87823ad40bf 100644
--- a/doc/kernel/services/scheduling/index.rst
+++ b/doc/kernel/services/scheduling/index.rst
@@ -82,7 +82,7 @@ runtime overhead and performance scaling when many threads are added.
 * Traditional multi-queue ready queue (:kconfig:option:`CONFIG_SCHED_MULTIQ`)
 
   When selected, the scheduler ready queue will be implemented as the
-  classic/textbook array of lists, one per priority (max 32 priorities).
+  classic/textbook array of lists, one per priority.
 
   This corresponds to the scheduler algorithm used in Zephyr versions
   prior to 1.12.
diff --git a/include/zephyr/kernel_structs.h b/include/zephyr/kernel_structs.h
index 093fcd0a188..ff8bde4e571 100644
--- a/include/zephyr/kernel_structs.h
+++ b/include/zephyr/kernel_structs.h
@@ -32,6 +32,15 @@
 #include <zephyr/kernel/internal/smp.h>
 #endif
 
+#define K_NUM_THREAD_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + CONFIG_NUM_COOP_PRIORITIES + 1)
+
+#if defined(CONFIG_64BIT)
+#define PRIQ_BITMAP_SIZE (DIV_ROUND_UP(K_NUM_THREAD_PRIO, 8 * sizeof(uint64_t)))
+#else
+#define PRIQ_BITMAP_SIZE (DIV_ROUND_UP(K_NUM_THREAD_PRIO, 8 * sizeof(uint32_t)))
+#endif
+
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -117,8 +126,12 @@ struct _priq_rb {
  * to represent their requirements.
  */
 struct _priq_mq {
-	sys_dlist_t queues[32];
-	unsigned int bitmask;	/* bit 1<<i set if queues[i] is non-empty */
+	sys_dlist_t queues[K_NUM_THREAD_PRIO];
+#ifdef CONFIG_64BIT
+	uint64_t bitmask[PRIQ_BITMAP_SIZE];
+#else
+	uint32_t bitmask[PRIQ_BITMAP_SIZE];
+#endif
 };
diff --git a/kernel/include/priority_q.h b/kernel/include/priority_q.h
--- a/kernel/include/priority_q.h
+++ b/kernel/include/priority_q.h
@@ -31,23 +31,43 @@
+#ifdef CONFIG_64BIT
+#define NBITS 64
+#else
+#define NBITS 32
+#endif
+
 #ifdef CONFIG_SCHED_MULTIQ
-# if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES > 31
-# error Too many priorities for multiqueue scheduler (max 32)
-# endif
+
+struct prio_info {
+	uint8_t offset_prio;
+	uint8_t idx;
+	uint8_t bit;
+};
+
+static ALWAYS_INLINE struct prio_info get_prio_info(int8_t old_prio)
+{
+	struct prio_info ret;
+
+	ret.offset_prio = old_prio - K_HIGHEST_THREAD_PRIO;
+	ret.idx = ret.offset_prio / NBITS;
+	ret.bit = ret.offset_prio % NBITS;
+
+	return ret;
+}
 
 static ALWAYS_INLINE void z_priq_mq_add(struct _priq_mq *pq, struct k_thread *thread)
 {
-	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
+	struct prio_info pos = get_prio_info(thread->base.prio);
 
-	sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist);
-	pq->bitmask |= BIT(priority_bit);
+	sys_dlist_append(&pq->queues[pos.offset_prio], &thread->base.qnode_dlist);
+	pq->bitmask[pos.idx] |= BIT(pos.bit);
 }
 
 static ALWAYS_INLINE void z_priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread)
 {
-	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
+	struct prio_info pos = get_prio_info(thread->base.prio);
 
 	sys_dlist_remove(&thread->base.qnode_dlist);
-	if (sys_dlist_is_empty(&pq->queues[priority_bit])) {
-		pq->bitmask &= ~BIT(priority_bit);
+	if (sys_dlist_is_empty(&pq->queues[pos.offset_prio])) {
+		pq->bitmask[pos.idx] &= ~BIT(pos.bit);
 	}
 }
 #endif /* CONFIG_SCHED_MULTIQ */
diff --git a/kernel/priority_queues.c b/kernel/priority_queues.c
index 5de9cdccf84..692abfa7c6c 100644
--- a/kernel/priority_queues.c
+++ b/kernel/priority_queues.c
@@ -6,6 +6,7 @@
 
 #include <zephyr/kernel.h>
 #include <ksched.h>
+#include <zephyr/sys/math_extras.h>
 
 void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
 {
@@ -94,16 +95,25 @@ struct k_thread *z_priq_rb_best(struct _priq_rb *pq)
 
 struct k_thread *z_priq_mq_best(struct _priq_mq *pq)
 {
-	if (!pq->bitmask) {
-		return NULL;
-	}
-
 	struct k_thread *thread = NULL;
-	sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)];
-	sys_dnode_t *n = sys_dlist_peek_head(l);
 
-	if (n != NULL) {
-		thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
+	for (int i = 0; i < PRIQ_BITMAP_SIZE; ++i) {
+		if (!pq->bitmask[i]) {
+			continue;
+		}
+
+#ifdef CONFIG_64BIT
+		sys_dlist_t *l = &pq->queues[i * 64 + u64_count_trailing_zeros(pq->bitmask[i])];
+#else
+		sys_dlist_t *l = &pq->queues[i * 32 + u32_count_trailing_zeros(pq->bitmask[i])];
+#endif
+		sys_dnode_t *n = sys_dlist_peek_head(l);
+
+		if (n != NULL) {
+			thread = CONTAINER_OF(n, struct k_thread, base.qnode_dlist);
+			break;
+		}
 	}
+
 	return thread;
 }