From 9f06a3545044110ec5666f764b041aa1aac216a6 Mon Sep 17 00:00:00 2001
From: Andy Ross
Date: Thu, 28 Jun 2018 10:38:14 -0700
Subject: [PATCH] kernel: Add the old "multi queue" scheduler algorithm as an
 option

Zephyr 1.12 removed the old scheduler and replaced it with the choice
of a "dumb" list or a balanced tree.  But the old multi-queue
algorithm is still useful in the space between these two
(applications with large-ish numbers of runnable threads, but that
don't need fancy features like EDF or SMP affinity).  So add it as a
CONFIG_SCHED_MULTIQ option.

Signed-off-by: Andy Ross
---
 include/sched_priq.h            | 16 ++++++++++
 kernel/Kconfig                  | 52 +++++++++++++++++++++++----------
 kernel/include/kernel_structs.h |  6 ++--
 kernel/sched.c                  | 50 ++++++++++++++++++++++++++++++-
 samples/hello_world/prj.conf    |  1 +
 5 files changed, 106 insertions(+), 19 deletions(-)

diff --git a/include/sched_priq.h b/include/sched_priq.h
index e83b5f685ad..cd3be876ffe 100644
--- a/include/sched_priq.h
+++ b/include/sched_priq.h
@@ -44,4 +44,20 @@
 void _priq_rb_add(struct _priq_rb *pq, struct k_thread *thread);
 void _priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread);
 struct k_thread *_priq_rb_best(struct _priq_rb *pq);
 
+/* Traditional/textbook "multi-queue" structure.  Separate lists for a
+ * small number (max 32 here) of fixed priorities.  This corresponds
+ * to the original Zephyr scheduler.  RAM requirements are
+ * comparatively high, but performance is very fast.  Won't work with
+ * features like deadline scheduling which need large priority spaces
+ * to represent their requirements.
+ */
+struct _priq_mq {
+	sys_dlist_t queues[32];
+	unsigned int bitmask; /* bit 1<<i set if queues[i] is non-empty */
+};
+
+void _priq_mq_add(struct _priq_mq *pq, struct k_thread *thread);
+void _priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread);
+struct k_thread *_priq_mq_best(struct _priq_mq *pq);
+
diff --git a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ ... @@
+#ifdef CONFIG_SCHED_MULTIQ
+# if (K_LOWEST_THREAD_PRIO - K_HIGHEST_THREAD_PRIO) > 31
+# error Too many priorities for multiqueue scheduler (max 32)
+# endif
+#endif
+
+void _priq_mq_add(struct _priq_mq *pq, struct k_thread *thread)
+{
+	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
+
+	sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist);
+	pq->bitmask |= (1 << priority_bit);
+}
+
+void _priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread)
+{
+	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
+
+	sys_dlist_remove(&thread->base.qnode_dlist);
+	if (sys_dlist_is_empty(&pq->queues[priority_bit])) {
+		pq->bitmask &= ~(1 << priority_bit);
+	}
+}
+
+struct k_thread *_priq_mq_best(struct _priq_mq *pq)
+{
+	if (!pq->bitmask) {
+		return NULL;
+	}
+
+	sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)];
+
+	return CONTAINER_OF(sys_dlist_peek_head(l),
+			    struct k_thread, base.qnode_dlist);
+}
+
 #ifdef CONFIG_TIMESLICING
 extern s32_t _time_slice_duration;	/* Measured in ms */
 extern s32_t _time_slice_elapsed;	/* Measured in ms */
@@ -644,13 +684,21 @@ void _sched_init(void)
 {
 #ifdef CONFIG_SCHED_DUMB
 	sys_dlist_init(&_kernel.ready_q.runq);
-#else
+#endif
+
+#ifdef CONFIG_SCHED_SCALABLE
 	_kernel.ready_q.runq = (struct _priq_rb) {
 		.tree = {
 			.lessthan_fn = _priq_rb_lessthan,
 		}
 	};
 #endif
+
+#ifdef CONFIG_SCHED_MULTIQ
+	for (int i = 0; i < ARRAY_SIZE(_kernel.ready_q.runq.queues); i++) {
+		sys_dlist_init(&_kernel.ready_q.runq.queues[i]);
+	}
+#endif
 }
 
 int _impl_k_thread_priority_get(k_tid_t thread)
diff --git a/samples/hello_world/prj.conf b/samples/hello_world/prj.conf
index b2a4ba59104..240d0959a5b 100644
--- a/samples/hello_world/prj.conf
+++ b/samples/hello_world/prj.conf
@@ -1 +1,2 @@
 # nothing here
+CONFIG_SCHED_MULTIQ=y
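
A note for readers outside the patch context: the core trick above is
pairing an array of per-priority lists with an occupancy bitmask, so
that "find the highest-priority runnable thread" reduces to a single
count-trailing-zeros instruction.  Below is a minimal, self-contained
C sketch of that same technique.  It is an illustration only, not
Zephyr code: the `node`, `mq`, and `main()` names here are
hypothetical, and the hand-rolled circular list stands in for
Zephyr's `sys_dlist_t`.

/* Standalone sketch of a 32-priority multi-queue (hypothetical types,
 * not Zephyr code).  Each priority has its own FIFO list; a 32-bit
 * occupancy mask makes add, remove, and pick-best all O(1).
 */
#include <stdio.h>

#define NPRIO 32

struct node {
	struct node *next, *prev;  /* circular list, linked via sentinel */
	int prio;                  /* 0 = highest, NPRIO-1 = lowest */
	const char *name;
};

struct mq {
	struct node heads[NPRIO];  /* one sentinel head per priority */
	unsigned int bitmask;      /* bit i set if heads[i] is non-empty */
};

static void mq_init(struct mq *q)
{
	for (int i = 0; i < NPRIO; i++) {
		q->heads[i].next = q->heads[i].prev = &q->heads[i];
	}
	q->bitmask = 0;
}

static void mq_add(struct mq *q, struct node *n)
{
	struct node *h = &q->heads[n->prio];

	/* Append at the tail: equal-priority threads round-robin. */
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
	q->bitmask |= 1u << n->prio;
}

static void mq_remove(struct mq *q, struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	if (q->heads[n->prio].next == &q->heads[n->prio]) {
		q->bitmask &= ~(1u << n->prio);  /* list went empty */
	}
}

static struct node *mq_best(struct mq *q)
{
	if (q->bitmask == 0) {
		return NULL;
	}
	/* Lowest set bit == highest priority with a queued entry. */
	struct node *h = &q->heads[__builtin_ctz(q->bitmask)];
	return h->next;
}

int main(void)
{
	struct mq q;
	struct node a = { .prio = 7, .name = "worker" };
	struct node b = { .prio = 2, .name = "urgent" };

	mq_init(&q);
	mq_add(&q, &a);
	mq_add(&q, &b);
	printf("best: %s\n", mq_best(&q)->name);  /* prints "urgent" */
	mq_remove(&q, &b);
	printf("best: %s\n", mq_best(&q)->name);  /* prints "worker" */
	return 0;
}

This also shows the tradeoff the commit message describes: the 32
list heads cost RAM unconditionally, but every operation runs in O(1)
with a tiny constant factor, and round-robin behavior among
equal-priority threads falls out of appending at the tail.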