From 851d14afc8941313a6f3faeb74f84ed73a33429a Mon Sep 17 00:00:00 2001
From: Andy Ross
Date: Thu, 13 May 2021 15:46:43 -0700
Subject: [PATCH] kernel/sched: Remove "cooperative scheduling only" special
 cases

The scheduler has historically had an API where an application can
inform the kernel that it will never create a thread that can be
preempted, and the kernel and architecture layer would use that as an
optimization hint to eliminate some code paths.

Those optimizations have dwindled to almost nothing at this point, and
they're now objectively a smaller impact than the special casing that
was required to handle the idle thread (which, obviously, must always
be preemptible).

Fix this by eliminating the idea of "cooperative only" and ensuring
that there will always be at least one preemptible priority with value
>=0. CONFIG_NUM_PREEMPT_PRIORITIES now specifies the number of
user-accessible priorities other than the idle thread.

The only remaining workaround is that some older architectures (and
also SPARC) use the CONFIG_PREEMPT_ENABLED=n state as a hint to skip
thread switching on interrupt exit. So detect exactly those platforms
and implement a minimal workaround in the idle loop (basically "just
call swap()") instead, with a big explanation.

Note that this also fixes a bug in one of the philosophers samples,
where it would ask for 6 cooperative priorities but then use values -7
through -2. It was assuming the kernel would magically create a
cooperative priority for its idle thread, which wasn't correct even
before.

Fixes #34584

Signed-off-by: Andy Ross
---
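Editor's note: the material from here down to the diffstat sits below
the "---" cut line, so "git am" drops it from the final commit.

The new priority layout described above can be sanity-checked with a
small standalone sketch. The config values below are hypothetical (any
configuration works the same way); the macros mirror the
include/kernel.h hunk in this patch, and the snippet compiles as a
plain C11 translation unit:

  #define CONFIG_NUM_COOP_PRIORITIES 4
  #define CONFIG_NUM_PREEMPT_PRIORITIES 4

  /* Mirrors the new definitions in include/kernel.h */
  #define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
  #define K_PRIO_PREEMPT(x) (x)
  #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
  #define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
  #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
  #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)

  /* Cooperative priorities are negative: -4 (highest) through -1 */
  _Static_assert(K_PRIO_COOP(0) == -4, "highest cooperative priority");
  _Static_assert(K_PRIO_COOP(3) == -1, "lowest cooperative priority");

  /* Preemptible priorities are >= 0, and at least one always exists */
  _Static_assert(K_PRIO_PREEMPT(0) == 0, "highest preemptible priority");
  _Static_assert(K_LOWEST_APPLICATION_THREAD_PRIO == 3,
                 "lowest user-accessible priority");

  /* The one extra preemptible slot belongs to the idle thread */
  _Static_assert(K_IDLE_PRIO == 4, "idle priority, not user-accessible");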
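
On the idle-loop workaround: with CONFIG_PREEMPT_ENABLED=n on a
non-USE_SWITCH architecture (or on SPARC), interrupt exit restores the
interrupted context unconditionally, so a thread made ready by an ISR
only runs if the current thread volunteers a switch. The toy model
below shows the shape of the problem and of the fix; every name in it
is an illustrative stand-in, not a real kernel symbol:

  #include <stdio.h>

  struct thread { const char *name; };

  static struct thread idle_thread = { "idle" };
  static struct thread worker = { "worker" };

  /* Stand-ins for _kernel.ready_q.cache and _current */
  static struct thread *ready_cache = &idle_thread;
  static struct thread *current_thread = &idle_thread;

  static void isr(void)
  {
          /* An ISR readies a thread, but on these platforms interrupt
           * exit returns straight to the interrupted (idle) context
           * instead of rescheduling.
           */
          ready_cache = &worker;
  }

  int main(void)
  {
          isr();

          /* Without this check the idle loop would spin forever and
           * the worker would never run. It mirrors the patch's
           * "if (_kernel.ready_q.cache != _current) z_swap_unlocked();"
           */
          if (ready_cache != current_thread) {
                  printf("swap: %s -> %s\n", current_thread->name,
                         ready_cache->name);
                  current_thread = ready_cache;
          }
          return 0;
  }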
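
On the philosophers fix: the coop_only scenario runs six threads at
priorities -7 through -2 (per the note above). The new K_PRIO_COOP()
has no hidden "+1" for the idle thread, so that range only exists when
seven cooperative priorities are configured. Assuming the threads take
K_PRIO_COOP(0) through K_PRIO_COOP(5), the arithmetic checks out:

  #define CONFIG_NUM_COOP_PRIORITIES 7 /* the new sample.yaml value */
  #define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))

  _Static_assert(K_PRIO_COOP(0) == -7, "first philosopher");
  _Static_assert(K_PRIO_COOP(5) == -2, "sixth philosopher");

  /* With the old value of 6, K_PRIO_COOP(0) would have been -6, and a
   * request for priority -7 would fall outside the configured range.
   */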

 include/kernel.h                 | 40 +++++++-------------------------
 kernel/Kconfig                   |  6 ++++-
 kernel/idle.c                    | 24 +++++++++++++------
 kernel/include/ksched.h          |  5 ----
 kernel/init.c                    |  2 +-
 kernel/sched.c                   | 35 +++++++---------------------
 samples/philosophers/sample.yaml |  2 +-
 7 files changed, 40 insertions(+), 74 deletions(-)

diff --git a/include/kernel.h b/include/kernel.h
index 7aecb86e059..f3265d62b7e 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -36,43 +36,19 @@ extern "C" {
  * @}
  */
 
-#if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
-#define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES)
-#define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)
-#elif defined(CONFIG_COOP_ENABLED)
-#define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES + 1)
-#define _NUM_PREEMPT_PRIO (0)
-#elif defined(CONFIG_PREEMPT_ENABLED)
-#define _NUM_COOP_PRIO (0)
-#define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)
-#else
-#error "invalid configuration"
-#endif
-
-#define K_PRIO_COOP(x) (-(_NUM_COOP_PRIO - (x)))
-#define K_PRIO_PREEMPT(x) (x)
-
 #define K_ANY NULL
 #define K_END NULL
 
-#if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
+#if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
+#error Zero available thread priorities defined!
+#endif
+
+#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
+#define K_PRIO_PREEMPT(x) (x)
+
 #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
-#elif defined(CONFIG_COOP_ENABLED)
-#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES - 1)
-#elif defined(CONFIG_PREEMPT_ENABLED)
-#define K_HIGHEST_THREAD_PRIO 0
-#else
-#error "invalid configuration"
-#endif
-
-#ifdef CONFIG_PREEMPT_ENABLED
 #define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
-#else
-#define K_LOWEST_THREAD_PRIO -1
-#endif
-
 #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
-
 #define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
 #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
 
@@ -2553,7 +2529,7 @@ struct k_mutex {
 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
 	.owner = NULL, \
 	.lock_count = 0, \
-	.owner_orig_prio = K_LOWEST_THREAD_PRIO, \
+	.owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
 	}
 
 /**
diff --git a/kernel/Kconfig b/kernel/Kconfig
index 8c31c14bc73..0f933a60685 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -90,7 +90,11 @@ config PREEMPT_ENABLED
 
 config PRIORITY_CEILING
 	int "Priority inheritance ceiling"
-	default 0
+	default -127
+	help
+	  This defines the minimum priority value (i.e. the logically
+	  highest priority) that a thread will acquire as part of
+	  k_mutex priority inheritance.
 
 config NUM_METAIRQ_PRIORITIES
 	int "Number of very-high priority 'preemptor' threads"
diff --git a/kernel/idle.c b/kernel/idle.c
index 1711a62e7ca..dd50de9141e 100644
--- a/kernel/idle.c
+++ b/kernel/idle.c
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include <kswap.h>
 
 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 
@@ -66,6 +67,8 @@ void idle(void *unused1, void *unused2, void *unused3)
 	ARG_UNUSED(unused2);
 	ARG_UNUSED(unused3);
 
+	__ASSERT_NO_MSG(_current->base.prio >= 0);
+
 	while (true) {
 		/* SMP systems without a working IPI can't
 		 * actual enter an idle state, because they
@@ -95,14 +98,21 @@
 			k_cpu_idle();
 		}
 
-		/* It is possible to (pathologically) configure the
-		 * idle thread to have a non-preemptible priority.
-		 * You might think this is an API bug, but we actually
-		 * have a test that exercises this. Handle the edge
-		 * case when that happens.
+#if !defined(CONFIG_PREEMPT_ENABLED)
+# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
+		/* A legacy mess: the idle thread is by definition
+		 * preemptible as far as the modern scheduler is
+		 * concerned, but older platforms use
+		 * CONFIG_PREEMPT_ENABLED=n as an optimization hint
+		 * that interrupt exit always returns to the
+		 * interrupted context. So in that setup we need to
+		 * explicitly yield in the idle thread otherwise
+		 * nothing else will run once it starts.
 		 */
-		if (K_IDLE_PRIO < 0) {
-			k_yield();
+		if (_kernel.ready_q.cache != _current) {
+			z_swap_unlocked();
 		}
+# endif
+#endif
 	}
 }
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 772eaf5f726..7d39825c52a 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -250,27 +250,22 @@ static inline void _ready_one_thread(_wait_q_t *wq)
 
 static inline void z_sched_lock(void)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!arch_is_in_isr(), "");
 	__ASSERT(_current->base.sched_locked != 1U, "");
 
 	--_current->base.sched_locked;
 
 	compiler_barrier();
-
-#endif
 }
 
 static ALWAYS_INLINE void z_sched_unlock_no_reschedule(void)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!arch_is_in_isr(), "");
 	__ASSERT(_current->base.sched_locked != 0U, "");
 
 	compiler_barrier();
 
 	++_current->base.sched_locked;
-#endif
 }
 
 static ALWAYS_INLINE bool z_is_thread_timeout_expired(struct k_thread *thread)
diff --git a/kernel/init.c b/kernel/init.c
index f1d4eb5abdd..0d829e7f68f 100644
--- a/kernel/init.c
+++ b/kernel/init.c
@@ -259,7 +259,7 @@ static void init_idle_thread(int i)
 
 	z_setup_new_thread(thread, stack,
 			   CONFIG_IDLE_STACK_SIZE, idle, &_kernel.cpus[i],
-			   NULL, NULL, K_LOWEST_THREAD_PRIO, K_ESSENTIAL,
+			   NULL, NULL, K_IDLE_PRIO, K_ESSENTIAL,
 			   tname);
 	z_mark_thread_as_started(thread);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index b6aecd8a602..1f68f96d5bd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -53,12 +53,8 @@ static void end_thread(struct k_thread *thread);
 
 static inline int is_preempt(struct k_thread *thread)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
 	/* explanation in kernel_struct.h */
 	return thread->base.preempt <= _PREEMPT_THRESHOLD;
-#else
-	return 0;
-#endif
 }
 
 static inline int is_metairq(struct k_thread *thread)
@@ -154,15 +150,6 @@ static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
 		return true;
 	}
 
-	/* The idle threads can look "cooperative" if there are no
-	 * preemptible priorities (this is sort of an API glitch).
-	 * They must always be preemptible.
-	 */
-	if (!IS_ENABLED(CONFIG_PREEMPT_ENABLED) &&
-	    z_is_idle_thread_object(_current)) {
-		return true;
-	}
-
 	return false;
 }
 
@@ -845,7 +832,6 @@ void k_sched_lock(void)
 
 void k_sched_unlock(void)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
 	LOCKED(&sched_spinlock) {
 		__ASSERT(_current->base.sched_locked != 0U, "");
 		__ASSERT(!arch_is_in_isr(), "");
@@ -860,7 +846,6 @@ void k_sched_unlock(void)
 	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);
 
 	z_reschedule_unlocked();
-#endif
 }
 
 struct k_thread *z_swap_next_thread(void)
@@ -1201,20 +1186,16 @@ void z_impl_k_yield(void)
 
 	SYS_PORT_TRACING_FUNC(k_thread, yield);
 
-	if (!z_is_idle_thread_object(_current)) {
-		k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
+	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
 
-		if (!IS_ENABLED(CONFIG_SMP) ||
-		    z_is_thread_queued(_current)) {
-			dequeue_thread(&_kernel.ready_q.runq,
-				       _current);
-		}
-		queue_thread(&_kernel.ready_q.runq, _current);
-		update_cache(1);
-		z_swap(&sched_spinlock, key);
-	} else {
-		z_swap_unlocked();
+	if (!IS_ENABLED(CONFIG_SMP) ||
+	    z_is_thread_queued(_current)) {
+		dequeue_thread(&_kernel.ready_q.runq,
+			       _current);
 	}
+	queue_thread(&_kernel.ready_q.runq, _current);
+	update_cache(1);
+	z_swap(&sched_spinlock, key);
 }
 
 #ifdef CONFIG_USERSPACE
diff --git a/samples/philosophers/sample.yaml b/samples/philosophers/sample.yaml
index 1cb976c1686..e68cce1bcaa 100644
--- a/samples/philosophers/sample.yaml
+++ b/samples/philosophers/sample.yaml
@@ -38,4 +38,4 @@ tests:
   sample.kernel.philosopher.coop_only:
     extra_configs:
       - CONFIG_NUM_PREEMPT_PRIORITIES=0
-      - CONFIG_NUM_COOP_PRIORITIES=6
+      - CONFIG_NUM_COOP_PRIORITIES=7