kernel/sched: Remove "cooperative scheduling only" special cases
The scheduler has historically had an API where an application can inform the kernel that it will never create a thread that can be preempted, and the kernel and architecture layer would use that as an optimization hint to eliminate some code paths. Those optimizations have dwindled to almost nothing at this point, and they're now objectively a smaller impact than the special casing that was required to handle the idle thread (which, obviously, must always be preemptible).

Fix this by eliminating the idea of "cooperative only" and ensuring that there will always be at least one preemptible priority with value >=0. CONFIG_NUM_PREEMPT_PRIORITIES now specifies the number of user-accessible priorities other than the idle thread.

The only remaining workaround is that some older architectures (and also SPARC) use the CONFIG_PREEMPT_ENABLED=n state as a hint to skip thread switching on interrupt exit. So detect exactly those platforms and implement a minimal workaround in the idle loop (basically "just call swap()") instead, with a big explanation.

Note that this also fixes a bug in one of the philosophers samples, where it would ask for 6 cooperative priorities but then use values -7 through -2. It was assuming the kernel would magically create a cooperative priority for its idle thread, which wasn't correct even before.

Fixes #34584

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Parent: a7c732db2b
Commit: 851d14afc8
7 changed files with 40 additions and 74 deletions
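For orientation, here is a minimal, self-contained sketch of the priority numbering the new macros produce. The Kconfig values are illustrative stand-ins chosen to match the philosophers coop_only configuration in the last hunk below; they are not part of the commit itself.

    #include <assert.h>

    /* Illustrative stand-ins for the Kconfig symbols (hypothetical
     * values matching the coop_only philosophers test below).
     */
    #define CONFIG_NUM_COOP_PRIORITIES 7
    #define CONFIG_NUM_PREEMPT_PRIORITIES 0

    /* The macro definitions as introduced by this commit (include/kernel.h) */
    #define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
    #define K_PRIO_PREEMPT(x) (x)
    #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
    #define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
    #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO

    int main(void)
    {
        /* Cooperative priorities are negative: -7 .. -1 here */
        assert(K_PRIO_COOP(0) == -7 && K_PRIO_COOP(6) == -1);

        /* The idle thread now always sits on a preemptible priority
         * with value >= 0, even with zero preemptible priorities
         * configured.
         */
        assert(K_IDLE_PRIO == 0 && K_IDLE_PRIO >= 0);
        return 0;
    }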
diff --git a/include/kernel.h b/include/kernel.h
@@ -36,43 +36,19 @@ extern "C" {
  * @}
  */
 
-#if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
-#define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES)
-#define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)
-#elif defined(CONFIG_COOP_ENABLED)
-#define _NUM_COOP_PRIO (CONFIG_NUM_COOP_PRIORITIES + 1)
-#define _NUM_PREEMPT_PRIO (0)
-#elif defined(CONFIG_PREEMPT_ENABLED)
-#define _NUM_COOP_PRIO (0)
-#define _NUM_PREEMPT_PRIO (CONFIG_NUM_PREEMPT_PRIORITIES + 1)
-#else
-#error "invalid configuration"
-#endif
-
-#define K_PRIO_COOP(x) (-(_NUM_COOP_PRIO - (x)))
-#define K_PRIO_PREEMPT(x) (x)
-
 #define K_ANY NULL
 #define K_END NULL
 
-#if defined(CONFIG_COOP_ENABLED) && defined(CONFIG_PREEMPT_ENABLED)
+#if CONFIG_NUM_COOP_PRIORITIES + CONFIG_NUM_PREEMPT_PRIORITIES == 0
+#error Zero available thread priorities defined!
+#endif
+
+#define K_PRIO_COOP(x) (-(CONFIG_NUM_COOP_PRIORITIES - (x)))
+#define K_PRIO_PREEMPT(x) (x)
+
 #define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES)
-#elif defined(CONFIG_COOP_ENABLED)
-#define K_HIGHEST_THREAD_PRIO (-CONFIG_NUM_COOP_PRIORITIES - 1)
-#elif defined(CONFIG_PREEMPT_ENABLED)
-#define K_HIGHEST_THREAD_PRIO 0
-#else
-#error "invalid configuration"
-#endif
-
-#ifdef CONFIG_PREEMPT_ENABLED
 #define K_LOWEST_THREAD_PRIO CONFIG_NUM_PREEMPT_PRIORITIES
-#else
-#define K_LOWEST_THREAD_PRIO -1
-#endif
 
 #define K_IDLE_PRIO K_LOWEST_THREAD_PRIO
 
 #define K_HIGHEST_APPLICATION_THREAD_PRIO (K_HIGHEST_THREAD_PRIO)
 #define K_LOWEST_APPLICATION_THREAD_PRIO (K_LOWEST_THREAD_PRIO - 1)
@@ -2553,7 +2529,7 @@ struct k_mutex {
 	.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
 	.owner = NULL, \
 	.lock_count = 0, \
-	.owner_orig_prio = K_LOWEST_THREAD_PRIO, \
+	.owner_orig_prio = K_LOWEST_APPLICATION_THREAD_PRIO, \
 	}
 
 /**
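The k_mutex initializer change falls out of the new constants: K_LOWEST_THREAD_PRIO is now the idle thread's priority, so the resting value of owner_orig_prio moves to K_LOWEST_APPLICATION_THREAD_PRIO, the lowest priority an actual owner could hold. A usage sketch (K_MUTEX_DEFINE is the existing Zephyr convenience macro; nothing changes for callers):

    #include <kernel.h>

    K_MUTEX_DEFINE(my_lock);    /* statically initialized k_mutex */

    void critical_update(void)
    {
        /* With no owner, owner_orig_prio now rests at
         * K_LOWEST_APPLICATION_THREAD_PRIO, i.e.
         * K_LOWEST_THREAD_PRIO - 1: one step logically above the
         * idle thread.
         */
        k_mutex_lock(&my_lock, K_FOREVER);
        /* ... */
        k_mutex_unlock(&my_lock);
    }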
diff --git a/kernel/Kconfig b/kernel/Kconfig
@@ -90,7 +90,11 @@ config PREEMPT_ENABLED
 
 config PRIORITY_CEILING
 	int "Priority inheritance ceiling"
-	default 0
+	default -127
+	help
+	  This defines the minimum priority value (i.e. the logically
+	  highest priority) that a thread will acquire as part of
+	  k_mutex priority inheritance.
 
 config NUM_METAIRQ_PRIORITIES
 	int "Number of very-high priority 'preemptor' threads"
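A note on the PRIORITY_CEILING default change: the ceiling acts as a numeric floor on priorities granted through k_mutex inheritance, and with the old default of 0 it silently barred inheritance from ever reaching the cooperative (negative) range. The sketch below is a hypothetical illustration of that clamp, not the kernel's actual code path:

    #include <sys/util.h>    /* MAX() */

    /* Hypothetical helper: lower numbers are logically higher
     * priorities, so clamping with MAX() bounds how high a mutex
     * owner can be boosted.
     */
    static int apply_ceiling(int inherited_prio)
    {
        return MAX(inherited_prio, CONFIG_PRIORITY_CEILING);
    }

    /* Old default 0:    apply_ceiling(-3) == 0  (boost clamped away)
     * New default -127: apply_ceiling(-3) == -3 (effectively no clamp)
     */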
diff --git a/kernel/idle.c b/kernel/idle.c
@@ -13,6 +13,7 @@
 #include <stdbool.h>
 #include <logging/log.h>
 #include <ksched.h>
+#include <kswap.h>
 
 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 
@@ -66,6 +67,8 @@ void idle(void *unused1, void *unused2, void *unused3)
 	ARG_UNUSED(unused2);
 	ARG_UNUSED(unused3);
 
+	__ASSERT_NO_MSG(_current->base.prio >= 0);
+
 	while (true) {
 		/* SMP systems without a working IPI can't
 		 * actual enter an idle state, because they
@@ -95,14 +98,21 @@ void idle(void *unused1, void *unused2, void *unused3)
 			k_cpu_idle();
 		}
 
-		/* It is possible to (pathologically) configure the
-		 * idle thread to have a non-preemptible priority.
-		 * You might think this is an API bug, but we actually
-		 * have a test that exercises this. Handle the edge
-		 * case when that happens.
-		 */
-		if (K_IDLE_PRIO < 0) {
-			k_yield();
-		}
+#if !defined(CONFIG_PREEMPT_ENABLED)
+# if !defined(CONFIG_USE_SWITCH) || defined(CONFIG_SPARC)
+		/* A legacy mess: the idle thread is by definition
+		 * preemptible as far as the modern scheduler is
+		 * concerned, but older platforms use
+		 * CONFIG_PREEMPT_ENABLED=n as an optimization hint
+		 * that interrupt exit always returns to the
+		 * interrupted context.  So in that setup we need to
+		 * explicitly yield in the idle thread otherwise
+		 * nothing else will run once it starts.
+		 */
+		if (_kernel.ready_q.cache != _current) {
+			z_swap_unlocked();
+		}
+# endif
+#endif
 	}
 }
 
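To make the workaround concrete: on the affected platforms (!CONFIG_USE_SWITCH, plus SPARC), interrupt exit with CONFIG_PREEMPT_ENABLED=n unconditionally resumes the interrupted context. The following is an illustrative sketch only; every function name in it is hypothetical:

    /* Hypothetical sketch of legacy interrupt-exit behavior */
    void legacy_interrupt_exit(void)
    {
    #ifdef CONFIG_PREEMPT_ENABLED
        if (next_thread_differs()) {        /* hypothetical */
            switch_to_next_thread();        /* hypothetical */
        }
    #endif
        /* With CONFIG_PREEMPT_ENABLED=n the check is compiled out,
         * so an interrupt taken in the idle thread always returns
         * to the idle thread.  Hence the idle loop itself must
         * notice a ready thread (_kernel.ready_q.cache != _current)
         * and call z_swap_unlocked().
         */
        restore_interrupted_context();      /* hypothetical */
    }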
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
@@ -250,27 +250,22 @@ static inline void _ready_one_thread(_wait_q_t *wq)
 
 static inline void z_sched_lock(void)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!arch_is_in_isr(), "");
 	__ASSERT(_current->base.sched_locked != 1U, "");
 
 	--_current->base.sched_locked;
 
 	compiler_barrier();
-
-#endif
 }
 
 static ALWAYS_INLINE void z_sched_unlock_no_reschedule(void)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
 	__ASSERT(!arch_is_in_isr(), "");
 	__ASSERT(_current->base.sched_locked != 0U, "");
 
 	compiler_barrier();
 
 	++_current->base.sched_locked;
-#endif
 }
 
 static ALWAYS_INLINE bool z_is_thread_timeout_expired(struct k_thread *thread)
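With the #ifdef gone, the sched_locked bookkeeping now runs in every configuration, including ones that formerly compiled it out under CONFIG_PREEMPT_ENABLED=n. Caller-side behavior is unchanged; a minimal usage sketch with the public wrappers:

    #include <kernel.h>

    void update_shared_state(void)
    {
        k_sched_lock();     /* decrements sched_locked, per the diff above */
        /* The current thread cannot be preempted here, regardless of
         * how many cooperative priorities are configured.
         */
        k_sched_unlock();   /* increments it back; may reschedule */
    }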
diff --git a/kernel/init.c b/kernel/init.c
@@ -259,7 +259,7 @@ static void init_idle_thread(int i)
 
 	z_setup_new_thread(thread, stack,
 			   CONFIG_IDLE_STACK_SIZE, idle, &_kernel.cpus[i],
-			   NULL, NULL, K_LOWEST_THREAD_PRIO, K_ESSENTIAL,
+			   NULL, NULL, K_IDLE_PRIO, K_ESSENTIAL,
 			   tname);
 	z_mark_thread_as_started(thread);
 
diff --git a/kernel/sched.c b/kernel/sched.c
@@ -53,12 +53,8 @@ static void end_thread(struct k_thread *thread);
 
 static inline int is_preempt(struct k_thread *thread)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
 	/* explanation in kernel_struct.h */
 	return thread->base.preempt <= _PREEMPT_THRESHOLD;
-#else
-	return 0;
-#endif
 }
 
 static inline int is_metairq(struct k_thread *thread)
@@ -154,15 +150,6 @@ static ALWAYS_INLINE bool should_preempt(struct k_thread *thread,
 		return true;
 	}
 
-	/* The idle threads can look "cooperative" if there are no
-	 * preemptible priorities (this is sort of an API glitch).
-	 * They must always be preemptible.
-	 */
-	if (!IS_ENABLED(CONFIG_PREEMPT_ENABLED) &&
-	    z_is_idle_thread_object(_current)) {
-		return true;
-	}
-
 	return false;
 }
 
@@ -845,7 +832,6 @@ void k_sched_lock(void)
 
 void k_sched_unlock(void)
 {
-#ifdef CONFIG_PREEMPT_ENABLED
 	LOCKED(&sched_spinlock) {
 		__ASSERT(_current->base.sched_locked != 0U, "");
 		__ASSERT(!arch_is_in_isr(), "");
@@ -860,7 +846,6 @@ void k_sched_unlock(void)
 	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);
 
 	z_reschedule_unlocked();
-#endif
 }
 
 struct k_thread *z_swap_next_thread(void)
@@ -1201,7 +1186,6 @@ void z_impl_k_yield(void)
 
 	SYS_PORT_TRACING_FUNC(k_thread, yield);
 
-	if (!z_is_idle_thread_object(_current)) {
-		k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
+	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
 
-		if (!IS_ENABLED(CONFIG_SMP) ||
+	if (!IS_ENABLED(CONFIG_SMP) ||
@@ -1212,9 +1196,6 @@ void z_impl_k_yield(void)
-		queue_thread(&_kernel.ready_q.runq, _current);
-		update_cache(1);
-		z_swap(&sched_spinlock, key);
-	} else {
-		z_swap_unlocked();
-	}
+	queue_thread(&_kernel.ready_q.runq, _current);
+	update_cache(1);
+	z_swap(&sched_spinlock, key);
 }
 
 #ifdef CONFIG_USERSPACE
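The is_preempt() simplification leans on the encoding that the in-tree comment points at (kernel_structs.h): thread priority and the scheduler-lock count share a union with a single 16-bit preempt key, so one unsigned comparison against _PREEMPT_THRESHOLD covers both "cooperative (negative) priority" and "scheduler lock held". A self-contained sketch of that encoding as I read it, with the byte order shown for little-endian targets:

    #include <assert.h>
    #include <stdint.h>

    #define _NON_PREEMPT_THRESHOLD 0x0080
    #define _PREEMPT_THRESHOLD (_NON_PREEMPT_THRESHOLD - 1)

    /* Little-endian layout sketch: prio in the low byte, sched_locked
     * in the high byte, overlaid with a single 16-bit "preempt" value.
     */
    union preempt_key {
        struct {
            int8_t prio;          /* negative = cooperative */
            uint8_t sched_locked; /* nonzero = lock held */
        };
        uint16_t preempt;
    };

    int main(void)
    {
        union preempt_key k = { .prio = 5, .sched_locked = 0 };

        /* Preemptible: small non-negative value */
        assert(k.preempt <= _PREEMPT_THRESHOLD);

        /* Cooperative priority: prio < 0 sets the low byte's top bit */
        k.prio = -1;
        assert(k.preempt > _PREEMPT_THRESHOLD);

        /* Scheduler lock: any nonzero high byte exceeds the threshold */
        k.prio = 5;
        k.sched_locked = 1;
        assert(k.preempt > _PREEMPT_THRESHOLD);
        return 0;
    }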
diff --git a/samples/philosophers/sample.yaml b/samples/philosophers/sample.yaml
@@ -38,4 +38,4 @@ tests:
   sample.kernel.philosopher.coop_only:
     extra_configs:
       - CONFIG_NUM_PREEMPT_PRIORITIES=0
-      - CONFIG_NUM_COOP_PRIORITIES=6
+      - CONFIG_NUM_COOP_PRIORITIES=7
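The arithmetic behind the 6 -> 7 bump: the sample hands out priorities -7 through -2, and only the old coop-only scheme's hidden "+ 1" made 6 configured priorities reach -7. A compile-time sketch (macros renamed here for side-by-side comparison):

    /* Old coop-only scheme: K_PRIO_COOP(x) == -((N + 1) - x) */
    #define OLD_K_PRIO_COOP(n, x) (-(((n) + 1) - (x)))
    /* New scheme: K_PRIO_COOP(x) == -(N - x) */
    #define NEW_K_PRIO_COOP(n, x) (-((n) - (x)))

    /* The sample hands out -7 .. -2.  With 6 configured priorities
     * the old scheme happened to start at -7; the new scheme needs 7.
     */
    _Static_assert(OLD_K_PRIO_COOP(6, 0) == -7, "old accident");
    _Static_assert(NEW_K_PRIO_COOP(6, 0) == -6, "would break the sample");
    _Static_assert(NEW_K_PRIO_COOP(7, 0) == -7, "hence the bump to 7");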