diff --git a/doc/kernel/threads/scheduling.rst b/doc/kernel/threads/scheduling.rst
index 9b4391d9cb7..8cdd0cbdd61 100644
--- a/doc/kernel/threads/scheduling.rst
+++ b/doc/kernel/threads/scheduling.rst
@@ -175,6 +175,39 @@ becomes the current thread, its non-preemptible status is maintained.
 Locking out the scheduler is a more efficient way for a preemptible thread
 to inhibit preemption than changing its priority level to a negative value.
 
+.. _metairq_priorities:
+
+Meta-IRQ Priorities
+===================
+
+When enabled (see :option:`CONFIG_NUM_METAIRQ_PRIORITIES`), there is a special
+subclass of cooperative priorities at the highest (numerically lowest)
+end of the priority space: meta-IRQ threads. These are scheduled
+according to their normal priority, but also have the special ability
+to preempt all other threads (and other meta-IRQ threads) at lower
+priorities, even if those threads are cooperative and/or have taken a
+scheduler lock.
+
+This behavior makes the act of unblocking a meta-IRQ thread (by any
+means, e.g. creating it, calling k_sem_give(), etc.) into the
+equivalent of a synchronous system call when done by a lower
+priority thread, or an ARM-like "pended IRQ" when done from true
+interrupt context. The intent is that this feature will be used to
+implement interrupt "bottom half" processing and/or "tasklet" features
+in driver subsystems. The thread, once woken, is guaranteed to
+run before the current CPU returns into application code.
+
+Unlike similar features in other OSes, meta-IRQ threads are true
+threads and run on their own stack (which must be allocated normally),
+not the per-CPU interrupt stack. Design work to enable the use of the
+IRQ stack on supported architectures is pending.
+
+Note that because this breaks the promise made to cooperative
+threads by the Zephyr API (namely that the OS won't schedule other
+threads until the current thread deliberately blocks), it should be
+used only with great care from application code. These are not simply
+very high priority threads and should not be used as such.
+
 .. _thread_sleeping:
 
 Thread Sleeping
diff --git a/kernel/Kconfig b/kernel/Kconfig
index e149440f7b5..4694c8bb636 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -100,6 +100,28 @@ config PRIORITY_CEILING
 	prompt "Priority inheritance ceiling"
 	default 0
 
+config NUM_METAIRQ_PRIORITIES
+	int
+	prompt "Number of very-high priority 'preemptor' threads"
+	default 0
+	help
+	  This defines a set of priorities at the (numerically) lowest
+	  end of the range which have "meta-irq" behavior. Runnable
+	  threads at these priorities will always be scheduled before
+	  threads at lower priorities, EVEN IF those threads are
+	  otherwise cooperative and/or have taken a scheduler lock.
+	  Making such a thread runnable in any way thus has the effect
+	  of "interrupting" the current task and running the meta-irq
+	  thread synchronously, like an exception or system call. The
+	  intent is to use these priorities to implement "interrupt
+	  bottom half" or "tasklet" behavior, allowing driver
+	  subsystems to return from interrupt context but be guaranteed
+	  that user code will not be executed (on the current CPU)
+	  until the remaining work is finished. As this breaks the
+	  "promise" of non-preemptibility granted by the current API
+	  for cooperative threads, this tool probably shouldn't be used
+	  from application code.
+
 config MAIN_STACK_SIZE
 	int
 	prompt "Size of stack for initialization and main thread"
diff --git a/kernel/sched.c b/kernel/sched.c
index 08e0de166bc..729716e1a8c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -52,6 +52,16 @@ static inline int _is_preempt(struct k_thread *thread)
 #endif
 }
 
+static inline int is_metairq(struct k_thread *thread)
+{
+#if CONFIG_NUM_METAIRQ_PRIORITIES > 0
+	return (thread->base.prio - K_HIGHEST_THREAD_PRIO)
+		< CONFIG_NUM_METAIRQ_PRIORITIES;
+#else
+	return 0;
+#endif
+}
+
 static inline int _is_thread_dummy(struct k_thread *thread)
 {
 	return !!(thread->base.thread_state & _THREAD_DUMMY);
@@ -90,6 +100,7 @@ static struct k_thread *next_up(void)
 
 	return th ? th : _current_cpu->idle_thread;
 #else
+
 	/* Under SMP, the "cache" mechanism for selecting the next
 	 * thread doesn't work, so we have more work to do to test
 	 * _current against the best choice from the queue.
@@ -108,7 +119,8 @@ static struct k_thread *next_up(void)
 	 * queue and something better is available (c.f. timeslicing,
 	 * yield)
 	 */
-	if (active && !queued && !_is_t1_higher_prio_than_t2(th, _current)) {
+	if (active && !queued && !_is_t1_higher_prio_than_t2(th, _current)
+	    && !is_metairq(th)) {
 		th = _current;
 	}
 
@@ -135,7 +147,7 @@ static void update_cache(int preempt_ok)
 	/* Don't preempt cooperative threads unless the caller allows
 	 * it (i.e. k_yield())
 	 */
-	if (!preempt_ok && !_is_preempt(_current)) {
+	if (!preempt_ok && !_is_preempt(_current) && !is_metairq(th)) {
 		th = _current;
 	}
 }
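
For context, here is a minimal sketch (not part of the patch) of how a driver might use a meta-IRQ thread as an interrupt "bottom half". It assumes the kernel is built with CONFIG_NUM_METAIRQ_PRIORITIES=1, so that K_HIGHEST_THREAD_PRIO is the single meta-IRQ priority; the names bottom_half_*, my_isr() and process_hardware_event() are hypothetical.

#include <kernel.h>

/* Hypothetical deferred-work routine, defined elsewhere in the driver. */
extern void process_hardware_event(void);

K_SEM_DEFINE(bottom_half_sem, 0, 1);
K_THREAD_STACK_DEFINE(bottom_half_stack, 1024);
static struct k_thread bottom_half_thread;

/* Meta-IRQ thread: sleeps until the ISR signals it, then finishes the
 * interrupt's work at a priority that preempts even cooperative threads.
 */
static void bottom_half_fn(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (1) {
		k_sem_take(&bottom_half_sem, K_FOREVER);
		process_hardware_event();
	}
}

static void my_isr(void *arg)
{
	ARG_UNUSED(arg);

	/* Unblocking the meta-IRQ thread here acts like an ARM "pended
	 * IRQ": it runs as soon as the ISR returns, before any
	 * application thread on this CPU.
	 */
	k_sem_give(&bottom_half_sem);
}

void start_bottom_half(void)
{
	/* With CONFIG_NUM_METAIRQ_PRIORITIES=1, K_HIGHEST_THREAD_PRIO
	 * is the meta-IRQ priority.
	 */
	k_thread_create(&bottom_half_thread, bottom_half_stack,
			K_THREAD_STACK_SIZEOF(bottom_half_stack),
			bottom_half_fn, NULL, NULL, NULL,
			K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);
}

Because the k_sem_give() from the ISR unblocks a meta-IRQ thread, the deferred work runs before the interrupted thread resumes, even if that thread is cooperative or holds a scheduler lock, which is the behavior the next_up()/update_cache() changes above provide.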