diff --git a/include/kernel.h b/include/kernel.h
index 419a1b84106..6db33e6bb61 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -466,6 +466,10 @@ struct _thread_base {
 		u16_t preempt;
 	};
 
+#ifdef CONFIG_SCHED_DEADLINE
+	int prio_deadline;
+#endif
+
 	u32_t order_key;
 
 #ifdef CONFIG_SMP
@@ -1000,6 +1004,38 @@ __syscall int k_thread_priority_get(k_tid_t thread);
  */
 __syscall void k_thread_priority_set(k_tid_t thread, int prio);
 
+
+#ifdef CONFIG_SCHED_DEADLINE
+/**
+ * @brief Set deadline expiration time for scheduler
+ *
+ * This sets the "deadline" expiration as a time delta from the
+ * current time, in the same units used by k_cycle_get_32(). The
+ * scheduler (when deadline scheduling is enabled) will choose the
+ * next expiring thread when selecting between threads at the same
+ * static priority. Threads at different priorities will be scheduled
+ * according to their static priority.
+ *
+ * @note Deadlines that are negative (i.e. in the past) are still seen
+ * as higher priority than other deadlines, even if the thread has
+ * "finished" its work. If you don't want it scheduled anymore, you
+ * have to reset the deadline into the future, block/pend the thread,
+ * or modify its priority with k_thread_priority_set().
+ *
+ * @note Despite the API naming, the scheduler makes no guarantees that
+ * the thread WILL be scheduled within that deadline, nor does it take
+ * extra metadata (e.g. the "runtime" and "period" parameters in
+ * Linux sched_setattr()) that would allow the kernel to validate the
+ * scheduling for achievability. Such features could be implemented
+ * above this call, which is simply input to the priority selection
+ * logic.
+ *
+ * @param thread A thread on which to set the deadline
+ * @param deadline A time delta, in cycle units
+ */
+__syscall void k_thread_deadline_set(k_tid_t thread, int deadline);
+#endif
+
 /**
  * @brief Suspend a thread.
  *
diff --git a/kernel/Kconfig b/kernel/Kconfig
index 4694c8bb636..18ff7af6e83 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -122,6 +122,18 @@ config NUM_METAIRQ_PRIORITIES
 	  for cooperative threads, this tool probably shouldn't be used
 	  from application code.
 
+config SCHED_DEADLINE
+	bool
+	prompt "Enable earliest-deadline-first scheduling"
+	default n
+	help
+	  This enables a simple "earliest deadline first" scheduling
+	  mode where threads can set "deadline" deltas measured in
+	  k_cycle_get_32() units. Priority decisions within (and only
+	  within) a single static priority will choose the next expiring
+	  deadline and not simply the least recently added thread.
+
+
 config MAIN_STACK_SIZE
 	int
 	prompt "Size of stack for initialization and main thread"
diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 65d280450f2..4cfc3af722f 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -213,11 +213,7 @@ static inline int _is_prio_lower_or_equal(int prio1, int prio2)
 	return _is_prio1_lower_than_or_equal_to_prio2(prio1, prio2);
 }
 
-static inline int _is_t1_higher_prio_than_t2(struct k_thread *t1,
-					     struct k_thread *t2)
-{
-	return _is_prio1_higher_than_prio2(t1->base.prio, t2->base.prio);
-}
+int _is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2);
 
 static inline int _is_valid_prio(int prio, void *entry_point)
 {
diff --git a/kernel/sched.c b/kernel/sched.c
index 729716e1a8c..c7fa3bb3884 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -87,6 +87,31 @@ s32_t _ms_to_ticks(s32_t ms)
 }
 #endif
 
+int _is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2)
+{
+	if (t1->base.prio < t2->base.prio) {
+		return 1;
+	}
+
+#ifdef CONFIG_SCHED_DEADLINE
+	/* Note that we don't care about wraparound conditions.  The
+	 * expectation is that the application will have arranged to
+	 * block the threads, change their priorities or reset their
+	 * deadlines when the job is complete.  Letting the deadlines
+	 * go negative is fine and in fact prevents aliasing bugs.
+	 */
+	if (t1->base.prio == t2->base.prio) {
+		int now = (int) k_cycle_get_32();
+		int dt1 = t1->base.prio_deadline - now;
+		int dt2 = t2->base.prio_deadline - now;
+
+		return dt1 < dt2;
+	}
+#endif
+
+	return 0;
+}
+
 static struct k_thread *next_up(void)
 {
 #ifndef CONFIG_SMP
@@ -607,6 +632,36 @@ Z_SYSCALL_HANDLER(k_thread_priority_set, thread_p, prio)
 }
 #endif
 
+#ifdef CONFIG_SCHED_DEADLINE
+void _impl_k_thread_deadline_set(k_tid_t tid, int deadline)
+{
+	struct k_thread *th = tid;
+
+	LOCKED(&sched_lock) {
+		th->base.prio_deadline = k_cycle_get_32() + deadline;
+		if (_is_thread_queued(th)) {
+			_priq_run_remove(&_kernel.ready_q.runq, th);
+			_priq_run_add(&_kernel.ready_q.runq, th);
+		}
+	}
+}
+
+#ifdef CONFIG_USERSPACE
+Z_SYSCALL_HANDLER(k_thread_deadline_set, thread_p, deadline)
+{
+	struct k_thread *thread = (struct k_thread *)thread_p;
+
+	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
+	Z_OOPS(Z_SYSCALL_VERIFY_MSG(deadline > 0,
+				    "invalid thread deadline %d",
+				    (int)deadline));
+
+	_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
+	return 0;
+}
+#endif
+#endif
+
 void _impl_k_yield(void)
 {
 	__ASSERT(!_is_in_isr(), "");
diff --git a/kernel/thread.c b/kernel/thread.c
index cdc074b4315..827a984fa51 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -313,6 +313,9 @@ void _setup_new_thread(struct k_thread *new_thread,
 	if (options & K_INHERIT_PERMS) {
 		_thread_perms_inherit(_current, new_thread);
 	}
+#endif
+#ifdef CONFIG_SCHED_DEADLINE
+	new_thread->base.prio_deadline = 0;
 #endif
 	new_thread->resource_pool = _current->resource_pool;
 }
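Reviewer note, not part of the patch: a minimal usage sketch of the new call. It assumes CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC matches the rate of k_cycle_get_32() on the target; the DEADLINE_CYCLES() helper, the periodic_worker() entry point, and the k_busy_wait() stand-in for real work are all hypothetical.

/* Sketch: a periodic worker re-arms its deadline at the top of each
 * period so the EDF comparator can order it against other threads
 * that share its static priority.
 */
#include <kernel.h>

/* Convert milliseconds to k_cycle_get_32() units (assumes the cycle
 * counter runs at CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC).
 */
#define DEADLINE_CYCLES(ms) \
	((int)(((s64_t)(ms) * CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC) / 1000))

void periodic_worker(void *p1, void *p2, void *p3)
{
	int period_ms = (int)(long)p1;

	while (1) {
		/* Declare how soon this iteration must finish; this
		 * only affects ordering among threads at the same
		 * static priority.
		 */
		k_thread_deadline_set(k_current_get(),
				      DEADLINE_CYCLES(period_ms));

		k_busy_wait(1000);	/* stand-in for the real job */

		k_sleep(period_ms);	/* wait out the rest of the period */
	}
}

Threads spawned on this entry point at a common priority are then picked in order of nearest deadline rather than strictly FIFO.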
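Also a reviewer note, not part of the patch: the wraparound comment in _is_t1_higher_prio_than_t2() can be checked with a small host-side program (plain C, counter values invented for the example). Comparing absolute 32-bit deadline values misorders threads across the counter wrap, while the signed deltas from "now" used by the comparator stay correct as long as both deadlines lie within about 2^31 cycles of the current time.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t now = 0xfffffff0u;	/* cycle counter about to wrap */
	uint32_t d1 = now + 100u;	/* later deadline; wraps to 0x54 */
	uint32_t d2 = now + 10u;	/* earlier deadline; 0xfffffffa */

	/* Naive absolute comparison: d1 looks "earlier" than d2. */
	printf("absolute: d1 %s d2\n", d1 < d2 ? "<" : ">=");

	/* Signed deltas from "now" still sort earliest-first. */
	int dt1 = (int)(d1 - now);
	int dt2 = (int)(d2 - now);

	printf("deltas: dt1=%d dt2=%d -> %s expires first\n",
	       dt1, dt2, dt1 < dt2 ? "d1" : "d2");

	return 0;
}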