From bc6fb65c81c4a23685410c4cdf55df2922f2d209 Mon Sep 17 00:00:00 2001
From: Andy Ross
Date: Thu, 23 Aug 2018 11:31:50 -0700
Subject: [PATCH] sched: Properly account for timeslicing in tickless mode

When adding a new runnable thread in tickless mode, we need to detect
whether it will timeslice with the running thread and reset the timer,
otherwise it won't get any CPU time until the next interrupt fires at
some indeterminate time in the future.

This fixes the specific bug discussed in #7193, but the broader
problem of tickless and timeslicing interacting badly remains.  The
code as it exists needs some rework to avoid all the #ifdef mess.

Note that the patch also moves _ready_thread() from a ksched.h inline
to sched.c.

Signed-off-by: Andy Ross
---
 kernel/include/ksched.h | 11 +----------
 kernel/sched.c          | 15 +++++++++++++++
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 9983625ad79..b69175de545 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -47,6 +47,7 @@ void *_get_next_switch_handle(void *interrupted);
 struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
					      struct k_thread *from);
 void idle(void *a, void *b, void *c);
+void _ready_thread(struct k_thread *thread);
 
 /* find which one is the next thread to run */
 /* must be called with interrupts locked */
@@ -216,16 +217,6 @@ static inline int _is_valid_prio(int prio, void *entry_point)
 	return 1;
 }
 
-static inline void _ready_thread(struct k_thread *thread)
-{
-	if (_is_thread_ready(thread)) {
-		_add_thread_to_ready_q(thread);
-	}
-
-	sys_trace_thread_ready(thread);
-
-}
-
 static inline void _ready_one_thread(_wait_q_t *wq)
 {
 	struct k_thread *th = _unpend_first_thread(wq);
diff --git a/kernel/sched.c b/kernel/sched.c
index 108f5102add..f97c0974d59 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -658,6 +658,21 @@ void _update_time_slice_before_swap(void)
 }
 #endif /* CONFIG_TIMESLICING */
 
+void _ready_thread(struct k_thread *thread)
+{
+	if (_is_thread_ready(thread)) {
+		_add_thread_to_ready_q(thread);
+	}
+
+#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_SMP)
+	if (_is_thread_time_slicing(_get_next_ready_thread())) {
+		_set_time(_time_slice_duration);
+	}
+#endif
+
+	sys_trace_thread_ready(thread);
+}
+
 int _unpend_all(_wait_q_t *waitq)
 {
 	int need_sched = 0;
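
For readers following the reasoning in the commit message, the snippet below is a
standalone, simplified model of the new _ready_thread() behavior, not kernel code:
struct k_thread_model, ready_thread_model(), and the stubbed helpers are invented
stand-ins for the real _is_thread_ready(), _add_thread_to_ready_q(),
_is_thread_time_slicing(), _get_next_ready_thread() and _set_time() internals, and
the tick count is arbitrary.

#include <stdbool.h>
#include <stdio.h>

struct k_thread_model {
	bool ready;          /* runnable: not suspended, not pended */
	bool time_slicing;   /* priority falls inside the timeslice range */
};

/* Stand-ins for the ready queue head and the timeslice duration */
static struct k_thread_model *next_ready;
static const unsigned int time_slice_duration = 10; /* ticks, arbitrary */

static void add_thread_to_ready_q(struct k_thread_model *t)
{
	next_ready = t;
}

static void set_time(unsigned int ticks)
{
	printf("timeslice timer re-armed for %u ticks\n", ticks);
}

static void ready_thread_model(struct k_thread_model *thread)
{
	/* Step 1 (unchanged behavior): queue the thread if it is runnable */
	if (thread->ready) {
		add_thread_to_ready_q(thread);
	}

	/* Step 2 (the fix): in tickless mode there is no periodic tick to
	 * drive timeslicing, so if the thread now at the head of the ready
	 * queue is subject to timeslicing, re-arm the timer explicitly.
	 * Otherwise the newly readied thread gets no CPU time until some
	 * unrelated interrupt happens to fire.
	 */
	if (next_ready != NULL && next_ready->time_slicing) {
		set_time(time_slice_duration);
	}
}

int main(void)
{
	struct k_thread_model t = { .ready = true, .time_slicing = true };

	ready_thread_model(&t); /* prints: timeslice timer re-armed for 10 ticks */
	return 0;
}

In the actual patch this second step is wrapped in
#if defined(CONFIG_TICKLESS_KERNEL) && !defined(CONFIG_SMP), since a ticking kernel
already re-evaluates timeslicing on every tick and the SMP case is not addressed
here; that guard is part of the #ifdef mess the commit message says still needs
rework.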