diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 0af01cd3752..237cfc9f5f1 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -57,6 +57,7 @@ struct k_thread *z_find_first_thread_to_unpend(_wait_q_t *wait_q,
 					       struct k_thread *from);
 void idle(void *a, void *b, void *c);
 void z_time_slice(int ticks);
+void z_reset_time_slice(void);
 void z_sched_abort(struct k_thread *thread);
 void z_sched_ipi(void);
 
diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h
index 0d595a87ec1..7991be97a29 100644
--- a/kernel/include/kswap.h
+++ b/kernel/include/kswap.h
@@ -64,6 +64,10 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 	new_thread = z_get_next_ready_thread();
 
 	if (new_thread != old_thread) {
+#ifdef CONFIG_TIMESLICING
+		z_reset_time_slice();
+#endif
+
 		old_thread->swap_retval = -EAGAIN;
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched.c b/kernel/sched.c
index 4d5990fd5b3..f682899b3a6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -249,7 +249,7 @@ static int slice_max_prio;
 static struct k_thread *pending_current;
 #endif
 
-static void reset_time_slice(void)
+void z_reset_time_slice(void)
 {
 	/* Add the elapsed time since the last announced tick to the
 	 * slice count, as we'll see those "expired" ticks arrive in a
@@ -267,7 +267,7 @@ void k_sched_time_slice_set(s32_t slice, int prio)
 		_current_cpu->slice_ticks = 0;
 		slice_time = z_ms_to_ticks(slice);
 		slice_max_prio = prio;
-		reset_time_slice();
+		z_reset_time_slice();
 	}
 }
 
@@ -284,7 +284,7 @@ void z_time_slice(int ticks)
 {
 #ifdef CONFIG_SWAP_NONATOMIC
 	if (pending_current == _current) {
-		reset_time_slice();
+		z_reset_time_slice();
 		return;
 	}
 	pending_current = NULL;
@@ -293,7 +293,7 @@ void z_time_slice(int ticks)
 	if (slice_time && sliceable(_current)) {
 		if (ticks >= _current_cpu->slice_ticks) {
 			z_move_thread_to_end_of_prio_q(_current);
-			reset_time_slice();
+			z_reset_time_slice();
 		} else {
 			_current_cpu->slice_ticks -= ticks;
 		}
@@ -301,8 +301,6 @@ void z_time_slice(int ticks)
 		_current_cpu->slice_ticks = 0;
 	}
 }
-#else
-static void reset_time_slice(void) { /* !CONFIG_TIMESLICING */ }
 #endif
 
 static void update_cache(int preempt_ok)
@@ -311,9 +309,11 @@ static void update_cache(int preempt_ok)
 	struct k_thread *th = next_up();
 
 	if (should_preempt(th, preempt_ok)) {
+#ifdef CONFIG_TIMESLICING
 		if (th != _current) {
-			reset_time_slice();
+			z_reset_time_slice();
 		}
+#endif
 		_kernel.ready_q.cache = th;
 	} else {
 		_kernel.ready_q.cache = _current;
@@ -342,7 +342,9 @@ void z_add_thread_to_ready_q(struct k_thread *thread)
 void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
 {
 	LOCKED(&sched_spinlock) {
-		_priq_run_remove(&_kernel.ready_q.runq, thread);
+		if (z_is_thread_queued(thread)) {
+			_priq_run_remove(&_kernel.ready_q.runq, thread);
+		}
 		_priq_run_add(&_kernel.ready_q.runq, thread);
 		z_mark_thread_as_queued(thread);
 		update_cache(thread == _current);
@@ -609,7 +611,9 @@ void *z_get_next_switch_handle(void *interrupted)
 		struct k_thread *th = next_up();
 
 		if (_current != th) {
-			reset_time_slice();
+#ifdef CONFIG_TIMESLICING
+			z_reset_time_slice();
+#endif
 			_current_cpu->swap_ok = 0;
 			set_current(th);
 #ifdef SPIN_VALIDATE
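
For context on what the rename buys: z_reset_time_slice() recharges the current CPU's slice budget to the full slice length plus any ticks that have already elapsed but not yet been announced, so the subtraction in z_time_slice() stays balanced when those "expired" ticks arrive later. Below is a minimal stand-alone model of that accounting, offered as a sketch of one reading of the diff; every *_model name is hypothetical, and the real kernel keeps slice_ticks in per-CPU state under sched_spinlock rather than in globals.

```c
/* Toy model of the slice accounting in kernel/sched.c; illustration
 * only, not kernel API. Compiles with any C compiler.
 */
#include <stdbool.h>
#include <stdio.h>

static int slice_time_model = 5;	/* slice length in ticks (assumed) */
static int slice_ticks_model;		/* remaining budget for the current thread */
static int elapsed_model;		/* ticks elapsed but not yet announced */

/* Mirrors z_reset_time_slice(): recharge the budget, folding in the
 * already-elapsed ticks that a future z_time_slice() call will report.
 */
static void reset_time_slice_model(void)
{
	slice_ticks_model = slice_time_model + elapsed_model;
}

/* Mirrors the countdown in z_time_slice(): once the budget is spent,
 * the caller would rotate the current thread to the end of its
 * priority queue and the budget is recharged for the next thread.
 */
static bool time_slice_model(int ticks)
{
	if (ticks >= slice_ticks_model) {
		reset_time_slice_model();
		return true;	/* rotate the run queue */
	}
	slice_ticks_model -= ticks;
	return false;
}

int main(void)
{
	reset_time_slice_model();
	for (int t = 0; t < 12; t++) {
		if (time_slice_model(1)) {
			printf("tick %d: slice expired, rotate\n", t);
		}
	}
	return 0;
}
```

The new z_is_thread_queued() guard in z_move_thread_to_end_of_prio_q() looks related: with slice resets now firing from the context-switch paths, that function can plausibly be reached for a thread that is not currently in the run queue (e.g. the running thread on SMP), and the guard keeps the previously unconditional _priq_run_remove() from operating on a thread that was never queued.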