diff --git a/kernel/idle.c b/kernel/idle.c
index 920d05272cd..7baa462f902 100644
--- a/kernel/idle.c
+++ b/kernel/idle.c
@@ -37,6 +37,9 @@ void __attribute__((weak)) _sys_soc_resume_from_deep_sleep(void)
 {
 }
 #endif
+
+#endif /* CONFIG_SYS_POWER_MANAGEMENT */
+
 /**
  *
  * @brief Indicate that kernel is idling in tickless mode
@@ -50,15 +53,16 @@ void __attribute__((weak)) _sys_soc_resume_from_deep_sleep(void)
  */
 static void set_kernel_idle_time_in_ticks(s32_t ticks)
 {
+#ifdef CONFIG_SYS_POWER_MANAGEMENT
 	_kernel.idle = ticks;
-}
-#else
-#define set_kernel_idle_time_in_ticks(x) do { } while (false)
 #endif
+}
 
 #ifndef CONFIG_SMP
-static void sys_power_save_idle(s32_t ticks)
+static void sys_power_save_idle(void)
 {
+	s32_t ticks = _get_next_timeout_expiry();
+
 	/* The documented behavior of CONFIG_TICKLESS_IDLE_THRESH is
 	 * that the system should not enter a tickless idle for
 	 * periods less than that.  This seems... silly, given that it
@@ -66,7 +70,7 @@ static void sys_power_save_idle(s32_t ticks)
 	 * API we need to honor...
 	 */
 #ifdef CONFIG_SYS_CLOCK_EXISTS
-	z_clock_set_timeout(ticks < IDLE_THRESH ? 1 : ticks, true);
+	z_set_timeout_expiry((ticks < IDLE_THRESH) ? 1 : ticks, true);
 #endif
 
 	set_kernel_idle_time_in_ticks(ticks);
@@ -148,7 +152,7 @@ void idle(void *unused1, void *unused2, void *unused3)
 #else
 	for (;;) {
 		(void)irq_lock();
-		sys_power_save_idle(_get_next_timeout_expiry());
+		sys_power_save_idle();
 
 		IDLE_YIELD_IF_COOP();
 	}
diff --git a/kernel/include/timeout_q.h b/kernel/include/timeout_q.h
index 6cb6a7cdb62..a8d6428c91f 100644
--- a/kernel/include/timeout_q.h
+++ b/kernel/include/timeout_q.h
@@ -48,6 +48,8 @@ static inline int _abort_thread_timeout(struct k_thread *thread)
 
 s32_t _get_next_timeout_expiry(void);
 
+void z_set_timeout_expiry(s32_t ticks, bool idle);
+
 s32_t z_timeout_remaining(struct _timeout *timeout);
 
 #else
@@ -57,6 +59,7 @@ s32_t z_timeout_remaining(struct _timeout *timeout);
 #define _add_thread_timeout(th, to) do {} while (0 && (void *)to && (void *)th)
 #define _abort_thread_timeout(t) (0)
 #define _get_next_timeout_expiry() (K_FOREVER)
+#define z_set_timeout_expiry(t, i) do {} while (0)
 
 #endif
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 18adef0dcb6..96fb4985571 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -219,17 +219,13 @@ static int slice_max_prio;
  */
 static void reset_time_slice(void)
 {
-	int to = _get_next_timeout_expiry();
-
 	/* Add the elapsed time since the last announced tick to the
 	 * slice count, as we'll see those "expired" ticks arrive in a
 	 * FUTURE z_time_slice() call.
 	 */
 	_current_cpu->slice_ticks = slice_time + z_clock_elapsed();
 
-	if (to == K_FOREVER || slice_time < to) {
-		z_clock_set_timeout(slice_time, false);
-	}
+	z_set_timeout_expiry(slice_time, false);
 }
 
 void k_sched_time_slice_set(s32_t duration_in_ms, int prio)
diff --git a/kernel/timeout.c b/kernel/timeout.c
index f97076b5a6f..4ffae501c1a 100644
--- a/kernel/timeout.c
+++ b/kernel/timeout.c
@@ -85,7 +85,9 @@ void _add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks)
 			sys_dlist_append(&timeout_list, &to->node);
 		}
 
-		z_clock_set_timeout(_get_next_timeout_expiry(), false);
+		if (to == first()) {
+			z_clock_set_timeout(_get_next_timeout_expiry(), false);
+		}
 	}
 }
 
@@ -182,6 +184,17 @@ s32_t _get_next_timeout_expiry(void)
 	return ret;
 }
 
+void z_set_timeout_expiry(s32_t ticks, bool idle)
+{
+	LOCKED(&timeout_lock) {
+		int next = _get_next_timeout_expiry();
+
+		if ((next == K_FOREVER) || (ticks < next)) {
+			z_clock_set_timeout(ticks, idle);
+		}
+	}
+}
+
 int k_enable_sys_clock_always_on(void)
 {
 	int ret = !can_wait_forever;
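For readers skimming the patch: the new z_set_timeout_expiry() only reprograms the hardware timer when the requested deadline is earlier than the next queued timeout (or when nothing is queued), so callers such as reset_time_slice() and sys_power_save_idle() no longer have to compare against _get_next_timeout_expiry() themselves. Below is a minimal standalone sketch of that "earliest deadline wins" policy, not Zephyr code: the mock_* helpers, the FOREVER constant, and the hard-coded next_pending value are illustrative stand-ins, and the timeout_lock held by the real function is omitted.

    /* Standalone sketch (illustrative only) of the policy implemented by
     * z_set_timeout_expiry() in this patch: a caller may pull the timer
     * deadline earlier, but never push it past the next pending timeout.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define FOREVER (-1)            /* stands in for K_FOREVER */

    static int next_pending = 100;  /* pretend next queued timeout, in ticks */
    static int programmed = -1;     /* last value handed to the "driver" */

    /* Stand-in for z_clock_set_timeout(): just record what was programmed. */
    static void mock_clock_set_timeout(int ticks, bool idle)
    {
        programmed = ticks;
        printf("timer programmed for %d ticks (idle=%d)\n", ticks, idle);
    }

    /* Stand-in for z_set_timeout_expiry(): only move the deadline earlier. */
    static void mock_set_timeout_expiry(int ticks, bool idle)
    {
        int next = next_pending;    /* _get_next_timeout_expiry() in the patch */

        if (next == FOREVER || ticks < next) {
            mock_clock_set_timeout(ticks, idle);
        }
    }

    int main(void)
    {
        mock_set_timeout_expiry(10, false);   /* earlier than 100: programs 10 */
        mock_set_timeout_expiry(500, false);  /* later than 100: ignored */
        printf("final programmed deadline: %d\n", programmed);
        return 0;
    }

The same idea explains the _add_timeout() hunk: the timer only needs reprogramming when the newly queued timeout becomes the head of the list, since anything later than the current head cannot change the next expiry.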