diff --git a/include/zephyr/pm/policy.h b/include/zephyr/pm/policy.h index 6fe91663bb6..caba887805a 100644 --- a/include/zephyr/pm/policy.h +++ b/include/zephyr/pm/policy.h @@ -188,6 +188,8 @@ void pm_policy_latency_changed_unsubscribe(struct pm_policy_latency_subscription * will wake up the system at a known time in the future. By registering such * event, the policy manager will be able to decide whether certain power states * are worth entering or not. + * The CPU is woken up before the time passed in cycle to avoid the event handling + * latency. * * @note It is mandatory to unregister events once they have happened by using * pm_policy_event_unregister(). Not doing so is an API contract violation, @@ -195,21 +197,21 @@ void pm_policy_latency_changed_unsubscribe(struct pm_policy_latency_subscription * *far* future, that is, after the cycle counter rollover. * * @param evt Event. - * @param time_us When the event will occur, in microseconds from now. + * @param cycle When the event will occur, in absolute time (cycles). * * @see pm_policy_event_unregister */ -void pm_policy_event_register(struct pm_policy_event *evt, uint32_t time_us); +void pm_policy_event_register(struct pm_policy_event *evt, uint32_t cycle); /** * @brief Update an event. * * @param evt Event. - * @param time_us When the event will occur, in microseconds from now. + * @param cycle When the event will occur, in absolute time (cycles). * * @see pm_policy_event_register */ -void pm_policy_event_update(struct pm_policy_event *evt, uint32_t time_us); +void pm_policy_event_update(struct pm_policy_event *evt, uint32_t cycle); /** * @brief Unregister an event. @@ -246,6 +248,14 @@ void pm_policy_device_power_lock_get(const struct device *dev); */ void pm_policy_device_power_lock_put(const struct device *dev); +/** + * @brief Returns the ticks until the next event + * + * If an event is registered, it will return the number of ticks until the next event as + * a positive or zero value. 
Otherwise it returns -1 + */ +int32_t pm_policy_next_event_ticks(void); + #else static inline void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id) { @@ -287,18 +297,16 @@ static inline void pm_policy_latency_request_remove( ARG_UNUSED(req); } -static inline void pm_policy_event_register(struct pm_policy_event *evt, - uint32_t time_us) +static inline void pm_policy_event_register(struct pm_policy_event *evt, uint32_t cycle) { ARG_UNUSED(evt); - ARG_UNUSED(time_us); + ARG_UNUSED(cycle); } -static inline void pm_policy_event_update(struct pm_policy_event *evt, - uint32_t time_us) +static inline void pm_policy_event_update(struct pm_policy_event *evt, uint32_t cycle) { ARG_UNUSED(evt); - ARG_UNUSED(time_us); + ARG_UNUSED(cycle); } static inline void pm_policy_event_unregister(struct pm_policy_event *evt) @@ -316,6 +324,11 @@ static inline void pm_policy_device_power_lock_put(const struct device *dev) { ARG_UNUSED(dev); } +static inline int32_t pm_policy_next_event_ticks(void) +{ + return -1; +} + #endif /* CONFIG_PM */ /** diff --git a/subsys/pm/pm.c b/subsys/pm/pm.c index 25e60b68a6d..b58caaa2b78 100644 --- a/subsys/pm/pm.c +++ b/subsys/pm/pm.c @@ -68,6 +68,29 @@ static inline void pm_state_notify(bool entering_state) k_spin_unlock(&pm_notifier_lock, pm_notifier_key); } +static inline int32_t ticks_expiring_sooner(int32_t ticks1, int32_t ticks2) +{ + /* + * Ticks are relative numbers that define the number of ticks + * until the next event. 
+ * Its maximum value is K_TICKS_FOREVER ((uint32_t)-1) which is -1 + * when we cast it to (int32_t) + * We need to find out which one is the closest + */ + + __ASSERT(ticks1 >= -1, "ticks1 has unexpected negative value"); + __ASSERT(ticks2 >= -1, "ticks2 has unexpected negative value"); + + if (ticks1 == K_TICKS_FOREVER) { + return ticks2; + } + if (ticks2 == K_TICKS_FOREVER) { + return ticks1; + } + /* At this step ticks1 and ticks2 are positive */ + return MIN(ticks1, ticks2); +} + void pm_system_resume(void) { uint8_t id = _current_cpu->id; @@ -117,12 +140,20 @@ bool pm_state_force(uint8_t cpu, const struct pm_state_info *info) return true; } -bool pm_system_suspend(int32_t ticks) +bool pm_system_suspend(int32_t kernel_ticks) { uint8_t id = _current_cpu->id; k_spinlock_key_t key; + int32_t ticks, events_ticks; - SYS_PORT_TRACING_FUNC_ENTER(pm, system_suspend, ticks); + SYS_PORT_TRACING_FUNC_ENTER(pm, system_suspend, kernel_ticks); + + /* + * CPU needs to be fully woken up before the event is triggered. + * We first need to find out the ticks to the next event + */ + events_ticks = pm_policy_next_event_ticks(); + ticks = ticks_expiring_sooner(kernel_ticks, events_ticks); key = k_spin_lock(&pm_forced_state_lock); if (z_cpus_pm_forced_state[id].state != PM_STATE_ACTIVE) { diff --git a/subsys/pm/policy.c b/subsys/pm/policy.c index ecb08233af0..b13017f1603 100644 --- a/subsys/pm/policy.c +++ b/subsys/pm/policy.c @@ -143,8 +143,8 @@ static sys_slist_t latency_subs; static struct k_spinlock events_lock; /** List of events. */ static sys_slist_t events_list; -/** Next event, in absolute cycles (<0: none, [0, UINT32_MAX]: cycles) */ -static int64_t next_event_cyc = -1; +/** Pointer to Next Event. */ +static struct pm_policy_event *next_event; /** @brief Update maximum allowed latency. 
*/ static void update_max_latency(void) @@ -182,6 +182,9 @@ static void update_next_event(uint32_t cyc) int64_t new_next_event_cyc = -1; struct pm_policy_event *evt; + /* unset the next event pointer */ + next_event = NULL; + SYS_SLIST_FOR_EACH_CONTAINER(&events_list, evt, node) { uint64_t cyc_evt = evt->value_cyc; @@ -199,18 +202,26 @@ static void update_next_event(uint32_t cyc) cyc_evt += (uint64_t)UINT32_MAX + 1U; } - if ((new_next_event_cyc < 0) || - (cyc_evt < new_next_event_cyc)) { + if ((new_next_event_cyc < 0) || (cyc_evt < new_next_event_cyc)) { new_next_event_cyc = cyc_evt; + next_event = evt; } } +} - /* undo padding for events in the [0, cyc) range */ - if (new_next_event_cyc > UINT32_MAX) { - new_next_event_cyc -= (uint64_t)UINT32_MAX + 1U; +int32_t pm_policy_next_event_ticks(void) +{ + int32_t cyc_evt = -1; + + if ((next_event) && (next_event->value_cyc > 0)) { + cyc_evt = next_event->value_cyc - k_cycle_get_32(); + cyc_evt = MAX(0, cyc_evt); + BUILD_ASSERT(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC >= CONFIG_SYS_CLOCK_TICKS_PER_SEC, + "HW Cycles per sec should be greater that ticks per sec"); + return k_cyc_to_ticks_floor32(cyc_evt); } - next_event_cyc = new_next_event_cyc; + return -1; } #ifdef CONFIG_PM_POLICY_DEFAULT @@ -232,12 +243,12 @@ const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks) num_cpu_states = pm_state_cpu_get_all(cpu, &cpu_states); - if (next_event_cyc >= 0) { + if ((next_event) && (next_event->value_cyc >= 0)) { uint32_t cyc_curr = k_cycle_get_32(); - int64_t cyc_evt = next_event_cyc - cyc_curr; + int64_t cyc_evt = next_event->value_cyc - cyc_curr; /* event happening after cycle counter max value, pad */ - if (next_event_cyc <= cyc_curr) { + if (next_event->value_cyc <= cyc_curr) { cyc_evt += UINT32_MAX; } @@ -392,13 +403,12 @@ void pm_policy_event_register(struct pm_policy_event *evt, uint32_t time_us) k_spin_unlock(&events_lock, key); } -void pm_policy_event_update(struct pm_policy_event *evt, uint32_t time_us) 
+void pm_policy_event_update(struct pm_policy_event *evt, uint32_t cycle) { k_spinlock_key_t key = k_spin_lock(&events_lock); - uint32_t cyc = k_cycle_get_32(); - evt->value_cyc = cyc + k_us_to_cyc_ceil32(time_us); - update_next_event(cyc); + evt->value_cyc = cycle; + update_next_event(k_cycle_get_32()); k_spin_unlock(&events_lock, key); } diff --git a/tests/subsys/pm/policy_api/src/main.c b/tests/subsys/pm/policy_api/src/main.c index 8ea72d7e3f0..bc3564bee8a 100644 --- a/tests/subsys/pm/policy_api/src/main.c +++ b/tests/subsys/pm/policy_api/src/main.c @@ -312,7 +312,7 @@ ZTEST(policy_api, test_pm_policy_events) const struct pm_state_info *next; uint32_t now; - now = k_cyc_to_us_ceil32(k_cycle_get_32()); + now = k_cyc_to_ticks_ceil32(k_cycle_get_32()); /* events: * - 10ms from now (time < runtime idle latency) @@ -323,21 +323,21 @@ ZTEST(policy_api, test_pm_policy_events) * * first event wins, so we must stay active */ - pm_policy_event_register(&evt1, 10000); - pm_policy_event_register(&evt2, 200000); - next = pm_policy_next_state(0U, now + k_us_to_ticks_floor32(2000000)); + pm_policy_event_register(&evt1, k_ms_to_cyc_floor32(10) + k_cycle_get_32()); + pm_policy_event_register(&evt2, k_ms_to_cyc_floor32(200) + k_cycle_get_32()); + next = pm_policy_next_state(0U, now + k_sec_to_ticks_floor32(2)); zassert_is_null(next); /* remove first event so second event now wins, meaning we can now enter * runtime idle */ pm_policy_event_unregister(&evt1); - next = pm_policy_next_state(0U, now + k_us_to_ticks_floor32(2000000)); + next = pm_policy_next_state(0U, now + k_sec_to_ticks_floor32(2)); zassert_equal(next->state, PM_STATE_RUNTIME_IDLE); /* remove second event, now we can enter deepest state */ pm_policy_event_unregister(&evt2); - next = pm_policy_next_state(0U, now + k_us_to_ticks_floor32(2000000)); + next = pm_policy_next_state(0U, now + k_sec_to_ticks_floor32(2)); zassert_equal(next->state, PM_STATE_SUSPEND_TO_RAM); /* events: @@ -348,15 +348,15 @@ ZTEST(policy_api, 
test_pm_policy_events) * * system wakeup wins, so we can go up to runtime idle. */ - pm_policy_event_register(&evt1, 2000000); - next = pm_policy_next_state(0U, now + k_us_to_ticks_floor32(200000)); + pm_policy_event_register(&evt1, k_sec_to_cyc_floor32(2) + k_cycle_get_32()); + next = pm_policy_next_state(0U, now + k_ms_to_ticks_floor32(200)); zassert_equal(next->state, PM_STATE_RUNTIME_IDLE); /* modify event to occur in 10ms, so it now wins system wakeup and * requires to stay awake */ - pm_policy_event_update(&evt1, 10000); - next = pm_policy_next_state(0U, now + k_us_to_ticks_floor32(200000)); + pm_policy_event_update(&evt1, k_ms_to_cyc_floor32(10) + k_cycle_get_32()); + next = pm_policy_next_state(0U, now + k_ms_to_ticks_floor32(200)); zassert_is_null(next); pm_policy_event_unregister(&evt1);