pm: Add option to quickly detect power state availability

When all states are locked or latency requirement cannot be met
by any power state it is important to be able to quickly exit
suspend procedure because that usually means that application
requires high performance. Add function for detecting if any
power state is available.

Additionally, add function pm_policy_state_is_available for
checking if given state is available which means that it is not
locked and fulfills current latency requirement.

Signed-off-by: Krzysztof Chruściński <krzysztof.chruscinski@nordicsemi.no>
This commit is contained in:
Krzysztof Chruściński 2025-04-03 15:31:50 +02:00 committed by Benjamin Cabé
commit 5a313e141d
3 changed files with 121 additions and 27 deletions

View file

@ -137,6 +137,31 @@ void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id);
*/
bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id);
/**
* @brief Check if a power state is available.
*
* It is unavailable if locked or latency requirement cannot be fulfilled in that state.
*
* @param state Power state.
* @param substate_id Power substate ID. Use PM_ALL_SUBSTATES to affect all the
* substates in the given power state.
*
* @retval true if power state is available.
* @retval false if power state is not available.
*/
bool pm_policy_state_is_available(enum pm_state state, uint8_t substate_id);
/**
* @brief Check if any power state can be used.
*
* This function allows a quick check of whether any power state is available,
* so that the suspend operation can exit early.
*
* @retval true if at least one power state is available.
* @retval false if all power states are unavailable.
*/
bool pm_policy_state_any_active(void);
/**
* @brief Register an event.
*

View file

@ -147,6 +147,11 @@ bool pm_system_suspend(int32_t kernel_ticks)
SYS_PORT_TRACING_FUNC_ENTER(pm, system_suspend, kernel_ticks);
if (!pm_policy_state_any_active()) {
/* Return early if all states are unavailable. */
return false;
}
/*
* CPU needs to be fully wake up before the event is triggered.
* We need to find out first the ticks to the next event

View file

@ -10,44 +10,56 @@
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/toolchain.h>
#include <zephyr/spinlock.h>
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
#define DT_SUB_LOCK_INIT(node_id) \
{ .state = PM_STATE_DT_INIT(node_id), \
.substate_id = DT_PROP_OR(node_id, substate_id, 0), \
.lock = ATOMIC_INIT(0), \
#define DT_SUB_LOCK_INIT(node_id) \
{ .state = PM_STATE_DT_INIT(node_id), \
.substate_id = DT_PROP_OR(node_id, substate_id, 0), \
.exit_latency_us = DT_PROP_OR(node_id, exit_latency_us, 0), \
},
/**
* State and substate lock structure.
*
* This struct is associating a reference counting to each <state,substate>
* couple to be used with the pm_policy_substate_lock_* functions.
* Struct holds all power states defined in the device tree. Array with counter
* variables is in RAM and n-th counter is used for n-th power state. Structure
* also holds exit latency for each state. It is used to disable power states
* based on current latency requirement.
*
* Operations on this array are in the order of O(n) with the number of power
* states and this is mostly due to the random nature of the substate value
* (that can be anything from a small integer value to a bitmask). We can
* probably do better with an hashmap.
*/
static struct {
static const struct {
enum pm_state state;
uint8_t substate_id;
atomic_t lock;
} substate_lock_t[] = {
uint32_t exit_latency_us;
} substates[] = {
DT_FOREACH_STATUS_OKAY(zephyr_power_state, DT_SUB_LOCK_INIT)
};
static atomic_t lock_cnt[ARRAY_SIZE(substates)];
static atomic_t latency_mask = BIT_MASK(ARRAY_SIZE(substates));
static atomic_t unlock_mask = BIT_MASK(ARRAY_SIZE(substates));
static struct k_spinlock lock;
#endif
void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
if (substate_lock_t[i].state == state &&
(substate_lock_t[i].substate_id == substate_id ||
substate_id == PM_ALL_SUBSTATES)) {
atomic_inc(&substate_lock_t[i].lock);
for (size_t i = 0; i < ARRAY_SIZE(substates); i++) {
if (substates[i].state == state &&
(substates[i].substate_id == substate_id || substate_id == PM_ALL_SUBSTATES)) {
k_spinlock_key_t key = k_spin_lock(&lock);
if (lock_cnt[i] == 0) {
unlock_mask &= ~BIT(i);
}
lock_cnt[i]++;
k_spin_unlock(&lock, key);
}
}
#endif
@ -56,15 +68,17 @@ void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id)
void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
if (substate_lock_t[i].state == state &&
(substate_lock_t[i].substate_id == substate_id ||
substate_id == PM_ALL_SUBSTATES)) {
atomic_t cnt = atomic_dec(&substate_lock_t[i].lock);
for (size_t i = 0; i < ARRAY_SIZE(substates); i++) {
if (substates[i].state == state &&
(substates[i].substate_id == substate_id || substate_id == PM_ALL_SUBSTATES)) {
k_spinlock_key_t key = k_spin_lock(&lock);
ARG_UNUSED(cnt);
__ASSERT(cnt >= 1, "Unbalanced state lock get/put");
__ASSERT(lock_cnt[i] > 0, "Unbalanced state lock get/put");
lock_cnt[i]--;
if (lock_cnt[i] == 0) {
unlock_mask |= BIT(i);
}
k_spin_unlock(&lock, key);
}
}
#endif
@ -73,14 +87,64 @@ void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id)
/* Report whether at least one lock is currently held on the given
 * <state, substate_id> pair (or on any substate of @p state when
 * @p substate_id is PM_ALL_SUBSTATES).
 */
bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	size_t idx;

	/* Linear scan over the device-tree defined power states. */
	for (idx = 0; idx < ARRAY_SIZE(substates); idx++) {
		if (substates[idx].state != state) {
			continue;
		}
		if ((substate_id != PM_ALL_SUBSTATES) &&
		    (substates[idx].substate_id != substate_id)) {
			continue;
		}

		return atomic_get(&lock_cnt[idx]) != 0;
	}
#endif
	return false;
}
/* A state is available when it is not reference-locked and the current
 * latency requirement still permits entering it (its latency_mask bit
 * is set by pm_policy_latency_update_locked()).
 */
bool pm_policy_state_is_available(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(substates); idx++) {
		bool state_match = (substates[idx].state == state);
		bool substate_match = (substate_id == PM_ALL_SUBSTATES) ||
				      (substates[idx].substate_id == substate_id);

		if (state_match && substate_match) {
			bool unlocked = (atomic_get(&lock_cnt[idx]) == 0);
			bool latency_ok = (atomic_get(&latency_mask) & BIT(idx)) != 0;

			return unlocked && latency_ok;
		}
	}
#endif
	return false;
}
/* Quick check used to abort the suspend path early: returns true when at
 * least one power state is both unlocked and allowed by the current latency
 * requirement.
 */
bool pm_policy_state_any_active(void)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	/* A state is usable only if its bit is set in BOTH masks:
	 * unlock_mask (no active locks) and latency_mask (exit latency
	 * fits the current requirement).
	 */
	return (atomic_get(&unlock_mask) & atomic_get(&latency_mask)) != 0;
#else
	/* No power states defined in the device tree; nothing can veto
	 * suspend here.
	 */
	return true;
#endif
}
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
/* Invoked (under lock) whenever the aggregate latency requirement changes;
 * recomputes which power states remain permitted.
 */
static void pm_policy_latency_update_locked(int32_t max_latency_us)
{
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(substates); idx++) {
		/* NOTE: unsigned comparison, matching the original code —
		 * max_latency_us is converted to uint32_t, so a negative
		 * value (no requirement) ends up enabling every state.
		 */
		if (substates[idx].exit_latency_us < (uint32_t)max_latency_us) {
			latency_mask |= BIT(idx);
		} else {
			latency_mask &= ~BIT(idx);
		}
	}
}
static int pm_policy_latency_init(void)
{
static struct pm_policy_latency_subscription sub;
pm_policy_latency_changed_subscribe(&sub, pm_policy_latency_update_locked);
return 0;
}
SYS_INIT(pm_policy_latency_init, PRE_KERNEL_1, 0);
#endif