pm: policy: Consider substates for state lock functions

Extend the current pm_policy_state_lock_*() functions to support
substates.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
Carlo Caione 2022-04-07 13:06:12 +02:00 committed by Carles Cufí
commit 69b28bfd07
19 changed files with 145 additions and 55 deletions
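For existing callers the change is mechanical: every call site gains a substate ID argument, and passing PM_ALL_SUBSTATES keeps the old whole-state behaviour. A minimal before/after sketch of a typical call site (the standby/UART scenario mirrors the documentation updated below):

/* Before this commit: the lock always applied to the whole power state. */
pm_policy_state_lock_get(PM_STATE_STANDBY);
/* ... critical section ... */
pm_policy_state_lock_put(PM_STATE_STANDBY);

/* After this commit: a substate ID is required. PM_ALL_SUBSTATES keeps the
 * old behaviour; a devicetree-defined substate ID restricts the lock to that
 * substate only.
 */
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
/* ... critical section ... */
pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);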


@ -216,9 +216,9 @@ disable sleep state 2 while polling:
.. code-block:: c
pm_policy_state_lock_get(PM_STATE_STANDBY);
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
<code that calls uart_poll_in() and expects input at any point in time>
pm_policy_state_lock_put(PM_STATE_STANDBY);
pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
References
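With the new signature, the polling example in the documentation above could also target a single substate where the SoC defines one in devicetree; a hedged variant of that snippet (the substate ID 1 is hypothetical and not part of this commit):

/* Hypothetical: lock only standby substate 1, leaving any other standby
 * substates available to the policy while polling.
 */
pm_policy_state_lock_get(PM_STATE_STANDBY, 1);
/* <code that calls uart_poll_in() and expects input at any point in time> */
pm_policy_state_lock_put(PM_STATE_STANDBY, 1);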


@ -242,9 +242,9 @@ disable sleep state 2 while polling:
.. code-block:: c
pm_policy_state_lock_get(PM_STATE_STANDBY);
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
<code that calls uart_poll_in() and expects input at any point in time>
pm_policy_state_lock_put(PM_STATE_STANDBY);
pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
References


@ -222,9 +222,9 @@ disable sleep state 2 while polling:
.. code-block:: c
pm_policy_state_lock_get(PM_STATE_STANDBY);
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
<code that calls uart_poll_in() and expects input at any point in time>
pm_policy_state_lock_put(PM_STATE_STANDBY);
pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
References


@ -101,7 +101,7 @@ static int entropy_cc13xx_cc26xx_get_entropy(const struct device *dev,
unsigned int key = irq_lock();
if (!data->constrained) {
pm_policy_state_lock_get(PM_STATE_STANDBY);
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
data->constrained = true;
}
irq_unlock(key);
@ -147,7 +147,8 @@ static void entropy_cc13xx_cc26xx_isr(const struct device *dev)
#ifdef CONFIG_PM
if (data->constrained) {
pm_policy_state_lock_put(
PM_STATE_STANDBY);
PM_STATE_STANDBY,
PM_ALL_SUBSTATES);
data->constrained = false;
}
#endif
@ -290,7 +291,7 @@ static int entropy_cc13xx_cc26xx_init(const struct device *dev)
#if defined(CONFIG_PM)
Power_setDependency(PowerCC26XX_PERIPH_TRNG);
/* Stay out of standby until buffer is filled with entropy */
pm_policy_state_lock_get(PM_STATE_STANDBY);
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
data->constrained = true;
/* Register notification function */
Power_registerNotify(&data->post_notify,


@ -246,7 +246,7 @@ static uint16_t rng_pool_get(struct rng_pool *rngp, uint8_t *buf, uint16_t len)
available = available - len;
if ((available <= rngp->threshold)
&& !LL_RNG_IsEnabledIT(entropy_stm32_rng_data.rng)) {
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE);
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
LL_RNG_EnableIT(entropy_stm32_rng_data.rng);
}
@ -300,7 +300,7 @@ static void stm32_rng_isr(const void *arg)
byte);
if (ret < 0) {
LL_RNG_DisableIT(entropy_stm32_rng_data.rng);
pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE);
pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
}
k_sem_give(&entropy_stm32_rng_data.sem_sync);
@ -512,7 +512,7 @@ static int entropy_stm32_rng_init(const struct device *dev)
* rng pool is being populated. The ISR will release the constraint again
* when the rng pool is filled.
*/
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE);
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
LL_RNG_EnableIT(dev_data->rng);


@ -196,7 +196,7 @@ static int i2c_cc13xx_cc26xx_transfer(const struct device *dev,
k_sem_take(&data->lock, K_FOREVER);
pm_policy_state_lock_get(PM_STATE_STANDBY);
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
for (int i = 0; i < num_msgs; i++) {
/* Not supported by hardware */
@ -218,7 +218,7 @@ static int i2c_cc13xx_cc26xx_transfer(const struct device *dev,
}
}
pm_policy_state_lock_put(PM_STATE_STANDBY);
pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
k_sem_give(&data->lock);


@ -250,7 +250,7 @@ static void uart_cc13xx_cc26xx_irq_tx_enable(const struct device *dev)
* standby mode instead, since it is the power state that
* would interfere with a transfer.
*/
pm_policy_state_lock_get(PM_STATE_STANDBY);
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
data->tx_constrained = true;
}
#endif
@ -268,7 +268,7 @@ static void uart_cc13xx_cc26xx_irq_tx_disable(const struct device *dev)
struct uart_cc13xx_cc26xx_data *data = dev->data;
if (data->tx_constrained) {
pm_policy_state_lock_put(PM_STATE_STANDBY);
pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
data->tx_constrained = false;
}
#endif
@ -294,7 +294,7 @@ static void uart_cc13xx_cc26xx_irq_rx_enable(const struct device *dev)
* standby.
*/
if (!data->rx_constrained) {
pm_policy_state_lock_get(PM_STATE_STANDBY);
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
data->rx_constrained = true;
}
#endif
@ -310,7 +310,7 @@ static void uart_cc13xx_cc26xx_irq_rx_disable(const struct device *dev)
struct uart_cc13xx_cc26xx_data *data = dev->data;
if (data->rx_constrained) {
pm_policy_state_lock_put(PM_STATE_STANDBY);
pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
data->rx_constrained = false;
}
#endif
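Several drivers in this commit (the CC13xx/CC26xx entropy and UART drivers above, the STM32 and MCUX UART drivers below) share an idiom: a boolean flag guards the lock so the constraint is acquired at most once and released exactly once, even when the enable/ISR paths run repeatedly. A condensed sketch of that idiom with the new signature (the struct and function names here are illustrative, not from the commit):

#include <kernel.h>
#include <pm/policy.h>

struct my_drv_data {
	bool constrained;
};

static void my_drv_block_standby(struct my_drv_data *data)
{
	unsigned int key = irq_lock();

	/* Only take a reference on the first call. */
	if (!data->constrained) {
		pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
		data->constrained = true;
	}

	irq_unlock(key);
}

static void my_drv_allow_standby(struct my_drv_data *data)
{
	unsigned int key = irq_lock();

	/* Only drop the reference if one is actually held. */
	if (data->constrained) {
		pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
		data->constrained = false;
	}

	irq_unlock(key);
}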


@ -56,7 +56,7 @@ void uart1_wui_isr(const struct device *gpio, struct gpio_callback *cb,
* Power management on the it8xxx2 chip only supports standby, so we
* can directly set the constraint for standby here.
*/
pm_policy_state_lock_get(PM_STATE_STANDBY);
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
k_work_reschedule(&uart_console_data->rx_refresh_timeout_work, delay);
#endif
}
@ -76,7 +76,7 @@ void uart2_wui_isr(const struct device *gpio, struct gpio_callback *cb,
* Power management on the it8xxx2 chip only supports standby, so we
* can directly set the constraint for standby here.
*/
pm_policy_state_lock_get(PM_STATE_STANDBY);
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
k_work_reschedule(&uart_console_data->rx_refresh_timeout_work, delay);
#endif
}
@ -116,7 +116,7 @@ static void uart_it8xxx2_rx_refresh_timeout(struct k_work *work)
{
ARG_UNUSED(work);
pm_policy_state_lock_put(PM_STATE_STANDBY);
pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
}
#endif
#endif /* CONFIG_PM_DEVICE */


@ -105,7 +105,7 @@ static void mcux_lpuart_pm_policy_state_lock_get(const struct device *dev)
if (!data->pm_state_lock_on) {
data->pm_state_lock_on = true;
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE);
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
}
}
@ -115,7 +115,7 @@ static void mcux_lpuart_pm_policy_state_lock_put(const struct device *dev)
if (data->pm_state_lock_on) {
data->pm_state_lock_on = false;
pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE);
pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
}
}
#endif /* CONFIG_PM */


@ -64,7 +64,7 @@ static void uart_npcx_pm_policy_state_lock_get(struct uart_npcx_data *data,
enum uart_pm_policy_state_flag flag)
{
if (atomic_test_and_set_bit(data->pm_policy_state_flag, flag) == 0) {
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE);
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
}
}
@ -72,7 +72,7 @@ static void uart_npcx_pm_policy_state_lock_put(struct uart_npcx_data *data,
enum uart_pm_policy_state_flag flag)
{
if (atomic_test_and_clear_bit(data->pm_policy_state_flag, flag) == 1) {
pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE);
pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
}
}
#endif /* defined(CONFIG_PM) && defined(CONFIG_UART_INTERRUPT_DRIVEN) */
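The npcx driver above uses per-condition atomic flag bits instead of a single boolean, so independent events (for instance TX busy and an RX timeout) each contribute at most one reference to the same state lock. A self-contained sketch of that scheme (the enum, variable, and function names are illustrative):

#include <sys/atomic.h>
#include <pm/policy.h>

enum my_uart_pm_flag {
	MY_UART_PM_TX_FLAG,
	MY_UART_PM_RX_FLAG,
	MY_UART_PM_FLAG_COUNT,
};

static ATOMIC_DEFINE(my_uart_pm_flags, MY_UART_PM_FLAG_COUNT);

static void my_uart_pm_lock_get(enum my_uart_pm_flag flag)
{
	/* Only the 0 -> 1 transition of this flag takes a reference. */
	if (atomic_test_and_set_bit(my_uart_pm_flags, flag) == 0) {
		pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	}
}

static void my_uart_pm_lock_put(enum my_uart_pm_flag flag)
{
	/* Only the 1 -> 0 transition of this flag drops the reference. */
	if (atomic_test_and_clear_bit(my_uart_pm_flags, flag) == 1) {
		pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	}
}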


@ -681,7 +681,7 @@ static void uart_ns16550_irq_tx_enable(const struct device *dev)
* different states.
*/
for (uint8_t i = 0U; i < num_cpu_states; i++) {
pm_policy_state_lock_get(cpu_states[i].state);
pm_policy_state_lock_get(cpu_states[i].state, PM_ALL_SUBSTATES);
}
}
#endif
@ -718,7 +718,7 @@ static void uart_ns16550_irq_tx_disable(const struct device *dev)
* to different states.
*/
for (uint8_t i = 0U; i < num_cpu_states; i++) {
pm_policy_state_lock_put(cpu_states[i].state);
pm_policy_state_lock_put(cpu_states[i].state, PM_ALL_SUBSTATES);
}
}
#endif


@ -78,7 +78,7 @@ static void uart_stm32_pm_policy_state_lock_get(const struct device *dev)
if (!data->pm_policy_state_on) {
data->pm_policy_state_on = true;
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE);
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
}
}
@ -88,7 +88,7 @@ static void uart_stm32_pm_policy_state_lock_put(const struct device *dev)
if (data->pm_policy_state_on) {
data->pm_policy_state_on = false;
pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE);
pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
}
}
#endif /* CONFIG_PM */


@ -141,7 +141,7 @@ static int spi_cc13xx_cc26xx_transceive(const struct device *dev,
int err;
spi_context_lock(ctx, false, NULL, config);
pm_policy_state_lock_get(PM_STATE_STANDBY);
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
err = spi_cc13xx_cc26xx_configure(dev, config);
if (err) {
@ -175,7 +175,7 @@ static int spi_cc13xx_cc26xx_transceive(const struct device *dev,
spi_context_cs_control(ctx, false);
done:
pm_policy_state_lock_put(PM_STATE_STANDBY);
pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
spi_context_release(ctx, err);
return err;
}


@ -59,6 +59,9 @@ const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks);
/** @endcond */
/** Special value for 'all substates'. */
#define PM_ALL_SUBSTATES (UINT8_MAX)
#if defined(CONFIG_PM) || defined(__DOXYGEN__)
/**
* @brief Increase a power state lock counter.
@ -73,29 +76,35 @@ const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks);
* with PM_STATE_ACTIVE will have no effect.
*
* @param state Power state.
* @param substate_id Power substate ID. Use PM_ALL_SUBSTATES to affect all the
* substates in the given power state.
*
* @see pm_policy_state_lock_put()
*/
void pm_policy_state_lock_get(enum pm_state state);
void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id);
/**
* @brief Decrease a power state lock counter.
*
* @param state Power state.
* @param substate_id Power substate ID. Use PM_ALL_SUBSTATES to affect all the
* substates in the given power state.
*
* @see pm_policy_state_lock_get()
*/
void pm_policy_state_lock_put(enum pm_state state);
void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id);
/**
* @brief Check if a power state lock is active (not allowed).
*
* @param state Power state.
* @param substate_id Power substate ID. Use PM_ALL_SUBSTATES to check all the
* substates in the given power state.
*
* @retval true if power state lock is active.
* @retval false if power state lock is not active.
*/
bool pm_policy_state_lock_is_active(enum pm_state state);
bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id);
/**
* @brief Add a new latency requirement.
@ -132,19 +141,22 @@ void pm_policy_latency_request_remove(struct pm_policy_latency_request *req);
*/
void pm_policy_latency_changed(pm_policy_latency_changed_cb_t cb);
#else
static inline void pm_policy_state_lock_get(enum pm_state state)
static inline void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id)
{
ARG_UNUSED(state);
ARG_UNUSED(substate_id);
}
static inline void pm_policy_state_lock_put(enum pm_state state)
static inline void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id)
{
ARG_UNUSED(state);
ARG_UNUSED(substate_id);
}
static inline bool pm_policy_state_lock_is_active(enum pm_state state)
static inline bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id)
{
ARG_UNUSED(state);
ARG_UNUSED(substate_id);
return false;
}
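The Doxygen comments above describe get/put as counter operations. A short illustrative sequence (assuming CONFIG_PM=y and that PM_STATE_STANDBY is one of the power states defined in devicetree for the platform):

pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);

/* Counter is now 2, so one put() is not enough to release the lock. */
pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
__ASSERT_NO_MSG(pm_policy_state_lock_is_active(PM_STATE_STANDBY, PM_ALL_SUBSTATES));

/* The second put() balances the second get(): standby may be selected again. */
pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
__ASSERT_NO_MSG(!pm_policy_state_lock_is_active(PM_STATE_STANDBY, PM_ALL_SUBSTATES));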


@ -31,7 +31,7 @@ static int disable_ds_1(const struct device *dev)
{
ARG_UNUSED(dev);
pm_policy_state_lock_get(PM_STATE_SOFT_OFF);
pm_policy_state_lock_get(PM_STATE_SOFT_OFF, PM_ALL_SUBSTATES);
return 0;
}


@ -15,8 +15,30 @@
#include <sys/atomic.h>
#include <toolchain.h>
/** State lock reference counting */
static atomic_t state_lock_cnt[PM_STATE_COUNT];
#define DT_SUB_LOCK_INIT(node_id) \
{ .state = PM_STATE_DT_INIT(node_id), \
.substate_id = DT_PROP_OR(node_id, substate_id, 0), \
.lock = ATOMIC_INIT(0), \
},
/**
* State and substate lock structure.
*
* This struct associates a reference count with each <state, substate>
* pair, used by the pm_policy_state_lock_*() functions.
*
* Operations on this array are O(n) in the number of power states, mostly
* because of the arbitrary nature of the substate value (which can be
* anything from a small integer to a bitmask). We could probably do better
* with a hashmap.
*/
static struct {
enum pm_state state;
uint8_t substate_id;
atomic_t lock;
} substate_lock_t[] = {
DT_FOREACH_STATUS_OKAY(zephyr_power_state, DT_SUB_LOCK_INIT)
};
/** Lock to synchronize access to the latency request list. */
static struct k_spinlock latency_lock;
@ -68,7 +90,8 @@ const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks)
const struct pm_state_info *state = &cpu_states[i];
uint32_t min_residency, exit_latency;
if (pm_policy_state_lock_is_active(state->state)) {
/* check if there is a lock on state + substate */
if (pm_policy_state_lock_is_active(state->state, state->substate_id)) {
continue;
}
@ -91,23 +114,43 @@ const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks)
}
#endif
void pm_policy_state_lock_get(enum pm_state state)
void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id)
{
atomic_inc(&state_lock_cnt[state]);
for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
if (substate_lock_t[i].state == state &&
(substate_lock_t[i].substate_id == substate_id ||
substate_id == PM_ALL_SUBSTATES)) {
atomic_inc(&substate_lock_t[i].lock);
}
}
}
void pm_policy_state_lock_put(enum pm_state state)
void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id)
{
atomic_t cnt = atomic_dec(&state_lock_cnt[state]);
for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
if (substate_lock_t[i].state == state &&
(substate_lock_t[i].substate_id == substate_id ||
substate_id == PM_ALL_SUBSTATES)) {
atomic_t cnt = atomic_dec(&substate_lock_t[i].lock);
ARG_UNUSED(cnt);
ARG_UNUSED(cnt);
__ASSERT(cnt >= 1, "Unbalanced state lock get/put");
__ASSERT(cnt >= 1, "Unbalanced state lock get/put");
}
}
}
bool pm_policy_state_lock_is_active(enum pm_state state)
bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id)
{
return (atomic_get(&state_lock_cnt[state]) != 0);
for (size_t i = 0; i < ARRAY_SIZE(substate_lock_t); i++) {
if (substate_lock_t[i].state == state &&
(substate_lock_t[i].substate_id == substate_id ||
substate_id == PM_ALL_SUBSTATES)) {
return (atomic_get(&substate_lock_t[i].lock) != 0);
}
}
return false;
}
void pm_policy_latency_request_add(struct pm_policy_latency_request *req,
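One consequence of the lookup above is that PM_ALL_SUBSTATES means slightly different things on the two paths: get()/put() walk the whole array and touch every entry of the state, while is_active() returns the status of the first matching entry it finds. An illustrative sequence, using the substate IDs from the test overlay later in this commit (suspend-to-ram entries with substate IDs 10 and 100, assuming devicetree ordering):

pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, 100);

/* true: the <suspend-to-ram, 100> entry is checked directly. */
pm_policy_state_lock_is_active(PM_STATE_SUSPEND_TO_RAM, 100);

/* false: with PM_ALL_SUBSTATES the loop returns on the first matching entry,
 * <suspend-to-ram, 10>, which is not locked.
 */
pm_policy_state_lock_is_active(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);

/* A get() with PM_ALL_SUBSTATES, by contrast, increments every entry of the
 * state, after which both checks above report the state as locked.
 */
pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_RAM, PM_ALL_SUBSTATES);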


@ -18,6 +18,7 @@
power-state-name = "runtime-idle";
min-residency-us = <100000>;
exit-latency-us = <10000>;
substate-id = <1>;
};
state1: state1 {
@ -25,6 +26,7 @@
power-state-name = "suspend-to-ram";
min-residency-us = <1000000>;
exit-latency-us = <100000>;
substate-id = <10>;
};
state2: state2 {
@ -32,6 +34,7 @@
power-state-name = "suspend-to-ram";
min-residency-us = <500000>;
exit-latency-us = <50000>;
substate-id = <100>;
};
};
};
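Given this overlay, the DT_FOREACH_STATUS_OKAY(zephyr_power_state, DT_SUB_LOCK_INIT) expansion in the policy code shown earlier would produce roughly the following lock table (illustrative; the exact initializer layout and ordering may differ):

static struct {
	enum pm_state state;
	uint8_t substate_id;
	atomic_t lock;
} substate_lock_t[] = {
	{ .state = PM_STATE_RUNTIME_IDLE,   .substate_id = 1,   .lock = ATOMIC_INIT(0) },
	{ .state = PM_STATE_SUSPEND_TO_RAM, .substate_id = 10,  .lock = ATOMIC_INIT(0) },
	{ .state = PM_STATE_SUSPEND_TO_RAM, .substate_id = 100, .lock = ATOMIC_INIT(0) },
};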


@ -69,7 +69,7 @@ static void test_pm_policy_next_state_default_allowed(void)
/* initial state: PM_STATE_RUNTIME_IDLE allowed
* next state: PM_STATE_RUNTIME_IDLE
*/
active = pm_policy_state_lock_is_active(PM_STATE_RUNTIME_IDLE);
active = pm_policy_state_lock_is_active(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
zassert_false(active, NULL);
next = pm_policy_next_state(0U, k_us_to_ticks_floor32(110000));
@ -78,9 +78,9 @@ static void test_pm_policy_next_state_default_allowed(void)
/* disallow PM_STATE_RUNTIME_IDLE
* next state: NULL (active)
*/
pm_policy_state_lock_get(PM_STATE_RUNTIME_IDLE);
pm_policy_state_lock_get(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
active = pm_policy_state_lock_is_active(PM_STATE_RUNTIME_IDLE);
active = pm_policy_state_lock_is_active(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
zassert_true(active, NULL);
next = pm_policy_next_state(0U, k_us_to_ticks_floor32(110000));
@ -89,9 +89,40 @@ static void test_pm_policy_next_state_default_allowed(void)
/* allow PM_STATE_RUNTIME_IDLE again
* next state: PM_STATE_RUNTIME_IDLE
*/
pm_policy_state_lock_put(PM_STATE_RUNTIME_IDLE);
pm_policy_state_lock_put(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
active = pm_policy_state_lock_is_active(PM_STATE_RUNTIME_IDLE);
active = pm_policy_state_lock_is_active(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES);
zassert_false(active, NULL);
next = pm_policy_next_state(0U, k_us_to_ticks_floor32(110000));
zassert_equal(next->state, PM_STATE_RUNTIME_IDLE, NULL);
/* initial state: PM_STATE_RUNTIME_IDLE and substate 1 allowed
* next state: PM_STATE_RUNTIME_IDLE
*/
active = pm_policy_state_lock_is_active(PM_STATE_RUNTIME_IDLE, 1);
zassert_false(active, NULL);
next = pm_policy_next_state(0U, k_us_to_ticks_floor32(110000));
zassert_equal(next->state, PM_STATE_RUNTIME_IDLE, NULL);
/* disallow PM_STATE_RUNTIME_IDLE and substate 1
* next state: NULL (active)
*/
pm_policy_state_lock_get(PM_STATE_RUNTIME_IDLE, 1);
active = pm_policy_state_lock_is_active(PM_STATE_RUNTIME_IDLE, 1);
zassert_true(active, NULL);
next = pm_policy_next_state(0U, k_us_to_ticks_floor32(110000));
zassert_equal(next, NULL, NULL);
/* allow PM_STATE_RUNTIME_IDLE and substate 1 again
* next state: PM_STATE_RUNTIME_IDLE
*/
pm_policy_state_lock_put(PM_STATE_RUNTIME_IDLE, 1);
active = pm_policy_state_lock_is_active(PM_STATE_RUNTIME_IDLE, 1);
zassert_false(active, NULL);
next = pm_policy_next_state(0U, k_us_to_ticks_floor32(110000));
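A further case implied by the lock lookup shown earlier, sketched here as a hypothetical extension of the test (substate ID 2 is not defined for runtime-idle in the overlay, so the calls match no lock entry and are effectively no-ops):

/* Hypothetical: no <runtime-idle, 2> entry exists, so nothing is locked. */
pm_policy_state_lock_get(PM_STATE_RUNTIME_IDLE, 2);

active = pm_policy_state_lock_is_active(PM_STATE_RUNTIME_IDLE, 2);
zassert_false(active, NULL);

next = pm_policy_next_state(0U, k_us_to_ticks_floor32(110000));
zassert_equal(next->state, PM_STATE_RUNTIME_IDLE, NULL);

pm_policy_state_lock_put(PM_STATE_RUNTIME_IDLE, 2);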


@ -142,7 +142,7 @@ manifest:
groups:
- hal
- name: hal_ti
revision: 2a5891d874cae92fcac04f9822957ac0785fe835
revision: a29a32b806a0a6380b785b950cb18229025ed699
path: modules/hal/ti
groups:
- hal