pm: Use pointers for current and forced power states

Use power state pointers instead of copies, which improves performance.

Align the power_mgmt_multicore test, which was creating pm states
at runtime.

Signed-off-by: Krzysztof Chruściński <krzysztof.chruscinski@nordicsemi.no>
This commit is contained in:
Krzysztof Chruściński 2025-04-04 07:35:46 +02:00 committed by Benjamin Cabé
commit ecabcf5db5
4 changed files with 89 additions and 49 deletions

View file

@ -370,6 +370,16 @@ struct pm_state_constraint {
*/ */
uint8_t pm_state_cpu_get_all(uint8_t cpu, const struct pm_state_info **states); uint8_t pm_state_cpu_get_all(uint8_t cpu, const struct pm_state_info **states);
/**
* Get power state structure.
*
* @param cpu CPU index.
* @param state Power state.
* @param substate_id Power substate identifier.
*
* @return Pointer to the power state structure or NULL if state is not found.
*/
const struct pm_state_info *pm_state_get(uint8_t cpu, enum pm_state state, uint8_t substate_id);
/** /**
* @} * @}
*/ */
@ -384,6 +394,17 @@ static inline uint8_t pm_state_cpu_get_all(uint8_t cpu, const struct pm_state_in
return 0; return 0;
} }
/* Stub for builds without CONFIG_PM: no power state table exists,
 * so every lookup reports "state not found".
 */
static inline const struct pm_state_info *pm_state_get(uint8_t cpu, enum pm_state state,
						       uint8_t substate_id)
{
	ARG_UNUSED(cpu);
	ARG_UNUSED(state);
	ARG_UNUSED(substate_id);

	return NULL;
}
#endif /* CONFIG_PM */ #endif /* CONFIG_PM */
#ifdef __cplusplus #ifdef __cplusplus

View file

@ -31,19 +31,10 @@ static sys_slist_t pm_notifiers = SYS_SLIST_STATIC_INIT(&pm_notifiers);
IS_ENABLED(CONFIG_PM_PREWAKEUP_CONV_MODE_NEAR) ? k_us_to_ticks_near32(us) : \ IS_ENABLED(CONFIG_PM_PREWAKEUP_CONV_MODE_NEAR) ? k_us_to_ticks_near32(us) : \
IS_ENABLED(CONFIG_PM_PREWAKEUP_CONV_MODE_CEIL) ? k_us_to_ticks_ceil32(us) : \ IS_ENABLED(CONFIG_PM_PREWAKEUP_CONV_MODE_CEIL) ? k_us_to_ticks_ceil32(us) : \
k_us_to_ticks_floor32(us) k_us_to_ticks_floor32(us)
/*
* Properly initialize cpu power states. Do not make assumptions that
* ACTIVE_STATE is 0
*/
#define CPU_PM_STATE_INIT(_, __) \
{ .state = PM_STATE_ACTIVE }
static struct pm_state_info z_cpus_pm_state[] = {
LISTIFY(CONFIG_MP_MAX_NUM_CPUS, CPU_PM_STATE_INIT, (,))
};
static struct pm_state_info z_cpus_pm_forced_state[] = { /* State pointers which are set to NULL indicate ACTIVE state. */
LISTIFY(CONFIG_MP_MAX_NUM_CPUS, CPU_PM_STATE_INIT, (,)) static const struct pm_state_info *z_cpus_pm_state[CONFIG_MP_MAX_NUM_CPUS];
}; static const struct pm_state_info *z_cpus_pm_forced_state[CONFIG_MP_MAX_NUM_CPUS];
static struct k_spinlock pm_forced_state_lock; static struct k_spinlock pm_forced_state_lock;
static struct k_spinlock pm_notifier_lock; static struct k_spinlock pm_notifier_lock;
@ -67,7 +58,7 @@ static inline void pm_state_notify(bool entering_state)
} }
if (callback) { if (callback) {
callback(z_cpus_pm_state[CPU_ID].state); callback(z_cpus_pm_state[CPU_ID]->state);
} }
} }
k_spin_unlock(&pm_notifier_lock, pm_notifier_key); k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
@ -115,19 +106,19 @@ void pm_system_resume(void)
if (atomic_test_and_clear_bit(z_post_ops_required, id)) { if (atomic_test_and_clear_bit(z_post_ops_required, id)) {
#ifdef CONFIG_PM_DEVICE_SYSTEM_MANAGED #ifdef CONFIG_PM_DEVICE_SYSTEM_MANAGED
if (atomic_add(&_cpus_active, 1) == 0) { if (atomic_add(&_cpus_active, 1) == 0) {
if ((z_cpus_pm_state[id].state != PM_STATE_RUNTIME_IDLE) && if ((z_cpus_pm_state[id]->state != PM_STATE_RUNTIME_IDLE) &&
!z_cpus_pm_state[id].pm_device_disabled) { !z_cpus_pm_state[id]->pm_device_disabled) {
pm_resume_devices(); pm_resume_devices();
} }
} }
#endif #endif
pm_state_exit_post_ops(z_cpus_pm_state[id].state, z_cpus_pm_state[id].substate_id); pm_state_exit_post_ops(z_cpus_pm_state[id]->state,
z_cpus_pm_state[id]->substate_id);
pm_state_notify(false); pm_state_notify(false);
#ifdef CONFIG_SYS_CLOCK_EXISTS #ifdef CONFIG_SYS_CLOCK_EXISTS
sys_clock_idle_exit(); sys_clock_idle_exit();
#endif /* CONFIG_SYS_CLOCK_EXISTS */ #endif /* CONFIG_SYS_CLOCK_EXISTS */
z_cpus_pm_state[id] = (struct pm_state_info){PM_STATE_ACTIVE, 0, false, z_cpus_pm_state[id] = NULL;
0, 0};
} }
} }
@ -138,8 +129,10 @@ bool pm_state_force(uint8_t cpu, const struct pm_state_info *info)
__ASSERT(info->state < PM_STATE_COUNT, __ASSERT(info->state < PM_STATE_COUNT,
"Invalid power state %d!", info->state); "Invalid power state %d!", info->state);
info = pm_state_get(cpu, info->state, info->substate_id);
key = k_spin_lock(&pm_forced_state_lock); key = k_spin_lock(&pm_forced_state_lock);
z_cpus_pm_forced_state[cpu] = *info; z_cpus_pm_forced_state[cpu] = info;
k_spin_unlock(&pm_forced_state_lock, key); k_spin_unlock(&pm_forced_state_lock, key);
return true; return true;
@ -162,45 +155,37 @@ bool pm_system_suspend(int32_t kernel_ticks)
ticks = ticks_expiring_sooner(kernel_ticks, events_ticks); ticks = ticks_expiring_sooner(kernel_ticks, events_ticks);
key = k_spin_lock(&pm_forced_state_lock); key = k_spin_lock(&pm_forced_state_lock);
if (z_cpus_pm_forced_state[id].state != PM_STATE_ACTIVE) { if (z_cpus_pm_forced_state[id] != NULL) {
z_cpus_pm_state[id] = z_cpus_pm_forced_state[id]; z_cpus_pm_state[id] = z_cpus_pm_forced_state[id];
z_cpus_pm_forced_state[id].state = PM_STATE_ACTIVE; z_cpus_pm_forced_state[id] = NULL;
} else { } else {
const struct pm_state_info *info; z_cpus_pm_state[id] = pm_policy_next_state(id, ticks);
info = pm_policy_next_state(id, ticks);
if (info != NULL) {
z_cpus_pm_state[id] = *info;
} else {
z_cpus_pm_state[id].state = PM_STATE_ACTIVE;
}
} }
k_spin_unlock(&pm_forced_state_lock, key); k_spin_unlock(&pm_forced_state_lock, key);
if (z_cpus_pm_state[id].state == PM_STATE_ACTIVE) { if (z_cpus_pm_state[id] == NULL) {
LOG_DBG("No PM operations done."); LOG_DBG("No PM operations done.");
SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks, SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks, PM_STATE_ACTIVE);
z_cpus_pm_state[id].state);
return false; return false;
} }
#ifdef CONFIG_PM_DEVICE_SYSTEM_MANAGED #ifdef CONFIG_PM_DEVICE_SYSTEM_MANAGED
if (atomic_sub(&_cpus_active, 1) == 1) { if (atomic_sub(&_cpus_active, 1) == 1) {
if ((z_cpus_pm_state[id].state != PM_STATE_RUNTIME_IDLE) && if ((z_cpus_pm_state[id]->state != PM_STATE_RUNTIME_IDLE) &&
!z_cpus_pm_state[id].pm_device_disabled) { !z_cpus_pm_state[id]->pm_device_disabled) {
if (!pm_suspend_devices()) { if (!pm_suspend_devices()) {
pm_resume_devices(); pm_resume_devices();
z_cpus_pm_state[id].state = PM_STATE_ACTIVE; z_cpus_pm_state[id] = NULL;
(void)atomic_add(&_cpus_active, 1); (void)atomic_add(&_cpus_active, 1);
SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks, SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks,
z_cpus_pm_state[id].state); PM_STATE_ACTIVE);
return false; return false;
} }
} }
} }
#endif #endif
exit_latency_ticks = EXIT_LATENCY_US_TO_TICKS(z_cpus_pm_state[id].exit_latency_us); exit_latency_ticks = EXIT_LATENCY_US_TO_TICKS(z_cpus_pm_state[id]->exit_latency_us);
if ((exit_latency_ticks > 0) && (ticks != K_TICKS_FOREVER)) { if ((exit_latency_ticks > 0) && (ticks != K_TICKS_FOREVER)) {
/* /*
* We need to set the timer to interrupt a little bit early to * We need to set the timer to interrupt a little bit early to
@ -223,15 +208,16 @@ bool pm_system_suspend(int32_t kernel_ticks)
/* Enter power state */ /* Enter power state */
pm_state_notify(true); pm_state_notify(true);
atomic_set_bit(z_post_ops_required, id); atomic_set_bit(z_post_ops_required, id);
pm_state_set(z_cpus_pm_state[id].state, z_cpus_pm_state[id].substate_id); pm_state_set(z_cpus_pm_state[id]->state, z_cpus_pm_state[id]->substate_id);
pm_stats_stop(); pm_stats_stop();
/* Wake up sequence starts here */ /* Wake up sequence starts here */
pm_stats_update(z_cpus_pm_state[id].state); pm_stats_update(z_cpus_pm_state[id]->state);
pm_system_resume(); pm_system_resume();
k_sched_unlock(); k_sched_unlock();
SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks, SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks,
z_cpus_pm_state[id].state); z_cpus_pm_state[id] ?
z_cpus_pm_state[id]->state : PM_STATE_ACTIVE);
return true; return true;
} }
@ -260,5 +246,9 @@ int pm_notifier_unregister(struct pm_notifier *notifier)
const struct pm_state_info *pm_state_next_get(uint8_t cpu) const struct pm_state_info *pm_state_next_get(uint8_t cpu)
{ {
return &z_cpus_pm_state[cpu]; static const struct pm_state_info active = {
.state = PM_STATE_ACTIVE
};
return z_cpus_pm_state[cpu] ? z_cpus_pm_state[cpu] : &active;
} }

View file

@ -66,3 +66,18 @@ uint8_t pm_state_cpu_get_all(uint8_t cpu, const struct pm_state_info **states)
return states_per_cpu[cpu]; return states_per_cpu[cpu];
} }
const struct pm_state_info *pm_state_get(uint8_t cpu, enum pm_state state, uint8_t substate_id)
{
	__ASSERT_NO_MSG(cpu < ARRAY_SIZE(cpus_states));

	/* Linear scan of this CPU's state table for an exact
	 * state/substate match; the tables are small, so O(n) is fine.
	 */
	const struct pm_state_info *table = cpus_states[cpu];

	for (uint8_t idx = 0; idx < states_per_cpu[cpu]; idx++) {
		if ((table[idx].state == state) && (table[idx].substate_id == substate_id)) {
			return &table[idx];
		}
	}

	return NULL;
}

View file

@ -63,26 +63,40 @@ void pm_state_exit_post_ops(enum pm_state state, uint8_t substate_id)
const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int ticks) const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int ticks)
{ {
static struct pm_state_info info = {}; static const struct pm_state_info states[] = {
{
.state = PM_STATE_ACTIVE
},
{
.state = PM_STATE_RUNTIME_IDLE
},
{
.state = PM_STATE_SUSPEND_TO_IDLE
},
{
.state = PM_STATE_STANDBY
},
};
static const struct pm_state_info *info;
int32_t msecs = k_ticks_to_ms_floor64(ticks); int32_t msecs = k_ticks_to_ms_floor64(ticks);
if (msecs < ACTIVE_MSEC) { if (msecs < ACTIVE_MSEC) {
info.state = PM_STATE_ACTIVE; info = NULL;
} else if (msecs <= IDLE_MSEC) { } else if (msecs <= IDLE_MSEC) {
info.state = PM_STATE_RUNTIME_IDLE; info = &states[1];
} else if (msecs <= SUSPEND_TO_IDLE_MSEC) { } else if (msecs <= SUSPEND_TO_IDLE_MSEC) {
info.state = PM_STATE_SUSPEND_TO_IDLE; info = &states[2];
} else { } else {
if (cpu == 0U) { if (cpu == 0U) {
info.state = PM_STATE_SUSPEND_TO_IDLE; info = &states[2];
} else { } else {
info.state = PM_STATE_STANDBY; info = &states[3];
} }
} }
state_testing[cpu] = info.state; state_testing[cpu] = info ? info->state : PM_STATE_ACTIVE;
return &info; return info;
} }
/* /*