pm: device_runtime: Use pm flags for runtime state

Although we declare `pm->enable` as a bitfield, it ends up using
more memory due to memory alignment.

Since we already have an atomic variable for device flags, this commit
adds a new flag to indicate whether or not device runtime is enabled.
By doing this we save some extra bits and avoid the need to lock the
mutex in several situations, since we can atomically check whether pm
runtime is enabled on a given device.

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
This commit is contained in:
Flavio Ceolin 2021-12-01 15:44:57 -08:00 committed by Anas Nashif
commit 3624b51f24
2 changed files with 9 additions and 18 deletions

View file

@ -37,6 +37,8 @@ enum pm_device_flag {
PM_DEVICE_FLAG_WS_CAPABLE, PM_DEVICE_FLAG_WS_CAPABLE,
/** Indicates if the device is being used as wakeup source. */ /** Indicates if the device is being used as wakeup source. */
PM_DEVICE_FLAG_WS_ENABLED, PM_DEVICE_FLAG_WS_ENABLED,
/** Indicates if device runtime is enabled */
PM_DEVICE_FLAG_RUNTIME_ENABLED,
}; };
/** @endcond */ /** @endcond */
@ -99,8 +101,6 @@ struct pm_device {
const struct device *dev; const struct device *dev;
/** Lock to synchronize the get/put operations */ /** Lock to synchronize the get/put operations */
struct k_mutex lock; struct k_mutex lock;
/** Device pm enable flag */
bool enable : 1;
/** Device usage count */ /** Device usage count */
uint32_t usage; uint32_t usage;
/** Work object for asynchronous calls */ /** Work object for asynchronous calls */

View file

@ -41,7 +41,7 @@ static int runtime_suspend(const struct device *dev, bool async)
(void)k_mutex_lock(&pm->lock, K_FOREVER); (void)k_mutex_lock(&pm->lock, K_FOREVER);
} }
if (!pm->enable) { if ((pm->flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED)) == 0U) {
ret = -ENOTSUP; ret = -ENOTSUP;
goto unlock; goto unlock;
} }
@ -108,7 +108,7 @@ int pm_device_runtime_get(const struct device *dev)
(void)k_mutex_lock(&pm->lock, K_FOREVER); (void)k_mutex_lock(&pm->lock, K_FOREVER);
} }
if (!pm->enable) { if ((pm->flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED)) == 0U) {
ret = -ENOTSUP; ret = -ENOTSUP;
goto unlock; goto unlock;
} }
@ -176,7 +176,7 @@ void pm_device_runtime_enable(const struct device *dev)
(void)k_mutex_lock(&pm->lock, K_FOREVER); (void)k_mutex_lock(&pm->lock, K_FOREVER);
} }
if (pm->enable) { if ((pm->flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED)) != 0U) {
goto unlock; goto unlock;
} }
@ -187,7 +187,7 @@ void pm_device_runtime_enable(const struct device *dev)
k_work_init_delayable(&pm->work, runtime_suspend_work); k_work_init_delayable(&pm->work, runtime_suspend_work);
} }
pm->enable = true; atomic_set_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);
unlock: unlock:
if (!k_is_pre_kernel()) { if (!k_is_pre_kernel()) {
@ -208,7 +208,7 @@ int pm_device_runtime_disable(const struct device *dev)
(void)k_mutex_lock(&pm->lock, K_FOREVER); (void)k_mutex_lock(&pm->lock, K_FOREVER);
} }
if (!pm->enable) { if ((pm->flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED)) == 0U) {
goto unlock; goto unlock;
} }
@ -230,7 +230,7 @@ int pm_device_runtime_disable(const struct device *dev)
pm->state = PM_DEVICE_STATE_ACTIVE; pm->state = PM_DEVICE_STATE_ACTIVE;
} }
pm->enable = false; atomic_clear_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);
unlock: unlock:
if (!k_is_pre_kernel()) { if (!k_is_pre_kernel()) {
@ -244,16 +244,7 @@ unlock:
bool pm_device_runtime_is_enabled(const struct device *dev) bool pm_device_runtime_is_enabled(const struct device *dev)
{ {
bool ret = false;
struct pm_device *pm = dev->pm; struct pm_device *pm = dev->pm;
if (!k_is_pre_kernel()) { return atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);
(void)k_mutex_lock(&pm->lock, K_FOREVER);
ret = pm->enable;
(void)k_mutex_unlock(&pm->lock);
} else {
ret = pm->enable;
}
return ret;
} }