pm: device: access members of pm_device directly

Accessing pm_device members through a local pointer improves code
readability, since it removes the dev-> prefix from most accesses.

Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
Authored by Gerard Marull-Paretas on 2021-10-13 15:48:22 +02:00; committed by Anas Nashif
Commit a78ee67b61
2 changed files with 92 additions and 64 deletions
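
For quick reference, the pattern applied throughout both files is sketched below. It is a simplified before/after excerpt based on pm_device_is_busy() as it appears in this diff, not a complete listing; the Zephyr device and PM headers are assumed to be included.

/* Before: every access re-dereferences dev->pm. */
bool pm_device_is_busy(const struct device *dev)
{
	if (dev->pm_control == NULL) {
		return false;
	}

	return atomic_test_bit(&dev->pm->flags, PM_DEVICE_FLAG_BUSY);
}

/* After: dev->pm is cached once in a local pointer, and all further
 * member accesses go through pm->.
 */
bool pm_device_is_busy(const struct device *dev)
{
	struct pm_device *pm = dev->pm;

	if (dev->pm_control == NULL) {
		return false;
	}

	return atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_BUSY);
}

The local pointer is a readability refactor only; dev->pm does not change during the call, so behaviour is unaffected.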

Changed file 1 of 2:

@@ -95,41 +95,42 @@ int pm_device_state_set(const struct device *dev,
 {
 	int ret;
 	enum pm_device_action action;
+	struct pm_device *pm = dev->pm;
 
 	if (dev->pm_control == NULL) {
 		return -ENOSYS;
 	}
 
-	if (atomic_test_bit(&dev->pm->flags, PM_DEVICE_FLAG_TRANSITIONING)) {
+	if (atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_TRANSITIONING)) {
 		return -EBUSY;
 	}
 
 	switch (state) {
 	case PM_DEVICE_STATE_SUSPENDED:
-		if (dev->pm->state == PM_DEVICE_STATE_SUSPENDED) {
+		if (pm->state == PM_DEVICE_STATE_SUSPENDED) {
 			return -EALREADY;
-		} else if (dev->pm->state == PM_DEVICE_STATE_OFF) {
+		} else if (pm->state == PM_DEVICE_STATE_OFF) {
 			return -ENOTSUP;
 		}
 
 		action = PM_DEVICE_ACTION_SUSPEND;
 		break;
 	case PM_DEVICE_STATE_ACTIVE:
-		if (dev->pm->state == PM_DEVICE_STATE_ACTIVE) {
+		if (pm->state == PM_DEVICE_STATE_ACTIVE) {
 			return -EALREADY;
 		}
 
 		action = PM_DEVICE_ACTION_RESUME;
 		break;
 	case PM_DEVICE_STATE_LOW_POWER:
-		if (dev->pm->state == state) {
+		if (pm->state == state) {
 			return -EALREADY;
 		}
 
 		action = PM_DEVICE_ACTION_LOW_POWER;
 		break;
 	case PM_DEVICE_STATE_OFF:
-		if (dev->pm->state == state) {
+		if (pm->state == state) {
 			return -EALREADY;
 		}
@@ -144,7 +145,7 @@ int pm_device_state_set(const struct device *dev,
 		return ret;
 	}
 
-	dev->pm->state = state;
+	pm->state = state;
 
 	return 0;
 }
@@ -152,11 +153,13 @@ int pm_device_state_set(const struct device *dev,
 int pm_device_state_get(const struct device *dev,
 			enum pm_device_state *state)
 {
+	struct pm_device *pm = dev->pm;
+
 	if (dev->pm_control == NULL) {
 		return -ENOSYS;
 	}
 
-	*state = dev->pm->state;
+	*state = pm->state;
 
 	return 0;
 }
@@ -169,10 +172,13 @@ bool pm_device_is_any_busy(void)
 	devc = z_device_get_all_static(&devs);
 
 	for (const struct device *dev = devs; dev < (devs + devc); dev++) {
+		struct pm_device *pm = dev->pm;
+
 		if (dev->pm_control == NULL) {
 			continue;
 		}
 
-		if (atomic_test_bit(&dev->pm->flags, PM_DEVICE_FLAG_BUSY)) {
+		if (atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_BUSY)) {
 			return true;
 		}
 	}
@@ -182,37 +188,47 @@ bool pm_device_is_any_busy(void)
 bool pm_device_is_busy(const struct device *dev)
 {
+	struct pm_device *pm = dev->pm;
+
 	if (dev->pm_control == NULL) {
 		return false;
 	}
 
-	return atomic_test_bit(&dev->pm->flags, PM_DEVICE_FLAG_BUSY);
+	return atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_BUSY);
 }
 
 void pm_device_busy_set(const struct device *dev)
 {
+	struct pm_device *pm = dev->pm;
+
 	if (dev->pm_control == NULL) {
 		return;
 	}
 
-	atomic_set_bit(&dev->pm->flags, PM_DEVICE_FLAG_BUSY);
+	atomic_set_bit(&pm->flags, PM_DEVICE_FLAG_BUSY);
 }
 
 void pm_device_busy_clear(const struct device *dev)
 {
+	struct pm_device *pm = dev->pm;
+
 	if (dev->pm_control == NULL) {
 		return;
 	}
 
-	atomic_clear_bit(&dev->pm->flags, PM_DEVICE_FLAG_BUSY);
+	atomic_clear_bit(&pm->flags, PM_DEVICE_FLAG_BUSY);
 }
 
 bool pm_device_wakeup_enable(struct device *dev, bool enable)
 {
 	atomic_val_t flags, new_flags;
+	struct pm_device *pm = dev->pm;
 
 	if (dev->pm_control == NULL) {
 		return false;
 	}
 
-	flags = atomic_get(&dev->pm->flags);
+	flags = atomic_get(&pm->flags);
 
 	if ((flags & BIT(PM_DEVICE_FLAGS_WS_CAPABLE)) == 0U) {
 		return false;
@@ -225,23 +241,29 @@ bool pm_device_wakeup_enable(struct device *dev, bool enable)
 		new_flags = flags & ~BIT(PM_DEVICE_FLAGS_WS_ENABLED);
 	}
 
-	return atomic_cas(&dev->pm->flags, flags, new_flags);
+	return atomic_cas(&pm->flags, flags, new_flags);
 }
 
 bool pm_device_wakeup_is_enabled(const struct device *dev)
 {
+	struct pm_device *pm = dev->pm;
+
 	if (dev->pm_control == NULL) {
 		return false;
 	}
 
-	return atomic_test_bit(&dev->pm->flags,
+	return atomic_test_bit(&pm->flags,
 			       PM_DEVICE_FLAGS_WS_ENABLED);
 }
 
 bool pm_device_wakeup_is_capable(const struct device *dev)
 {
+	struct pm_device *pm = dev->pm;
+
 	if (dev->pm_control == NULL) {
 		return false;
 	}
 
-	return atomic_test_bit(&dev->pm->flags,
+	return atomic_test_bit(&pm->flags,
 			       PM_DEVICE_FLAGS_WS_CAPABLE);
 }

Changed file 2 of 2:

@@ -21,16 +21,16 @@ static void pm_device_runtime_state_set(struct pm_device *pm)
 	int ret = 0;
 
 	/* Clear transitioning flags */
-	atomic_clear_bit(&dev->pm->flags, PM_DEVICE_FLAG_TRANSITIONING);
+	atomic_clear_bit(&pm->flags, PM_DEVICE_FLAG_TRANSITIONING);
 
-	switch (dev->pm->state) {
+	switch (pm->state) {
 	case PM_DEVICE_STATE_ACTIVE:
-		if ((dev->pm->usage == 0) && dev->pm->enable) {
+		if ((pm->usage == 0) && pm->enable) {
 			ret = pm_device_state_set(dev, PM_DEVICE_STATE_SUSPENDED);
 		}
 		break;
 	case PM_DEVICE_STATE_SUSPENDED:
-		if ((dev->pm->usage > 0) || !dev->pm->enable) {
+		if ((pm->usage > 0) || !pm->enable) {
 			ret = pm_device_state_set(dev, PM_DEVICE_STATE_ACTIVE);
 		}
 		break;
@@ -44,7 +44,7 @@ static void pm_device_runtime_state_set(struct pm_device *pm)
 	 * This function returns the number of woken threads on success. There
	 * is nothing we can do with this information. Just ignoring it.
 	 */
-	(void)k_condvar_broadcast(&dev->pm->condvar);
+	(void)k_condvar_broadcast(&pm->condvar);
 }
 
 static void pm_work_handler(struct k_work *work)
@@ -61,6 +61,7 @@ static int pm_device_request(const struct device *dev,
 			     enum pm_device_state state, uint32_t pm_flags)
 {
 	int ret = 0;
+	struct pm_device *pm = dev->pm;
 
 	SYS_PORT_TRACING_FUNC_ENTER(pm, device_request, dev, state);
@@ -70,9 +71,9 @@ static int pm_device_request(const struct device *dev,
 	if (k_is_pre_kernel()) {
 		if (state == PM_DEVICE_STATE_ACTIVE) {
-			dev->pm->usage++;
+			pm->usage++;
 		} else {
-			dev->pm->usage--;
+			pm->usage--;
 		}
 
 		/* If we are being called before the kernel was initialized
@@ -86,34 +87,34 @@ static int pm_device_request(const struct device *dev,
 		 * cases, for example, like the pinmux being initialized before
 		 * the gpio. Lets just power on/off the device.
 		 */
-		if (dev->pm->usage == 1) {
+		if (pm->usage == 1) {
 			(void)pm_device_state_set(dev, PM_DEVICE_STATE_ACTIVE);
-		} else if (dev->pm->usage == 0) {
+		} else if (pm->usage == 0) {
 			(void)pm_device_state_set(dev, PM_DEVICE_STATE_SUSPENDED);
 		}
 		goto out;
 	}
 
-	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
+	(void)k_mutex_lock(&pm->lock, K_FOREVER);
 
-	if (!dev->pm->enable) {
+	if (!pm->enable) {
 		ret = -ENOTSUP;
 		goto out_unlock;
 	}
 
 	if (state == PM_DEVICE_STATE_ACTIVE) {
-		dev->pm->usage++;
-		if (dev->pm->usage > 1) {
+		pm->usage++;
+		if (pm->usage > 1) {
 			goto out_unlock;
 		}
 	} else {
 		/* Check if it is already 0 to avoid an underflow */
-		if (dev->pm->usage == 0) {
+		if (pm->usage == 0) {
 			goto out_unlock;
 		}
 
-		dev->pm->usage--;
-		if (dev->pm->usage > 0) {
+		pm->usage--;
+		if (pm->usage > 0) {
 			goto out_unlock;
 		}
 	}
@@ -121,31 +122,31 @@ static int pm_device_request(const struct device *dev,
 	/* Return in case of Async request */
 	if (pm_flags & PM_DEVICE_ASYNC) {
-		atomic_set_bit(&dev->pm->flags, PM_DEVICE_FLAG_TRANSITIONING);
-		(void)k_work_schedule(&dev->pm->work, K_NO_WAIT);
+		atomic_set_bit(&pm->flags, PM_DEVICE_FLAG_TRANSITIONING);
+		(void)k_work_schedule(&pm->work, K_NO_WAIT);
 		goto out_unlock;
 	}
 
-	while ((k_work_delayable_is_pending(&dev->pm->work)) ||
-	       atomic_test_bit(&dev->pm->flags, PM_DEVICE_FLAG_TRANSITIONING)) {
-		ret = k_condvar_wait(&dev->pm->condvar, &dev->pm->lock,
+	while ((k_work_delayable_is_pending(&pm->work)) ||
+	       atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_TRANSITIONING)) {
+		ret = k_condvar_wait(&pm->condvar, &pm->lock,
 				     K_FOREVER);
 		if (ret != 0) {
 			break;
 		}
 	}
 
-	pm_device_runtime_state_set(dev->pm);
+	pm_device_runtime_state_set(pm);
 
 	/*
-	 * dev->pm->state was set in pm_device_runtime_state_set(). As the
+	 * pm->state was set in pm_device_runtime_state_set(). As the
 	 * device may not have been properly changed to the state or
 	 * another thread we check it here before returning.
 	 */
-	ret = state == dev->pm->state ? 0 : -EIO;
+	ret = state == pm->state ? 0 : -EIO;
 
 out_unlock:
-	(void)k_mutex_unlock(&dev->pm->lock);
+	(void)k_mutex_unlock(&pm->lock);
 out:
 	SYS_PORT_TRACING_FUNC_EXIT(pm, device_request, dev, ret);
 
 	return ret;
@@ -173,73 +174,78 @@ int pm_device_put_async(const struct device *dev)
 void pm_device_enable(const struct device *dev)
 {
+	struct pm_device *pm = dev->pm;
+
 	SYS_PORT_TRACING_FUNC_ENTER(pm, device_enable, dev);
 	if (k_is_pre_kernel()) {
-		dev->pm->dev = dev;
+		pm->dev = dev;
 		if (dev->pm_control != NULL) {
-			dev->pm->enable = true;
-			dev->pm->state = PM_DEVICE_STATE_SUSPENDED;
-			k_work_init_delayable(&dev->pm->work, pm_work_handler);
+			pm->enable = true;
+			pm->state = PM_DEVICE_STATE_SUSPENDED;
+			k_work_init_delayable(&pm->work, pm_work_handler);
 		}
 		goto out;
 	}
 
-	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
+	(void)k_mutex_lock(&pm->lock, K_FOREVER);
 	if (dev->pm_control == NULL) {
-		dev->pm->enable = false;
+		pm->enable = false;
 		goto out_unlock;
 	}
 
-	dev->pm->enable = true;
+	pm->enable = true;
 	/* During the driver init, device can set the
 	 * PM state accordingly. For later cases we need
 	 * to check the usage and set the device PM state.
 	 */
-	if (!dev->pm->dev) {
-		dev->pm->dev = dev;
-		dev->pm->state = PM_DEVICE_STATE_SUSPENDED;
-		k_work_init_delayable(&dev->pm->work, pm_work_handler);
+	if (!pm->dev) {
+		pm->dev = dev;
+		pm->state = PM_DEVICE_STATE_SUSPENDED;
+		k_work_init_delayable(&pm->work, pm_work_handler);
 	} else {
-		k_work_schedule(&dev->pm->work, K_NO_WAIT);
+		k_work_schedule(&pm->work, K_NO_WAIT);
 	}
 
 out_unlock:
-	(void)k_mutex_unlock(&dev->pm->lock);
+	(void)k_mutex_unlock(&pm->lock);
 out:
 	SYS_PORT_TRACING_FUNC_EXIT(pm, device_enable, dev);
 }
 
 void pm_device_disable(const struct device *dev)
 {
+	struct pm_device *pm = dev->pm;
+
 	SYS_PORT_TRACING_FUNC_ENTER(pm, device_disable, dev);
 	__ASSERT(k_is_pre_kernel() == false, "Device should not be disabled "
 		 "before kernel is initialized");
 
-	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
-	if (dev->pm->enable) {
-		dev->pm->enable = false;
+	(void)k_mutex_lock(&pm->lock, K_FOREVER);
+	if (pm->enable) {
+		pm->enable = false;
 		/* Bring up the device before disabling the Idle PM */
-		k_work_schedule(&dev->pm->work, K_NO_WAIT);
+		k_work_schedule(&pm->work, K_NO_WAIT);
 	}
-	(void)k_mutex_unlock(&dev->pm->lock);
+	(void)k_mutex_unlock(&pm->lock);
 	SYS_PORT_TRACING_FUNC_EXIT(pm, device_disable, dev);
 }
 
 int pm_device_wait(const struct device *dev, k_timeout_t timeout)
 {
 	int ret = 0;
+	struct pm_device *pm = dev->pm;
 
-	k_mutex_lock(&dev->pm->lock, K_FOREVER);
-	while ((k_work_delayable_is_pending(&dev->pm->work)) ||
-	       atomic_test_bit(&dev->pm->flags, PM_DEVICE_FLAG_TRANSITIONING)) {
-		ret = k_condvar_wait(&dev->pm->condvar, &dev->pm->lock,
+	k_mutex_lock(&pm->lock, K_FOREVER);
+	while ((k_work_delayable_is_pending(&pm->work)) ||
+	       atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_TRANSITIONING)) {
+		ret = k_condvar_wait(&pm->condvar, &pm->lock,
 				     timeout);
 		if (ret != 0) {
 			break;
 		}
 	}
-	k_mutex_unlock(&dev->pm->lock);
+	k_mutex_unlock(&pm->lock);
 
 	return ret;
 }