pm: device_runtime: Get rid of atomic for state
Since we use a mutex to protect the critical sections, and mutexes are reentrant, we can get rid of the atomic for the state: the mutex can simply be locked inside device_pm_callback as well. Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
This commit is contained in:
parent
69a34e5947
commit
d325642892
3 changed files with 14 additions and 14 deletions
|
@ -114,7 +114,7 @@ struct pm_device {
|
||||||
/** Device usage count */
|
/** Device usage count */
|
||||||
uint32_t usage;
|
uint32_t usage;
|
||||||
/** Device idle internal power state */
|
/** Device idle internal power state */
|
||||||
atomic_t state;
|
uint8_t state;
|
||||||
/** Work object for asynchronous calls */
|
/** Work object for asynchronous calls */
|
||||||
struct k_work_delayable work;
|
struct k_work_delayable work;
|
||||||
/** Event conditional var to listen to the sync request events */
|
/** Event conditional var to listen to the sync request events */
|
||||||
|
|
|
@ -33,7 +33,7 @@ static int dummy_open(const struct device *dev)
|
||||||
|
|
||||||
(void) pm_device_wait(dev, K_FOREVER);
|
(void) pm_device_wait(dev, K_FOREVER);
|
||||||
|
|
||||||
if (atomic_get(&dev->pm->state) == PM_DEVICE_STATE_ACTIVE) {
|
if (dev->pm->state == PM_DEVICE_STATE_ACTIVE) {
|
||||||
printk("Dummy device resumed\n");
|
printk("Dummy device resumed\n");
|
||||||
ret = 0;
|
ret = 0;
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -24,7 +24,9 @@ static void device_pm_callback(const struct device *dev,
|
||||||
{
|
{
|
||||||
__ASSERT(retval == 0, "Device set power state failed");
|
__ASSERT(retval == 0, "Device set power state failed");
|
||||||
|
|
||||||
atomic_set(&dev->pm->state, *state);
|
(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
|
||||||
|
dev->pm->state = *state;
|
||||||
|
(void)k_mutex_unlock(&dev->pm->lock);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This function returns the number of woken threads on success. There
|
* This function returns the number of woken threads on success. There
|
||||||
|
@ -42,11 +44,10 @@ static void pm_work_handler(struct k_work *work)
|
||||||
|
|
||||||
(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
|
(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
|
||||||
|
|
||||||
switch (atomic_get(&dev->pm->state)) {
|
switch (dev->pm->state) {
|
||||||
case PM_DEVICE_STATE_ACTIVE:
|
case PM_DEVICE_STATE_ACTIVE:
|
||||||
if ((dev->pm->usage == 0) && dev->pm->enable) {
|
if ((dev->pm->usage == 0) && dev->pm->enable) {
|
||||||
atomic_set(&dev->pm->state,
|
dev->pm->state = PM_DEVICE_STATE_SUSPENDING;
|
||||||
PM_DEVICE_STATE_SUSPENDING);
|
|
||||||
ret = pm_device_state_set(dev, PM_DEVICE_STATE_SUSPEND,
|
ret = pm_device_state_set(dev, PM_DEVICE_STATE_SUSPEND,
|
||||||
device_pm_callback, NULL);
|
device_pm_callback, NULL);
|
||||||
} else {
|
} else {
|
||||||
|
@ -55,8 +56,7 @@ static void pm_work_handler(struct k_work *work)
|
||||||
break;
|
break;
|
||||||
case PM_DEVICE_STATE_SUSPEND:
|
case PM_DEVICE_STATE_SUSPEND:
|
||||||
if ((dev->pm->usage > 0) || !dev->pm->enable) {
|
if ((dev->pm->usage > 0) || !dev->pm->enable) {
|
||||||
atomic_set(&dev->pm->state,
|
dev->pm->state = PM_DEVICE_STATE_RESUMING;
|
||||||
PM_DEVICE_STATE_RESUMING);
|
|
||||||
ret = pm_device_state_set(dev, PM_DEVICE_STATE_ACTIVE,
|
ret = pm_device_state_set(dev, PM_DEVICE_STATE_ACTIVE,
|
||||||
device_pm_callback, NULL);
|
device_pm_callback, NULL);
|
||||||
} else {
|
} else {
|
||||||
|
@ -159,8 +159,8 @@ static int pm_device_request(const struct device *dev,
|
||||||
* may not have been properly changed to the target_state or another
|
* may not have been properly changed to the target_state or another
|
||||||
* thread we check it here before returning.
|
* thread we check it here before returning.
|
||||||
*/
|
*/
|
||||||
ret = target_state == atomic_get(&dev->pm->state) ? 0 : -EIO;
|
(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
|
||||||
goto out;
|
ret = target_state == dev->pm->state ? 0 : -EIO;
|
||||||
|
|
||||||
out_unlock:
|
out_unlock:
|
||||||
(void)k_mutex_unlock(&dev->pm->lock);
|
(void)k_mutex_unlock(&dev->pm->lock);
|
||||||
|
@ -198,7 +198,7 @@ void pm_device_enable(const struct device *dev)
|
||||||
dev->pm->dev = dev;
|
dev->pm->dev = dev;
|
||||||
if (dev->pm_control != NULL) {
|
if (dev->pm_control != NULL) {
|
||||||
dev->pm->enable = true;
|
dev->pm->enable = true;
|
||||||
atomic_set(&dev->pm->state, PM_DEVICE_STATE_SUSPEND);
|
dev->pm->state = PM_DEVICE_STATE_SUSPEND;
|
||||||
k_work_init_delayable(&dev->pm->work, pm_work_handler);
|
k_work_init_delayable(&dev->pm->work, pm_work_handler);
|
||||||
}
|
}
|
||||||
goto out;
|
goto out;
|
||||||
|
@ -218,7 +218,7 @@ void pm_device_enable(const struct device *dev)
|
||||||
*/
|
*/
|
||||||
if (!dev->pm->dev) {
|
if (!dev->pm->dev) {
|
||||||
dev->pm->dev = dev;
|
dev->pm->dev = dev;
|
||||||
atomic_set(&dev->pm->state, PM_DEVICE_STATE_SUSPEND);
|
dev->pm->state = PM_DEVICE_STATE_SUSPEND;
|
||||||
k_work_init_delayable(&dev->pm->work, pm_work_handler);
|
k_work_init_delayable(&dev->pm->work, pm_work_handler);
|
||||||
} else {
|
} else {
|
||||||
k_work_schedule(&dev->pm->work, K_NO_WAIT);
|
k_work_schedule(&dev->pm->work, K_NO_WAIT);
|
||||||
|
@ -252,8 +252,8 @@ int pm_device_wait(const struct device *dev, k_timeout_t timeout)
|
||||||
|
|
||||||
k_mutex_lock(&dev->pm->lock, K_FOREVER);
|
k_mutex_lock(&dev->pm->lock, K_FOREVER);
|
||||||
while ((k_work_delayable_is_pending(&dev->pm->work)) ||
|
while ((k_work_delayable_is_pending(&dev->pm->work)) ||
|
||||||
(atomic_get(&dev->pm->state) == PM_DEVICE_STATE_SUSPENDING) ||
|
(dev->pm->state == PM_DEVICE_STATE_SUSPENDING) ||
|
||||||
(atomic_get(&dev->pm->state) == PM_DEVICE_STATE_RESUMING)) {
|
(dev->pm->state == PM_DEVICE_STATE_RESUMING)) {
|
||||||
ret = k_condvar_wait(&dev->pm->condvar, &dev->pm->lock,
|
ret = k_condvar_wait(&dev->pm->condvar, &dev->pm->lock,
|
||||||
timeout);
|
timeout);
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue