pm: device_runtime: get rid of the spinlock

Protect critical sections using a mutex instead of a spinlock. A mutex
is required to use the condition variable, and since we need to
atomically check the PM state and the workqueue status before waiting
on the condition, both must be protected by the same mutex.
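
For context, this is the classic condition-variable discipline: the
predicate and the wait on it must be guarded by one and the same mutex,
and k_condvar_wait() only accepts a struct k_mutex, so a k_spinlock
cannot fill that role. A minimal sketch of the pattern (pm_like,
wait_idle() and mark_idle() are hypothetical names, not the actual
driver code):

#include <kernel.h>

struct pm_like {
	struct k_mutex lock;	/* guards 'busy' and pairs with the condvar */
	struct k_condvar done;
	bool busy;
};

/* Waiter: check the predicate and sleep atomically under the one mutex.
 * Assumes 'lock' and 'done' were set up with k_mutex_init() and
 * k_condvar_init().
 */
static int wait_idle(struct pm_like *obj, k_timeout_t timeout)
{
	int ret = 0;

	k_mutex_lock(&obj->lock, K_FOREVER);
	while (obj->busy) {
		/* k_condvar_wait() releases 'lock' while sleeping and
		 * re-acquires it before returning, so the predicate is
		 * always re-checked under the lock.
		 */
		ret = k_condvar_wait(&obj->done, &obj->lock, timeout);
		if (ret != 0) {
			break;
		}
	}
	k_mutex_unlock(&obj->lock);

	return ret;
}

/* Signaler: flip the predicate and broadcast under the same mutex, so
 * waiters never miss the change between their check and their sleep.
 */
static void mark_idle(struct pm_like *obj)
{
	k_mutex_lock(&obj->lock, K_FOREVER);
	obj->busy = false;
	(void)k_condvar_broadcast(&obj->done);
	k_mutex_unlock(&obj->lock);
}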

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
Author:    Flavio Ceolin <flavio.ceolin@intel.com>
Date:      2021-05-20 14:47:04 -07:00
Committer: Anas Nashif
Commit:    97281b3862

3 changed files with 14 additions and 23 deletions

File 1 of 3 (struct pm_device definition):

@@ -105,7 +105,7 @@ struct pm_device {
 	/** Pointer to the device */
 	const struct device *dev;
 	/** Lock to synchronize the get/put operations */
-	struct k_spinlock lock;
+	struct k_mutex lock;
 	/* Following are packed fields protected by #lock. */
 	/** Device pm enable flag */
 	bool enable : 1;
@@ -119,8 +119,6 @@ struct pm_device {
 	struct k_work_delayable work;
 	/** Event conditional var to listen to the sync request events */
 	struct k_condvar condvar;
-	/** Condvar mutex */
-	struct k_mutex condvar_lock;
 };
 
 /** Bit position in device_pm::atomic_flags that records whether the

File 2 of 3 (device_pm_state_init()):

@@ -30,9 +30,8 @@ static inline void device_pm_state_init(const struct device *dev)
 #ifdef CONFIG_PM_DEVICE
 	*dev->pm = (struct pm_device){
 		.usage = ATOMIC_INIT(0),
-		.lock = {},
+		.lock = Z_MUTEX_INITIALIZER(dev->pm->lock),
 		.condvar = Z_CONDVAR_INITIALIZER(dev->pm->condvar),
-		.condvar_lock = Z_MUTEX_INITIALIZER(dev->pm->condvar_lock),
 	};
 #endif /* CONFIG_PM_DEVICE */
 }
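
The compound literal sets up the mutex and condvar in place with the
build-time Z_MUTEX_INITIALIZER()/Z_CONDVAR_INITIALIZER() macros because
the whole struct is assigned at once. For illustration only, an
equivalent runtime form using the public init calls (a sketch, not what
the tree does) would be:

#ifdef CONFIG_PM_DEVICE
	atomic_set(&dev->pm->usage, 0);
	/* Both init calls unconditionally succeed and return 0. */
	(void)k_mutex_init(&dev->pm->lock);
	(void)k_condvar_init(&dev->pm->condvar);
#endif /* CONFIG_PM_DEVICE */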

File 3 of 3 (the device runtime PM implementation):

@@ -39,9 +39,8 @@ static void pm_work_handler(struct k_work *work)
 					  struct pm_device, work);
 	const struct device *dev = pm->dev;
 	int ret = 0;
-	k_spinlock_key_t key;
 
-	key = k_spin_lock(&dev->pm->lock);
+	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
 
 	switch (atomic_get(&dev->pm->state)) {
 	case PM_DEVICE_STATE_ACTIVE:
@@ -83,14 +82,13 @@ handler_out:
 	 */
 	(void)k_condvar_broadcast(&dev->pm->condvar);
 end:
-	k_spin_unlock(&dev->pm->lock, key);
+	(void)k_mutex_unlock(&dev->pm->lock);
 }
 
 static int pm_device_request(const struct device *dev,
 			     uint32_t target_state, uint32_t pm_flags)
 {
 	int ret = 0;
-	k_spinlock_key_t key;
 
 	SYS_PORT_TRACING_FUNC_ENTER(pm, device_request, dev, target_state);
 	__ASSERT((target_state == PM_DEVICE_STATE_ACTIVE) ||
@@ -127,7 +125,7 @@ static int pm_device_request(const struct device *dev,
 		goto out;
 	}
 
-	key = k_spin_lock(&dev->pm->lock);
+	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
 
 	if (!dev->pm->enable) {
 		ret = -ENOTSUP;
@@ -153,7 +151,7 @@ static int pm_device_request(const struct device *dev,
 		goto out_unlock;
 	}
 
-	k_spin_unlock(&dev->pm->lock, key);
+	(void)k_mutex_unlock(&dev->pm->lock);
 	pm_device_wait(dev, K_FOREVER);
 
 	/*
@@ -165,7 +163,7 @@ static int pm_device_request(const struct device *dev,
 	goto out;
 
 out_unlock:
-	k_spin_unlock(&dev->pm->lock, key);
+	(void)k_mutex_unlock(&dev->pm->lock);
 out:
 	SYS_PORT_TRACING_FUNC_EXIT(pm, device_request, dev, ret);
 	return ret;
@@ -195,8 +193,6 @@ int pm_device_put_sync(const struct device *dev)
 
 void pm_device_enable(const struct device *dev)
 {
-	k_spinlock_key_t key;
-
 	SYS_PORT_TRACING_FUNC_ENTER(pm, device_enable, dev);
 	if (k_is_pre_kernel()) {
 		dev->pm->dev = dev;
@@ -208,7 +204,7 @@ void pm_device_enable(const struct device *dev)
 		goto out;
 	}
 
-	key = k_spin_lock(&dev->pm->lock);
+	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
 	if (dev->pm_control == NULL) {
 		dev->pm->enable = false;
 		goto out_unlock;
@@ -229,26 +225,24 @@ void pm_device_enable(const struct device *dev)
 	}
 
 out_unlock:
-	k_spin_unlock(&dev->pm->lock, key);
+	(void)k_mutex_unlock(&dev->pm->lock);
 out:
 	SYS_PORT_TRACING_FUNC_EXIT(pm, device_enable, dev);
 }
 
 void pm_device_disable(const struct device *dev)
 {
-	k_spinlock_key_t key;
-
 	SYS_PORT_TRACING_FUNC_ENTER(pm, device_disable, dev);
 	__ASSERT(k_is_pre_kernel() == false, "Device should not be disabled "
 		 "before kernel is initialized");
-	key = k_spin_lock(&dev->pm->lock);
+	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
 	if (dev->pm->enable) {
 		dev->pm->enable = false;
 
 		/* Bring up the device before disabling the Idle PM */
 		k_work_schedule(&dev->pm->work, K_NO_WAIT);
 	}
-	k_spin_unlock(&dev->pm->lock, key);
+	(void)k_mutex_unlock(&dev->pm->lock);
 
 	SYS_PORT_TRACING_FUNC_EXIT(pm, device_disable, dev);
 }
@@ -256,17 +250,17 @@ int pm_device_wait(const struct device *dev, k_timeout_t timeout)
 {
 	int ret = 0;
 
-	k_mutex_lock(&dev->pm->condvar_lock, K_FOREVER);
+	k_mutex_lock(&dev->pm->lock, K_FOREVER);
 	while ((k_work_delayable_is_pending(&dev->pm->work)) ||
 	       (atomic_get(&dev->pm->state) == PM_DEVICE_STATE_SUSPENDING) ||
 	       (atomic_get(&dev->pm->state) == PM_DEVICE_STATE_RESUMING)) {
-		ret = k_condvar_wait(&dev->pm->condvar, &dev->pm->condvar_lock,
+		ret = k_condvar_wait(&dev->pm->condvar, &dev->pm->lock,
 				     timeout);
 		if (ret != 0) {
 			break;
 		}
 	}
-	k_mutex_unlock(&dev->pm->condvar_lock);
+	k_mutex_unlock(&dev->pm->lock);
 
 	return ret;
 }
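
As a usage example, a thread can queue an asynchronous state change and
then block in pm_device_wait() until the work item has run and the
transition has settled. A sketch, assuming the async pm_device_put()
variant from this generation of the API:

#include <device.h>
#include <pm/device_runtime.h>

void release_when_settled(const struct device *dev)
{
	/* Queues the suspend request on the workqueue and returns. */
	(void)pm_device_put(dev);

	/* ... other work while pm_work_handler() runs ... */

	/* Blocks in the condvar loop above until the work item is no
	 * longer pending and the device has left the SUSPENDING and
	 * RESUMING states.
	 */
	(void)pm_device_wait(dev, K_FOREVER);
}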