power: device_pm: Fix concurrence issues

The sync API was using k_poll_signal, and under certain conditions it is
possible for multiple threads to wait on the same signal, leading to
undefined behavior.

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
This commit is contained in:
Flavio Ceolin 2021-04-26 22:37:42 -07:00 committed by Anas Nashif
commit 8705c688e2
4 changed files with 36 additions and 46 deletions

View file

@ -98,11 +98,9 @@ struct pm_device {
/** Device idle internal power state */
atomic_t fsm_state;
/** Work object for asynchronous calls */
struct k_work work;
/** Event object to listen to the sync request events */
struct k_poll_event event;
/** Signal to notify the Async API callers */
struct k_poll_signal signal;
struct k_work_delayable work;
/** Event conditional var to listen to the sync request events */
struct k_condvar condvar;
};
/** Bit position in device_pm::atomic_flags that records whether the

View file

@ -31,11 +31,7 @@ static inline void device_pm_state_init(const struct device *dev)
*dev->pm = (struct pm_device){
.usage = ATOMIC_INIT(0),
.lock = Z_SEM_INITIALIZER(dev->pm->lock, 1, 1),
.signal = K_POLL_SIGNAL_INITIALIZER(dev->pm->signal),
.event = K_POLL_EVENT_INITIALIZER(
K_POLL_TYPE_SIGNAL,
K_POLL_MODE_NOTIFY_ONLY,
&dev->pm->signal),
.condvar = Z_CONDVAR_INITIALIZER(dev->pm->condvar),
};
#endif /* CONFIG_PM_DEVICE */
}

View file

@ -9,14 +9,13 @@
#include "dummy_parent.h"
#include "dummy_driver.h"
static struct k_poll_event async_evt;
uint32_t device_power_state;
static const struct device *parent;
static int dummy_open(const struct device *dev)
{
int ret;
int signaled = 0, result;
struct k_mutex wait_mutex;
printk("open()\n");
@ -33,16 +32,12 @@ static int dummy_open(const struct device *dev)
printk("Async wakeup request queued\n");
do {
(void)k_poll(&async_evt, 1, K_FOREVER);
k_poll_signal_check(&dev->pm->signal,
&signaled, &result);
} while (!signaled);
k_mutex_init(&wait_mutex);
k_mutex_lock(&wait_mutex, K_FOREVER);
(void) k_condvar_wait(&dev->pm->condvar, &wait_mutex, K_FOREVER);
k_mutex_unlock(&wait_mutex);
async_evt.state = K_POLL_STATE_NOT_READY;
k_poll_signal_reset(&dev->pm->signal);
if (result == PM_DEVICE_ACTIVE_STATE) {
if (atomic_get(&dev->pm->fsm_state) == PM_DEVICE_ACTIVE_STATE) {
printk("Dummy device resumed\n");
ret = 0;
} else {
@ -159,8 +154,6 @@ int dummy_init(const struct device *dev)
pm_device_enable(dev);
device_power_state = PM_DEVICE_ACTIVE_STATE;
k_poll_event_init(&async_evt, K_POLL_TYPE_SIGNAL,
K_POLL_MODE_NOTIFY_ONLY, &dev->pm->signal);
return 0;
}

View file

@ -40,7 +40,11 @@ static void device_pm_callback(const struct device *dev,
PM_DEVICE_STATE_SUSPENDED);
}
k_work_submit(&dev->pm->work);
/*
* This function returns the number of woken threads on success. There
* is nothing we can do with this information. Just ignore it.
*/
(void)k_condvar_broadcast(&dev->pm->condvar);
}
static void pm_work_handler(struct k_work *work)
@ -49,7 +53,6 @@ static void pm_work_handler(struct k_work *work)
struct pm_device, work);
const struct device *dev = pm->dev;
int ret = 0;
uint8_t pm_state;
switch (atomic_get(&dev->pm->fsm_state)) {
case PM_DEVICE_STATE_ACTIVE:
@ -60,7 +63,6 @@ static void pm_work_handler(struct k_work *work)
ret = pm_device_state_set(dev, PM_DEVICE_SUSPEND_STATE,
device_pm_callback, NULL);
} else {
pm_state = PM_DEVICE_ACTIVE_STATE;
goto fsm_out;
}
break;
@ -72,7 +74,6 @@ static void pm_work_handler(struct k_work *work)
ret = pm_device_state_set(dev, PM_DEVICE_ACTIVE_STATE,
device_pm_callback, NULL);
} else {
pm_state = PM_DEVICE_SUSPEND_STATE;
goto fsm_out;
}
break;
@ -85,17 +86,20 @@ static void pm_work_handler(struct k_work *work)
}
__ASSERT(ret == 0, "Set Power state error");
return;
fsm_out:
k_poll_signal_raise(&dev->pm->signal, pm_state);
/*
 * This function returns the number of woken threads on success. There
 * is nothing we can do with this information. Just ignore it.
 */
(void)k_condvar_broadcast(&dev->pm->condvar);
}
static int pm_device_request(const struct device *dev,
uint32_t target_state, uint32_t pm_flags)
{
int result, signaled = 0;
struct k_mutex request_mutex;
__ASSERT((target_state == PM_DEVICE_ACTIVE_STATE) ||
(target_state == PM_DEVICE_SUSPEND_STATE),
@ -121,25 +125,24 @@ static int pm_device_request(const struct device *dev,
return 0;
}
k_work_submit(&dev->pm->work);
(void)k_work_schedule(&dev->pm->work, K_NO_WAIT);
/* Return in case of Async request */
if (pm_flags & PM_DEVICE_ASYNC) {
return 0;
}
/* In case of a sync request, wait for the completion event */
do {
(void)k_poll(&dev->pm->event, 1, K_FOREVER);
k_poll_signal_check(&dev->pm->signal,
&signaled, &result);
} while (!signaled);
k_mutex_init(&request_mutex);
k_mutex_lock(&request_mutex, K_FOREVER);
(void)k_condvar_wait(&dev->pm->condvar, &request_mutex, K_FOREVER);
k_mutex_unlock(&request_mutex);
dev->pm->event.state = K_POLL_STATE_NOT_READY;
k_poll_signal_reset(&dev->pm->signal);
return result == target_state ? 0 : -EIO;
/*
 * dev->pm->fsm_state was set in device_pm_callback(). As the device
 * may not have been properly changed to target_state, or another
 * thread may have changed it meanwhile, we check it here before
 * returning.
 */
return target_state == atomic_get(&dev->pm->fsm_state) ? 0 : -EIO;
}
int pm_device_get(const struct device *dev)
@ -170,7 +173,7 @@ void pm_device_enable(const struct device *dev)
dev->pm->dev = dev;
dev->pm->enable = true;
atomic_set(&dev->pm->fsm_state, PM_DEVICE_STATE_SUSPENDED);
k_work_init(&dev->pm->work, pm_work_handler);
k_work_init_delayable(&dev->pm->work, pm_work_handler);
return;
}
@ -185,9 +188,9 @@ void pm_device_enable(const struct device *dev)
dev->pm->dev = dev;
atomic_set(&dev->pm->fsm_state,
PM_DEVICE_STATE_SUSPENDED);
k_work_init(&dev->pm->work, pm_work_handler);
k_work_init_delayable(&dev->pm->work, pm_work_handler);
} else {
k_work_submit(&dev->pm->work);
k_work_schedule(&dev->pm->work, K_NO_WAIT);
}
k_sem_give(&dev->pm->lock);
}
@ -200,6 +203,6 @@ void pm_device_disable(const struct device *dev)
k_sem_take(&dev->pm->lock, K_FOREVER);
dev->pm->enable = false;
/* Bring up the device before disabling the Idle PM */
k_work_submit(&dev->pm->work);
k_work_schedule(&dev->pm->work, K_NO_WAIT);
k_sem_give(&dev->pm->lock);
}