pm: device_runtime: Add helper to wait on async ops
Add a function that properly uses a mutex to check a condition before waiting on the condition variable.

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
parent b6c95edc63
commit 378a19d2a8
4 changed files with 41 additions and 0 deletions
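For context, here is a minimal caller-side sketch of how the new helper pairs with an asynchronous runtime PM request. This is illustrative only and not part of the commit; the include paths, the devicetree node label `mydev`, and the 100 ms timeout are assumptions.

/* Illustrative usage only, not part of this commit. Assumes a devicetree
 * node labeled "mydev" and CONFIG_PM_DEVICE_RUNTIME=y; include paths and
 * the timeout value are placeholders.
 */
#include <device.h>
#include <kernel.h>
#include <pm/device_runtime.h>

static int suspend_and_wait(void)
{
	const struct device *dev = DEVICE_DT_GET(DT_NODELABEL(mydev));
	int ret;

	/* Queue an asynchronous suspend request for the device. */
	ret = pm_device_put(dev);
	if (ret < 0) {
		return ret;
	}

	/* Block until the suspend operation finishes; returns immediately
	 * if no operation is in progress, or a negative errno on timeout.
	 */
	return pm_device_wait(dev, K_MSEC(100));
}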
@@ -119,6 +119,8 @@ struct pm_device {
 	struct k_work_delayable work;
 	/** Event conditional var to listen to the sync request events */
 	struct k_condvar condvar;
+	/** Condvar mutex */
+	struct k_mutex condvar_lock;
 };
 
 /** Bit position in device_pm::atomic_flags that records whether the
@@ -109,6 +109,23 @@ int pm_device_put(const struct device *dev);
  * @retval Errno Negative errno code if failure.
  */
 int pm_device_put_sync(const struct device *dev);
+
+/**
+ * @brief Wait on a device to finish an operation.
+ *
+ * The calling thread blocks until the device finishes a @ref pm_device_put or
+ * @ref pm_device_get operation. If there is no operation in progress
+ * this function will return immediately.
+ *
+ * @param dev Pointer to device structure of the specific device driver
+ * the caller is interested in.
+ * @param timeout The timeout passed to k_condvar_wait. If a timeout happens
+ * this function will return immediately.
+ *
+ * @retval 0 If successful.
+ * @retval Errno Negative errno code if failure.
+ */
+int pm_device_wait(const struct device *dev, k_timeout_t timeout);
 #else
 static inline void pm_device_enable(const struct device *dev) { }
 static inline void pm_device_disable(const struct device *dev) { }
@@ -116,6 +133,8 @@ static inline int pm_device_get(const struct device *dev) { return -ENOSYS; }
 static inline int pm_device_get_sync(const struct device *dev) { return -ENOSYS; }
 static inline int pm_device_put(const struct device *dev) { return -ENOSYS; }
 static inline int pm_device_put_sync(const struct device *dev) { return -ENOSYS; }
+static inline int pm_device_wait(const struct device *dev,
+				 k_timeout_t timeout) { return -ENOSYS; }
 #endif
 
 /** @} */
@@ -32,6 +32,7 @@ static inline void device_pm_state_init(const struct device *dev)
 		.usage = ATOMIC_INIT(0),
 		.lock = {},
 		.condvar = Z_CONDVAR_INITIALIZER(dev->pm->condvar),
+		.condvar_lock = Z_MUTEX_INITIALIZER(dev->pm->condvar_lock),
 	};
 #endif /* CONFIG_PM_DEVICE */
 }
@@ -224,3 +224,22 @@ void pm_device_disable(const struct device *dev)
 	k_spin_unlock(&dev->pm->lock, key);
 	SYS_PORT_TRACING_FUNC_EXIT(pm, device_disable, dev);
 }
+
+int pm_device_wait(const struct device *dev, k_timeout_t timeout)
+{
+	int ret = 0;
+
+	k_mutex_lock(&dev->pm->condvar_lock, K_FOREVER);
+	while ((k_work_delayable_is_pending(&dev->pm->work)) ||
+	       (atomic_get(&dev->pm->state) == PM_DEVICE_STATE_SUSPENDING) ||
+	       (atomic_get(&dev->pm->state) == PM_DEVICE_STATE_RESUMING)) {
+		ret = k_condvar_wait(&dev->pm->condvar, &dev->pm->condvar_lock,
+				     timeout);
+		if (ret != 0) {
+			break;
+		}
+	}
+	k_mutex_unlock(&dev->pm->condvar_lock);
+
+	return ret;
+}
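The loop above is the classic condition-variable pattern the commit message describes: the predicate (a pending work item or an in-flight SUSPENDING/RESUMING transition) is re-evaluated under condvar_lock every time k_condvar_wait() returns, so spurious wakeups are tolerated and a timeout simply ends the wait. For reference, the signaling side of such a pattern generally looks like the sketch below; this is a generic illustration with placeholder names (my_lock, my_condvar, op_in_progress), not code from this commit.

/* Generic condvar signaling sketch; placeholder names, not this commit's code. */
#include <kernel.h>

K_MUTEX_DEFINE(my_lock);
K_CONDVAR_DEFINE(my_condvar);
static bool op_in_progress;

static void operation_complete(void)
{
	k_mutex_lock(&my_lock, K_FOREVER);
	op_in_progress = false;            /* update the predicate first... */
	k_condvar_broadcast(&my_condvar);  /* ...then wake every waiter */
	k_mutex_unlock(&my_lock);
}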