pm: runtime: Move from mutexes to semaphores
To be able to call pm_device_runtime_put() from an IRQ context, move from mutexes to semaphores and force the async path when the put operation is called from an ISR.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
This commit is contained in:
parent
d38a6b4bcd
commit
a337bfc3b3
3 changed files with 24 additions and 17 deletions
|
@ -123,7 +123,7 @@ struct pm_device {
|
||||||
/** Pointer to the device */
|
/** Pointer to the device */
|
||||||
const struct device *dev;
|
const struct device *dev;
|
||||||
/** Lock to synchronize the get/put operations */
|
/** Lock to synchronize the get/put operations */
|
||||||
struct k_mutex lock;
|
struct k_sem lock;
|
||||||
/** Event var to listen to the sync request events */
|
/** Event var to listen to the sync request events */
|
||||||
struct k_event event;
|
struct k_event event;
|
||||||
/** Device usage count */
|
/** Device usage count */
|
||||||
|
@ -147,7 +147,7 @@ struct pm_device {
|
||||||
|
|
||||||
#ifdef CONFIG_PM_DEVICE_RUNTIME
|
#ifdef CONFIG_PM_DEVICE_RUNTIME
|
||||||
#define Z_PM_DEVICE_RUNTIME_INIT(obj) \
|
#define Z_PM_DEVICE_RUNTIME_INIT(obj) \
|
||||||
.lock = Z_MUTEX_INITIALIZER(obj.lock), \
|
.lock = Z_SEM_INITIALIZER(obj.lock, 1, 1), \
|
||||||
.event = Z_EVENT_INITIALIZER(obj.event),
|
.event = Z_EVENT_INITIALIZER(obj.event),
|
||||||
#else
|
#else
|
||||||
#define Z_PM_DEVICE_RUNTIME_INIT(obj)
|
#define Z_PM_DEVICE_RUNTIME_INIT(obj)
|
||||||
|
|
|
@ -114,13 +114,14 @@ int pm_device_runtime_put(const struct device *dev);
|
||||||
* this case, the function will be blocking (equivalent to
|
* this case, the function will be blocking (equivalent to
|
||||||
* pm_device_runtime_put()).
|
* pm_device_runtime_put()).
|
||||||
*
|
*
|
||||||
* @funcprops \pre_kernel_ok, \async
|
* @funcprops \pre_kernel_ok, \async, \isr_ok
|
||||||
*
|
*
|
||||||
* @param dev Device instance.
|
* @param dev Device instance.
|
||||||
*
|
*
|
||||||
* @retval 0 If it succeeds. In case device runtime PM is not enabled or not
|
* @retval 0 If it succeeds. In case device runtime PM is not enabled or not
|
||||||
* available this function will be a no-op and will also return 0.
|
* available this function will be a no-op and will also return 0.
|
||||||
* @retval -ENOTSUP If the device does not support PM.
|
* @retval -ENOTSUP If the device does not support PM.
|
||||||
|
* @retval -EBUSY If the device is busy.
|
||||||
* @retval -EALREADY If device is already suspended (can only happen if get/put
|
* @retval -EALREADY If device is already suspended (can only happen if get/put
|
||||||
* calls are unbalanced).
|
* calls are unbalanced).
|
||||||
*
|
*
|
||||||
|
|
|
@ -39,6 +39,7 @@ LOG_MODULE_DECLARE(pm_device, CONFIG_PM_DEVICE_LOG_LEVEL);
|
||||||
* @retval 0 If device has been suspended or queued for suspend.
|
* @retval 0 If device has been suspended or queued for suspend.
|
||||||
* @retval -EALREADY If device is already suspended (can only happen if get/put
|
* @retval -EALREADY If device is already suspended (can only happen if get/put
|
||||||
* calls are unbalanced).
|
* calls are unbalanced).
|
||||||
|
* @retval -EBUSY If the device is busy.
|
||||||
* @retval -errno Other negative errno, result of the action callback.
|
* @retval -errno Other negative errno, result of the action callback.
|
||||||
*/
|
*/
|
||||||
static int runtime_suspend(const struct device *dev, bool async)
|
static int runtime_suspend(const struct device *dev, bool async)
|
||||||
|
@ -49,7 +50,10 @@ static int runtime_suspend(const struct device *dev, bool async)
|
||||||
if (k_is_pre_kernel()) {
|
if (k_is_pre_kernel()) {
|
||||||
async = false;
|
async = false;
|
||||||
} else {
|
} else {
|
||||||
(void)k_mutex_lock(&pm->lock, K_FOREVER);
|
ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
|
||||||
|
if (ret < 0) {
|
||||||
|
return -EBUSY;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
|
if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
|
||||||
|
@ -84,7 +88,7 @@ static int runtime_suspend(const struct device *dev, bool async)
|
||||||
|
|
||||||
unlock:
|
unlock:
|
||||||
if (!k_is_pre_kernel()) {
|
if (!k_is_pre_kernel()) {
|
||||||
k_mutex_unlock(&pm->lock);
|
k_sem_give(&pm->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -98,7 +102,7 @@ static void runtime_suspend_work(struct k_work *work)
|
||||||
|
|
||||||
ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
|
ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
|
||||||
|
|
||||||
(void)k_mutex_lock(&pm->lock, K_FOREVER);
|
(void)k_sem_take(&pm->lock, K_FOREVER);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
pm->usage++;
|
pm->usage++;
|
||||||
pm->state = PM_DEVICE_STATE_ACTIVE;
|
pm->state = PM_DEVICE_STATE_ACTIVE;
|
||||||
|
@ -106,7 +110,7 @@ static void runtime_suspend_work(struct k_work *work)
|
||||||
pm->state = PM_DEVICE_STATE_SUSPENDED;
|
pm->state = PM_DEVICE_STATE_SUSPENDED;
|
||||||
}
|
}
|
||||||
k_event_set(&pm->event, BIT(pm->state));
|
k_event_set(&pm->event, BIT(pm->state));
|
||||||
k_mutex_unlock(&pm->lock);
|
k_sem_give(&pm->lock);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* On async put, we have to suspend the domain when the device
|
* On async put, we have to suspend the domain when the device
|
||||||
|
@ -131,7 +135,7 @@ int pm_device_runtime_get(const struct device *dev)
|
||||||
SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_get, dev);
|
SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_get, dev);
|
||||||
|
|
||||||
if (!k_is_pre_kernel()) {
|
if (!k_is_pre_kernel()) {
|
||||||
(void)k_mutex_lock(&pm->lock, K_FOREVER);
|
(void)k_sem_take(&pm->lock, K_FOREVER);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
|
if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
|
||||||
|
@ -160,11 +164,11 @@ int pm_device_runtime_get(const struct device *dev)
|
||||||
if (!k_is_pre_kernel()) {
|
if (!k_is_pre_kernel()) {
|
||||||
/* wait until possible async suspend is completed */
|
/* wait until possible async suspend is completed */
|
||||||
while (pm->state == PM_DEVICE_STATE_SUSPENDING) {
|
while (pm->state == PM_DEVICE_STATE_SUSPENDING) {
|
||||||
k_mutex_unlock(&pm->lock);
|
k_sem_give(&pm->lock);
|
||||||
|
|
||||||
k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER);
|
k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER);
|
||||||
|
|
||||||
(void)k_mutex_lock(&pm->lock, K_FOREVER);
|
(void)k_sem_take(&pm->lock, K_FOREVER);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -182,7 +186,7 @@ int pm_device_runtime_get(const struct device *dev)
|
||||||
|
|
||||||
unlock:
|
unlock:
|
||||||
if (!k_is_pre_kernel()) {
|
if (!k_is_pre_kernel()) {
|
||||||
k_mutex_unlock(&pm->lock);
|
k_sem_give(&pm->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret);
|
SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret);
|
||||||
|
@ -194,6 +198,8 @@ int pm_device_runtime_put(const struct device *dev)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
__ASSERT(!k_is_in_isr(), "use pm_device_runtime_put_async() in ISR");
|
||||||
|
|
||||||
if (dev->pm == NULL) {
|
if (dev->pm == NULL) {
|
||||||
return -ENOTSUP;
|
return -ENOTSUP;
|
||||||
}
|
}
|
||||||
|
@ -244,7 +250,7 @@ int pm_device_runtime_enable(const struct device *dev)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!k_is_pre_kernel()) {
|
if (!k_is_pre_kernel()) {
|
||||||
(void)k_mutex_lock(&pm->lock, K_FOREVER);
|
(void)k_sem_take(&pm->lock, K_FOREVER);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
|
if (atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
|
||||||
|
@ -271,7 +277,7 @@ int pm_device_runtime_enable(const struct device *dev)
|
||||||
|
|
||||||
unlock:
|
unlock:
|
||||||
if (!k_is_pre_kernel()) {
|
if (!k_is_pre_kernel()) {
|
||||||
k_mutex_unlock(&pm->lock);
|
k_sem_give(&pm->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
end:
|
end:
|
||||||
|
@ -291,7 +297,7 @@ int pm_device_runtime_disable(const struct device *dev)
|
||||||
SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_disable, dev);
|
SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_disable, dev);
|
||||||
|
|
||||||
if (!k_is_pre_kernel()) {
|
if (!k_is_pre_kernel()) {
|
||||||
(void)k_mutex_lock(&pm->lock, K_FOREVER);
|
(void)k_sem_take(&pm->lock, K_FOREVER);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
|
if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
|
||||||
|
@ -301,11 +307,11 @@ int pm_device_runtime_disable(const struct device *dev)
|
||||||
/* wait until possible async suspend is completed */
|
/* wait until possible async suspend is completed */
|
||||||
if (!k_is_pre_kernel()) {
|
if (!k_is_pre_kernel()) {
|
||||||
while (pm->state == PM_DEVICE_STATE_SUSPENDING) {
|
while (pm->state == PM_DEVICE_STATE_SUSPENDING) {
|
||||||
k_mutex_unlock(&pm->lock);
|
k_sem_give(&pm->lock);
|
||||||
|
|
||||||
k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER);
|
k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER);
|
||||||
|
|
||||||
(void)k_mutex_lock(&pm->lock, K_FOREVER);
|
(void)k_sem_take(&pm->lock, K_FOREVER);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -323,7 +329,7 @@ int pm_device_runtime_disable(const struct device *dev)
|
||||||
|
|
||||||
unlock:
|
unlock:
|
||||||
if (!k_is_pre_kernel()) {
|
if (!k_is_pre_kernel()) {
|
||||||
k_mutex_unlock(&pm->lock);
|
k_sem_give(&pm->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_disable, dev, ret);
|
SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_disable, dev, ret);
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue