pm: runtime: Move from mutexes to semaphores

To be able to call pm_device_runtime_put() from an IRQ context, move
from mutexes to semaphores and force the async path when the put
operation is called from an ISR.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
commit a337bfc3b3
Author:    Carlo Caione <ccaione@baylibre.com>
Date:      2022-12-06 18:17:05 +01:00
Committer: Carles Cufí

3 changed files with 24 additions and 17 deletions
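
The key pattern introduced in runtime_suspend() below is taking the per-device lock with K_NO_WAIT when running in interrupt context, so the caller never sleeps in an ISR and gets -EBUSY if the lock is contended. A minimal standalone sketch of that pattern, assuming the lock is a binary semaphore (initial count 1, limit 1) and using a hypothetical suspend_locked() helper that is not part of this commit:

#include <zephyr/kernel.h>

/* Stand-in for the per-device pm->lock, assumed to be a binary semaphore. */
K_SEM_DEFINE(pm_lock, 1, 1);

/* Hypothetical helper mirroring the locking pattern of runtime_suspend():
 * block on the lock from thread context, never block from an ISR.
 */
int suspend_locked(void)
{
	int ret;

	ret = k_sem_take(&pm_lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
	if (ret < 0) {
		/* Lock is held elsewhere and we cannot wait in an ISR. */
		return -EBUSY;
	}

	/* ... update usage counter / device state, queue async work ... */

	k_sem_give(&pm_lock);

	return 0;
}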

@@ -39,6 +39,7 @@ LOG_MODULE_DECLARE(pm_device, CONFIG_PM_DEVICE_LOG_LEVEL);
  * @retval 0 If device has been suspended or queued for suspend.
  * @retval -EALREADY If device is already suspended (can only happen if get/put
  *         calls are unbalanced).
+ * @retval -EBUSY If the device is busy.
  * @retval -errno Other negative errno, result of the action callback.
  */
 static int runtime_suspend(const struct device *dev, bool async)
@@ -49,7 +50,10 @@ static int runtime_suspend(const struct device *dev, bool async)
 	if (k_is_pre_kernel()) {
 		async = false;
 	} else {
-		(void)k_mutex_lock(&pm->lock, K_FOREVER);
+		ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
+		if (ret < 0) {
+			return -EBUSY;
+		}
 	}
 
 	if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
@@ -84,7 +88,7 @@ static int runtime_suspend(const struct device *dev, bool async)
 unlock:
 	if (!k_is_pre_kernel()) {
-		k_mutex_unlock(&pm->lock);
+		k_sem_give(&pm->lock);
 	}
 
 	return ret;
@@ -98,7 +102,7 @@ static void runtime_suspend_work(struct k_work *work)
 	ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
 
-	(void)k_mutex_lock(&pm->lock, K_FOREVER);
+	(void)k_sem_take(&pm->lock, K_FOREVER);
 	if (ret < 0) {
 		pm->usage++;
 		pm->state = PM_DEVICE_STATE_ACTIVE;
@@ -106,7 +110,7 @@ static void runtime_suspend_work(struct k_work *work)
 		pm->state = PM_DEVICE_STATE_SUSPENDED;
 	}
 	k_event_set(&pm->event, BIT(pm->state));
-	k_mutex_unlock(&pm->lock);
+	k_sem_give(&pm->lock);
 
 	/*
 	 * On async put, we have to suspend the domain when the device
@@ -131,7 +135,7 @@ int pm_device_runtime_get(const struct device *dev)
 	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_get, dev);
 
 	if (!k_is_pre_kernel()) {
-		(void)k_mutex_lock(&pm->lock, K_FOREVER);
+		(void)k_sem_take(&pm->lock, K_FOREVER);
 	}
 
 	if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
@@ -160,11 +164,11 @@ int pm_device_runtime_get(const struct device *dev)
 	if (!k_is_pre_kernel()) {
 		/* wait until possible async suspend is completed */
 		while (pm->state == PM_DEVICE_STATE_SUSPENDING) {
-			k_mutex_unlock(&pm->lock);
+			k_sem_give(&pm->lock);
 
 			k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER);
 
-			(void)k_mutex_lock(&pm->lock, K_FOREVER);
+			(void)k_sem_take(&pm->lock, K_FOREVER);
 		}
 	}
@@ -182,7 +186,7 @@ int pm_device_runtime_get(const struct device *dev)
 unlock:
 	if (!k_is_pre_kernel()) {
-		k_mutex_unlock(&pm->lock);
+		k_sem_give(&pm->lock);
 	}
 
 	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret);
@@ -194,6 +198,8 @@ int pm_device_runtime_put(const struct device *dev)
 {
 	int ret;
 
+	__ASSERT(!k_is_in_isr(), "use pm_device_runtime_put_async() in ISR");
+
 	if (dev->pm == NULL) {
 		return -ENOTSUP;
 	}
@@ -244,7 +250,7 @@ int pm_device_runtime_enable(const struct device *dev)
 	}
 
 	if (!k_is_pre_kernel()) {
-		(void)k_mutex_lock(&pm->lock, K_FOREVER);
+		(void)k_sem_take(&pm->lock, K_FOREVER);
 	}
 
 	if (atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
@@ -271,7 +277,7 @@ int pm_device_runtime_enable(const struct device *dev)
 unlock:
 	if (!k_is_pre_kernel()) {
-		k_mutex_unlock(&pm->lock);
+		k_sem_give(&pm->lock);
 	}
 
 end:
@@ -291,7 +297,7 @@ int pm_device_runtime_disable(const struct device *dev)
 	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_disable, dev);
 
 	if (!k_is_pre_kernel()) {
-		(void)k_mutex_lock(&pm->lock, K_FOREVER);
+		(void)k_sem_take(&pm->lock, K_FOREVER);
 	}
 
 	if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
@@ -301,11 +307,11 @@ int pm_device_runtime_disable(const struct device *dev)
 	/* wait until possible async suspend is completed */
 	if (!k_is_pre_kernel()) {
 		while (pm->state == PM_DEVICE_STATE_SUSPENDING) {
-			k_mutex_unlock(&pm->lock);
+			k_sem_give(&pm->lock);
 
 			k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER);
 
-			(void)k_mutex_lock(&pm->lock, K_FOREVER);
+			(void)k_sem_take(&pm->lock, K_FOREVER);
 		}
 	}
@@ -323,7 +329,7 @@ int pm_device_runtime_disable(const struct device *dev)
 unlock:
 	if (!k_is_pre_kernel()) {
-		k_mutex_unlock(&pm->lock);
+		k_sem_give(&pm->lock);
 	}
 
 	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_disable, dev, ret);
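
For drivers and applications, the practical effect is that a runtime-PM reference can now be released from interrupt context through the asynchronous put, while the synchronous pm_device_runtime_put() stays thread-only (it now asserts when called from an ISR). A hedged usage sketch, assuming a hypothetical runtime-PM-enabled device my_dev and ISR my_isr(), and using the single-argument pm_device_runtime_put_async() signature as it existed at the time of this commit (later Zephyr versions add a delay argument):

#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/pm/device_runtime.h>

/* Hypothetical device handle obtained elsewhere (e.g. via DEVICE_DT_GET()). */
static const struct device *my_dev;

/* Thread context: the synchronous put may block on the device lock. */
void thread_release(void)
{
	(void)pm_device_runtime_put(my_dev);
}

/* ISR context: use the asynchronous variant; the suspend itself runs later
 * from the work queue, and the device lock is only taken with K_NO_WAIT.
 */
void my_isr(const void *arg)
{
	ARG_UNUSED(arg);

	(void)pm_device_runtime_put_async(my_dev);
}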