diff --git a/include/zephyr/pm/device.h b/include/zephyr/pm/device.h
index 023d35b3d37..ebff9696197 100644
--- a/include/zephyr/pm/device.h
+++ b/include/zephyr/pm/device.h
@@ -123,7 +123,7 @@ struct pm_device {
 	/** Pointer to the device */
 	const struct device *dev;
 	/** Lock to synchronize the get/put operations */
-	struct k_mutex lock;
+	struct k_sem lock;
 	/** Event var to listen to the sync request events */
 	struct k_event event;
 	/** Device usage count */
@@ -147,7 +147,7 @@ struct pm_device {
 
 #ifdef CONFIG_PM_DEVICE_RUNTIME
 #define Z_PM_DEVICE_RUNTIME_INIT(obj)			\
-	.lock = Z_MUTEX_INITIALIZER(obj.lock),		\
+	.lock = Z_SEM_INITIALIZER(obj.lock, 1, 1),	\
 	.event = Z_EVENT_INITIALIZER(obj.event),
 #else
 #define Z_PM_DEVICE_RUNTIME_INIT(obj)
diff --git a/include/zephyr/pm/device_runtime.h b/include/zephyr/pm/device_runtime.h
index 8a403a1a924..d14aa2fee6e 100644
--- a/include/zephyr/pm/device_runtime.h
+++ b/include/zephyr/pm/device_runtime.h
@@ -114,13 +114,14 @@ int pm_device_runtime_put(const struct device *dev);
  * this case, the function will be blocking (equivalent to
  * pm_device_runtime_put()).
  *
- * @funcprops \pre_kernel_ok, \async
+ * @funcprops \pre_kernel_ok, \async, \isr_ok
  *
  * @param dev Device instance.
  *
  * @retval 0 If it succeeds. In case device runtime PM is not enabled or not
  * available this function will be a no-op and will also return 0.
  * @retval -ENOTSUP If the device does not support PM.
+ * @retval -EBUSY If the device is busy.
  * @retval -EALREADY If device is already suspended (can only happen if get/put
  * calls are unbalanced).
  *
diff --git a/subsys/pm/device_runtime.c b/subsys/pm/device_runtime.c
index 55cc645faee..fb759cf2821 100644
--- a/subsys/pm/device_runtime.c
+++ b/subsys/pm/device_runtime.c
@@ -39,6 +39,7 @@ LOG_MODULE_DECLARE(pm_device, CONFIG_PM_DEVICE_LOG_LEVEL);
  * @retval 0 If device has been suspended or queued for suspend.
  * @retval -EALREADY If device is already suspended (can only happen if get/put
  * calls are unbalanced).
+ * @retval -EBUSY If the device is busy.
  * @retval -errno Other negative errno, result of the action callback.
  */
 static int runtime_suspend(const struct device *dev, bool async)
@@ -49,7 +50,10 @@ static int runtime_suspend(const struct device *dev, bool async)
 	if (k_is_pre_kernel()) {
 		async = false;
 	} else {
-		(void)k_mutex_lock(&pm->lock, K_FOREVER);
+		ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
+		if (ret < 0) {
+			return -EBUSY;
+		}
 	}
 
 	if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
@@ -84,7 +88,7 @@ static int runtime_suspend(const struct device *dev, bool async)
 
 unlock:
 	if (!k_is_pre_kernel()) {
-		k_mutex_unlock(&pm->lock);
+		k_sem_give(&pm->lock);
 	}
 
 	return ret;
@@ -98,7 +102,7 @@ static void runtime_suspend_work(struct k_work *work)
 
 	ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
 
-	(void)k_mutex_lock(&pm->lock, K_FOREVER);
+	(void)k_sem_take(&pm->lock, K_FOREVER);
 	if (ret < 0) {
 		pm->usage++;
 		pm->state = PM_DEVICE_STATE_ACTIVE;
@@ -106,7 +110,7 @@ static void runtime_suspend_work(struct k_work *work)
 		pm->state = PM_DEVICE_STATE_SUSPENDED;
 	}
 	k_event_set(&pm->event, BIT(pm->state));
-	k_mutex_unlock(&pm->lock);
+	k_sem_give(&pm->lock);
 
 	/*
 	 * On async put, we have to suspend the domain when the device
@@ -131,7 +135,7 @@ int pm_device_runtime_get(const struct device *dev)
 	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_get, dev);
 
 	if (!k_is_pre_kernel()) {
-		(void)k_mutex_lock(&pm->lock, K_FOREVER);
+		(void)k_sem_take(&pm->lock, K_FOREVER);
 	}
 
 	if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
@@ -160,11 +164,11 @@ int pm_device_runtime_get(const struct device *dev)
 	if (!k_is_pre_kernel()) {
 		/* wait until possible async suspend is completed */
 		while (pm->state == PM_DEVICE_STATE_SUSPENDING) {
-			k_mutex_unlock(&pm->lock);
+			k_sem_give(&pm->lock);
 
 			k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER);
 
-			(void)k_mutex_lock(&pm->lock, K_FOREVER);
+			(void)k_sem_take(&pm->lock, K_FOREVER);
 		}
 	}
 
@@ -182,7 +186,7 @@ int pm_device_runtime_get(const struct device *dev)
 
 unlock:
 	if (!k_is_pre_kernel()) {
-		k_mutex_unlock(&pm->lock);
+		k_sem_give(&pm->lock);
 	}
 
 	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret);
@@ -194,6 +198,8 @@ int pm_device_runtime_put(const struct device *dev)
 {
 	int ret;
 
+	__ASSERT(!k_is_in_isr(), "use pm_device_runtime_put_async() in ISR");
+
 	if (dev->pm == NULL) {
 		return -ENOTSUP;
 	}
@@ -244,7 +250,7 @@ int pm_device_runtime_enable(const struct device *dev)
 	}
 
 	if (!k_is_pre_kernel()) {
-		(void)k_mutex_lock(&pm->lock, K_FOREVER);
+		(void)k_sem_take(&pm->lock, K_FOREVER);
 	}
 
 	if (atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
@@ -271,7 +277,7 @@ int pm_device_runtime_enable(const struct device *dev)
 
 unlock:
 	if (!k_is_pre_kernel()) {
-		k_mutex_unlock(&pm->lock);
+		k_sem_give(&pm->lock);
 	}
 
 end:
@@ -291,7 +297,7 @@ int pm_device_runtime_disable(const struct device *dev)
 	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_disable, dev);
 
 	if (!k_is_pre_kernel()) {
-		(void)k_mutex_lock(&pm->lock, K_FOREVER);
+		(void)k_sem_take(&pm->lock, K_FOREVER);
 	}
 
 	if (!atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
@@ -301,11 +307,11 @@ int pm_device_runtime_disable(const struct device *dev)
 	/* wait until possible async suspend is completed */
 	if (!k_is_pre_kernel()) {
 		while (pm->state == PM_DEVICE_STATE_SUSPENDING) {
-			k_mutex_unlock(&pm->lock);
+			k_sem_give(&pm->lock);
 
 			k_event_wait(&pm->event, EVENT_MASK, true, K_FOREVER);
 
-			(void)k_mutex_lock(&pm->lock, K_FOREVER);
+			(void)k_sem_take(&pm->lock, K_FOREVER);
 		}
 	}
 
@@ -323,7 +329,7 @@ int pm_device_runtime_disable(const struct device *dev)
 
 unlock:
 	if (!k_is_pre_kernel()) {
-		k_mutex_unlock(&pm->lock);
+		k_sem_give(&pm->lock);
 	}
 
 	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_disable, dev, ret);
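
A usage sketch (not part of the patch): with \isr_ok added to pm_device_runtime_put_async(), a driver may now
drop its runtime-PM reference directly from an interrupt handler. The device pointer, the ISR name and the
single-argument form of pm_device_runtime_put_async() below are illustrative assumptions only; check the exact
signature in your tree.

#include <zephyr/device.h>
#include <zephyr/pm/device_runtime.h>

/* Hypothetical "transfer done" ISR for a device that took a runtime-PM
 * reference with pm_device_runtime_get() before starting the transfer.
 */
static void transfer_done_isr(const struct device *my_dev)
{
	/* In ISR context the PM lock (now a k_sem) is taken with K_NO_WAIT,
	 * so this either queues the async suspend or fails fast with -EBUSY
	 * instead of blocking.
	 */
	int ret = pm_device_runtime_put_async(my_dev);

	if (ret == -EBUSY) {
		/* Lock contended by a thread: defer the put to thread context
		 * (e.g. via a work item) rather than spinning here.
		 */
	}
}

The synchronous pm_device_runtime_put() must still not be called from an ISR; after this change it asserts with
"use pm_device_runtime_put_async() in ISR". The switch from k_mutex to a binary k_sem
(Z_SEM_INITIALIZER(obj.lock, 1, 1)) is what enables the ISR path, since Zephyr mutexes cannot be locked or
unlocked from interrupt context; the trade-off is that the lock no longer provides priority inheritance.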