2019-02-28 13:07:58 +05:30
|
|
|
/*
|
|
|
|
* Copyright (c) 2018 Intel Corporation.
|
2021-10-21 18:35:38 +02:00
|
|
|
* Copyright (c) 2021 Nordic Semiconductor ASA.
|
2025-03-21 13:05:35 -07:00
|
|
|
* Copyright (c) 2025 HubbleNetwork.
|
2019-02-28 13:07:58 +05:30
|
|
|
*
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
2022-05-06 11:12:04 +02:00
|
|
|
#include <zephyr/pm/device.h>
|
|
|
|
#include <zephyr/pm/device_runtime.h>
|
|
|
|
#include <zephyr/sys/__assert.h>
|
2019-02-28 13:07:58 +05:30
|
|
|
|
2022-05-06 11:12:04 +02:00
|
|
|
#include <zephyr/logging/log.h>
|
2021-10-27 13:39:12 +02:00
|
|
|
LOG_MODULE_DECLARE(pm_device, CONFIG_PM_DEVICE_LOG_LEVEL);
|
2019-02-28 13:07:58 +05:30
|
|
|
|
2021-11-29 15:50:12 -08:00
|
|
|
#ifdef CONFIG_PM_DEVICE_POWER_DOMAIN
/* Accessor for the power domain a device belongs to; evaluates to NULL
 * when power-domain support is compiled out so callers can test it
 * unconditionally.
 */
#define PM_DOMAIN(_pm) \
	(_pm)->domain
#else
#define PM_DOMAIN(_pm) NULL
#endif
|
|
|
|
|
2025-03-28 22:52:43 -07:00
|
|
|
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
|
2025-03-21 13:05:35 -07:00
|
|
|
#ifdef CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ
|
|
|
|
K_THREAD_STACK_DEFINE(pm_device_runtime_stack, CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_STACK_SIZE);
|
|
|
|
static struct k_work_q pm_device_runtime_wq;
|
|
|
|
#endif /* CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ */
|
2025-03-28 22:52:43 -07:00
|
|
|
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
|
2025-03-21 13:05:35 -07:00
|
|
|
|
2022-12-06 17:44:24 +01:00
|
|
|
/* Event bits (one per PM state) used to signal completion of an
 * asynchronous suspend so waiters in get/disable can unblock.
 */
#define EVENT_STATE_ACTIVE BIT(PM_DEVICE_STATE_ACTIVE)
#define EVENT_STATE_SUSPENDED BIT(PM_DEVICE_STATE_SUSPENDED)

#define EVENT_MASK (EVENT_STATE_ACTIVE | EVENT_STATE_SUSPENDED)
|
|
|
|
|
2021-10-21 18:35:38 +02:00
|
|
|
/**
|
|
|
|
* @brief Suspend a device
|
|
|
|
*
|
|
|
|
* @note Asynchronous operations are not supported when in pre-kernel mode. In
|
|
|
|
* this case, the async flag will be always forced to be false, and so the
|
2024-06-21 18:37:21 +10:00
|
|
|
* function will be blocking.
|
2021-10-21 18:35:38 +02:00
|
|
|
*
|
|
|
|
* @funcprops \pre_kernel_ok
|
|
|
|
*
|
|
|
|
* @param dev Device instance.
|
|
|
|
* @param async Perform operation asynchronously.
|
2023-12-05 23:17:56 +00:00
|
|
|
* @param delay Period to delay the asynchronous operation.
|
2021-10-21 18:35:38 +02:00
|
|
|
*
|
|
|
|
* @retval 0 If device has been suspended or queued for suspend.
|
|
|
|
* @retval -EALREADY If device is already suspended (can only happen if get/put
|
|
|
|
* calls are unbalanced).
|
2022-12-06 18:17:05 +01:00
|
|
|
* @retval -EBUSY If the device is busy.
|
2021-10-21 18:35:38 +02:00
|
|
|
* @retval -errno Other negative errno, result of the action callback.
|
|
|
|
*/
|
2023-12-05 23:17:56 +00:00
|
|
|
static int runtime_suspend(const struct device *dev, bool async,
			   k_timeout_t delay)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	/*
	 * Early return if device runtime is not enabled.
	 */
	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		return 0;
	}

	if (k_is_pre_kernel()) {
		/* No scheduler yet: force the synchronous path. */
		async = false;
	} else {
		/* From an ISR we must not block on the lock. */
		ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
		if (ret < 0) {
			return -EBUSY;
		}
	}

	if (pm->base.usage == 0U) {
		/* More puts than gets: report the imbalance to the caller. */
		LOG_WRN("Unbalanced suspend");
		ret = -EALREADY;
		goto unlock;
	}

	pm->base.usage--;
	/* Other users still hold the device: nothing more to do. */
	if (pm->base.usage > 0U) {
		goto unlock;
	}

	if (async) {
		/* queue suspend */
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
		pm->base.state = PM_DEVICE_STATE_SUSPENDING;
#ifdef CONFIG_PM_DEVICE_RUNTIME_USE_SYSTEM_WQ
		(void)k_work_schedule(&pm->work, delay);
#else
		(void)k_work_schedule_for_queue(&pm_device_runtime_wq, &pm->work, delay);
#endif /* CONFIG_PM_DEVICE_RUNTIME_USE_SYSTEM_WQ */
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
	} else {
		/* suspend now */
		ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			/* Suspend failed: restore the usage count. */
			pm->base.usage++;
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	}

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

	return ret;
}
|
|
|
|
|
2025-03-28 22:52:43 -07:00
|
|
|
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
|
2021-10-21 18:35:38 +02:00
|
|
|
/**
 * @brief Workqueue handler performing a deferred (asynchronous) suspend.
 *
 * Runs the device's SUSPEND action outside the lock, then updates state
 * under the lock and signals waiters via the event object. On failure the
 * usage count is restored and the device goes back to ACTIVE.
 */
static void runtime_suspend_work(struct k_work *work)
{
	int ret;
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct pm_device *pm = CONTAINER_OF(dwork, struct pm_device, work);

	/* Action callback is invoked without holding the lock. */
	ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);

	(void)k_sem_take(&pm->lock, K_FOREVER);
	if (ret < 0) {
		/* Suspend failed: undo the usage decrement done at queue time. */
		pm->base.usage++;
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	}
	/* Wake anyone blocked waiting for the transition to complete. */
	k_event_set(&pm->event, BIT(pm->base.state));
	k_sem_give(&pm->lock);

	/*
	 * On async put, we have to suspend the domain when the device
	 * finishes its operation
	 */
	if ((ret == 0) &&
	    atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED)) {
		(void)pm_device_runtime_put(PM_DOMAIN(&pm->base));
	}

	__ASSERT(ret == 0, "Could not suspend device (%d)", ret);
}
|
2025-03-28 22:52:43 -07:00
|
|
|
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
|
2021-05-23 22:37:38 -07:00
|
|
|
|
2024-01-10 11:01:11 +01:00
|
|
|
/**
 * @brief Resume an ISR-safe device; caller must hold the device spinlock.
 *
 * On the 0 -> 1 usage transition the device's power domain (if claimed) is
 * acquired first, then the RESUME action runs. A claimed domain that is not
 * itself ISR-safe cannot be acquired from this context, so -EWOULDBLOCK is
 * returned.
 *
 * @return 0 on success, negative errno on failure.
 */
static int get_sync_locked(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	uint32_t flags = pm->base.flags;

	if (pm->base.usage == 0) {
		if (flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) {
			const struct device *domain = PM_DOMAIN(&pm->base);

			if (domain->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE)) {
				ret = pm_device_runtime_get(domain);
				if (ret < 0) {
					return ret;
				}
			} else {
				/* Non ISR-safe domain cannot be taken under a spinlock. */
				return -EWOULDBLOCK;
			}
		}

		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			return ret;
		}
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		/* Already active: just reference-count. */
		ret = 0;
	}

	pm->base.usage++;

	return ret;
}
|
|
|
|
|
2021-11-02 11:27:16 +01:00
|
|
|
int pm_device_runtime_get(const struct device *dev)
|
2019-02-28 13:07:58 +05:30
|
|
|
{
|
2021-05-14 16:50:23 -07:00
|
|
|
int ret = 0;
|
2021-10-13 15:48:22 +02:00
|
|
|
struct pm_device *pm = dev->pm;
|
2019-02-28 13:07:58 +05:30
|
|
|
|
2022-05-14 17:18:54 +10:00
|
|
|
if (pm == NULL) {
|
2023-03-26 16:31:33 +10:00
|
|
|
return 0;
|
2022-05-14 17:18:54 +10:00
|
|
|
}
|
|
|
|
|
2021-10-21 18:35:38 +02:00
|
|
|
SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_get, dev);
|
2021-07-02 12:52:18 +02:00
|
|
|
|
2023-02-27 09:18:19 -08:00
|
|
|
/*
|
|
|
|
* Early return if device runtime is not enabled.
|
|
|
|
*/
|
2024-01-10 11:01:11 +01:00
|
|
|
if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
|
2023-02-27 09:18:19 -08:00
|
|
|
return 0;
|
2021-04-21 15:36:07 -07:00
|
|
|
}
|
|
|
|
|
2024-01-10 11:01:11 +01:00
|
|
|
if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
|
|
|
|
struct pm_device_isr *pm_sync = dev->pm_isr;
|
|
|
|
k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);
|
|
|
|
|
|
|
|
ret = get_sync_locked(dev);
|
|
|
|
k_spin_unlock(&pm_sync->lock, k);
|
|
|
|
goto end;
|
|
|
|
}
|
|
|
|
|
2023-02-27 09:18:19 -08:00
|
|
|
if (!k_is_pre_kernel()) {
|
2023-07-25 15:19:03 +02:00
|
|
|
ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
|
|
|
|
if (ret < 0) {
|
|
|
|
return -EWOULDBLOCK;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-01-10 11:01:11 +01:00
|
|
|
if (k_is_in_isr() && (pm->base.state == PM_DEVICE_STATE_SUSPENDING)) {
|
2023-07-25 15:19:03 +02:00
|
|
|
ret = -EWOULDBLOCK;
|
|
|
|
goto unlock;
|
2021-05-14 16:50:23 -07:00
|
|
|
}
|
|
|
|
|
2021-10-01 14:07:28 -07:00
|
|
|
/*
|
|
|
|
* If the device is under a power domain, the domain has to be get
|
|
|
|
* first.
|
|
|
|
*/
|
2024-01-10 11:01:11 +01:00
|
|
|
const struct device *domain = PM_DOMAIN(&pm->base);
|
|
|
|
|
|
|
|
if (domain != NULL) {
|
|
|
|
ret = pm_device_runtime_get(domain);
|
2021-10-01 14:07:28 -07:00
|
|
|
if (ret != 0) {
|
|
|
|
goto unlock;
|
|
|
|
}
|
2022-05-10 14:25:23 +10:00
|
|
|
/* Check if powering up this device failed */
|
2024-01-10 11:01:11 +01:00
|
|
|
if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_TURN_ON_FAILED)) {
|
|
|
|
(void)pm_device_runtime_put(domain);
|
2022-05-10 14:25:23 +10:00
|
|
|
ret = -EAGAIN;
|
|
|
|
goto unlock;
|
|
|
|
}
|
2023-03-26 14:01:37 +10:00
|
|
|
/* Power domain successfully claimed */
|
2024-01-10 11:01:11 +01:00
|
|
|
atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED);
|
2021-10-01 14:07:28 -07:00
|
|
|
}
|
|
|
|
|
2024-01-10 11:01:11 +01:00
|
|
|
pm->base.usage++;
|
2021-08-24 23:12:27 -07:00
|
|
|
|
2025-03-28 22:52:43 -07:00
|
|
|
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
|
2023-12-06 17:52:21 +00:00
|
|
|
/*
|
|
|
|
* Check if the device has a pending suspend operation (not started
|
|
|
|
* yet) and cancel it. This way we avoid unnecessary operations because
|
|
|
|
* the device is actually active.
|
|
|
|
*/
|
2024-01-10 11:01:11 +01:00
|
|
|
if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) &&
|
2023-12-06 17:52:21 +00:00
|
|
|
((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) {
|
2024-01-10 11:01:11 +01:00
|
|
|
pm->base.state = PM_DEVICE_STATE_ACTIVE;
|
2023-12-06 17:52:21 +00:00
|
|
|
goto unlock;
|
|
|
|
}
|
|
|
|
|
2021-10-21 18:35:38 +02:00
|
|
|
if (!k_is_pre_kernel()) {
|
2023-12-06 17:52:21 +00:00
|
|
|
/*
|
|
|
|
* If the device is already suspending there is
|
|
|
|
* nothing else we can do but wait until it finishes.
|
|
|
|
*/
|
2024-01-10 11:01:11 +01:00
|
|
|
while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) {
|
2024-03-27 15:19:39 +01:00
|
|
|
k_event_clear(&pm->event, EVENT_MASK);
|
2022-12-06 18:17:05 +01:00
|
|
|
k_sem_give(&pm->lock);
|
2022-12-06 17:44:24 +01:00
|
|
|
|
2024-03-27 15:19:39 +01:00
|
|
|
k_event_wait(&pm->event, EVENT_MASK, false, K_FOREVER);
|
2022-12-06 17:44:24 +01:00
|
|
|
|
2022-12-06 18:17:05 +01:00
|
|
|
(void)k_sem_take(&pm->lock, K_FOREVER);
|
2021-08-24 16:48:17 -07:00
|
|
|
}
|
2021-05-14 16:50:23 -07:00
|
|
|
}
|
2025-03-28 22:52:43 -07:00
|
|
|
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
|
2021-05-14 16:50:23 -07:00
|
|
|
|
2024-01-10 11:01:11 +01:00
|
|
|
if (pm->base.usage > 1U) {
|
2021-10-21 18:35:38 +02:00
|
|
|
goto unlock;
|
2019-02-28 13:07:58 +05:30
|
|
|
}
|
|
|
|
|
2024-01-10 11:01:11 +01:00
|
|
|
ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_RESUME);
|
2021-10-21 18:35:38 +02:00
|
|
|
if (ret < 0) {
|
2024-01-10 11:01:11 +01:00
|
|
|
pm->base.usage--;
|
2021-10-21 18:35:38 +02:00
|
|
|
goto unlock;
|
2021-05-25 21:51:04 -07:00
|
|
|
}
|
|
|
|
|
2024-01-10 11:01:11 +01:00
|
|
|
pm->base.state = PM_DEVICE_STATE_ACTIVE;
|
2019-02-28 13:07:58 +05:30
|
|
|
|
2021-10-21 18:35:38 +02:00
|
|
|
unlock:
|
|
|
|
if (!k_is_pre_kernel()) {
|
2022-12-06 18:17:05 +01:00
|
|
|
k_sem_give(&pm->lock);
|
2021-10-21 18:35:38 +02:00
|
|
|
}
|
2021-05-14 16:50:23 -07:00
|
|
|
|
2024-01-10 11:01:11 +01:00
|
|
|
end:
|
2021-10-21 18:35:38 +02:00
|
|
|
SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret);
|
2019-02-28 13:07:58 +05:30
|
|
|
|
2021-10-21 18:35:38 +02:00
|
|
|
return ret;
|
2019-02-28 13:07:58 +05:30
|
|
|
}
|
|
|
|
|
2024-01-10 11:01:11 +01:00
|
|
|
|
|
|
|
/**
 * @brief Release an ISR-safe device; caller must hold the device spinlock.
 *
 * Decrements the usage count; on the 1 -> 0 transition the SUSPEND action
 * runs and, if a power domain was claimed, the domain is released as well
 * (only possible when the domain is itself ISR-safe).
 *
 * @retval 0 on success or when runtime PM is not enabled.
 * @retval -EALREADY if the usage count is already zero (unbalanced put).
 * @retval -EWOULDBLOCK if the claimed domain is not ISR-safe.
 * @retval -errno other negative errno from the action callback.
 */
static int put_sync_locked(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	uint32_t flags = pm->base.flags;

	if (!(flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED))) {
		return 0;
	}

	if (pm->base.usage == 0U) {
		return -EALREADY;
	}

	pm->base.usage--;
	if (pm->base.usage == 0U) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			return ret;
		}
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;

		if (flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) {
			const struct device *domain = PM_DOMAIN(&pm->base);

			if (domain->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE)) {
				/* Release our reference on the domain too. */
				ret = put_sync_locked(domain);
			} else {
				ret = -EWOULDBLOCK;
			}
		}
	} else {
		/* Other users still hold the device. */
		ret = 0;
	}

	return ret;
}
|
|
|
|
|
2021-11-02 11:27:16 +01:00
|
|
|
int pm_device_runtime_put(const struct device *dev)
|
2019-02-28 13:07:58 +05:30
|
|
|
{
|
2021-10-21 18:35:38 +02:00
|
|
|
int ret;
|
|
|
|
|
2024-01-10 11:01:11 +01:00
|
|
|
if (dev->pm_base == NULL) {
|
2023-03-26 16:31:33 +10:00
|
|
|
return 0;
|
2022-05-14 17:18:54 +10:00
|
|
|
}
|
|
|
|
|
2021-11-02 11:27:16 +01:00
|
|
|
SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put, dev);
|
2021-10-01 14:07:28 -07:00
|
|
|
|
2024-01-10 11:01:11 +01:00
|
|
|
if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
|
|
|
|
struct pm_device_isr *pm_sync = dev->pm_isr;
|
|
|
|
k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);
|
|
|
|
|
|
|
|
ret = put_sync_locked(dev);
|
|
|
|
|
|
|
|
k_spin_unlock(&pm_sync->lock, k);
|
|
|
|
} else {
|
|
|
|
ret = runtime_suspend(dev, false, K_NO_WAIT);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now put the domain
|
|
|
|
*/
|
|
|
|
if ((ret == 0) &&
|
|
|
|
atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_PD_CLAIMED)) {
|
|
|
|
ret = pm_device_runtime_put(PM_DOMAIN(dev->pm_base));
|
|
|
|
}
|
2021-10-01 14:07:28 -07:00
|
|
|
}
|
2021-11-02 11:27:16 +01:00
|
|
|
SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put, dev, ret);
|
2021-10-21 18:35:38 +02:00
|
|
|
|
|
|
|
return ret;
|
2019-02-28 13:07:58 +05:30
|
|
|
}
|
|
|
|
|
2023-12-05 23:17:56 +00:00
|
|
|
/**
 * @brief Release a device asynchronously after an optional delay.
 *
 * ISR-safe devices are suspended synchronously under their spinlock (no
 * workqueue involvement); others have their suspend queued via
 * runtime_suspend(dev, true, delay). Only available when
 * CONFIG_PM_DEVICE_RUNTIME_ASYNC is enabled; otherwise returns -ENOSYS.
 */
int pm_device_runtime_put_async(const struct device *dev, k_timeout_t delay)
{
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
	int ret;

	if (dev->pm_base == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put_async, dev, delay);
	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		struct pm_device_isr *pm_sync = dev->pm_isr;
		k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);

		ret = put_sync_locked(dev);

		k_spin_unlock(&pm_sync->lock, k);
	} else {
		ret = runtime_suspend(dev, true, delay);
	}
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put_async, dev, delay, ret);

	return ret;
#else
	LOG_WRN("Function not available");
	return -ENOSYS;
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
}
|
|
|
|
|
2023-03-31 10:08:38 -07:00
|
|
|
__boot_func
|
2022-03-12 21:10:42 +10:00
|
|
|
int pm_device_runtime_auto_enable(const struct device *dev)
|
|
|
|
{
|
2024-01-10 11:01:11 +01:00
|
|
|
struct pm_device_base *pm = dev->pm_base;
|
2022-03-12 21:10:42 +10:00
|
|
|
|
|
|
|
/* No action needed if PM_DEVICE_FLAG_RUNTIME_AUTO is not enabled */
|
|
|
|
if (!pm || !atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_AUTO)) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return pm_device_runtime_enable(dev);
|
|
|
|
}
|
|
|
|
|
2024-01-10 11:01:11 +01:00
|
|
|
/**
 * @brief Enable runtime PM for an ISR-safe device.
 *
 * Under the device spinlock: suspends the device if it is currently active,
 * then sets the RUNTIME_ENABLED flag and resets the usage count to zero.
 */
static int runtime_enable_sync(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	k_spinlock_key_t k = k_spin_lock(&pm->lock);

	if (pm->base.state == PM_DEVICE_STATE_ACTIVE) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	} else {
		ret = 0;
	}

	/* Flag write is done under the spinlock, no atomic op needed. */
	pm->base.flags |= BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED);
	pm->base.usage = 0U;
unlock:
	k_spin_unlock(&pm->lock, k);
	return ret;
}
|
|
|
|
|
2021-12-14 09:47:00 -08:00
|
|
|
/**
 * @brief Enable runtime PM for a device.
 *
 * Lazily initializes the PM bookkeeping (back-pointer and, when async is
 * enabled, the suspend work item), suspends the device if it is active,
 * resets the usage count and sets the RUNTIME_ENABLED flag.
 *
 * @retval 0 on success or if already enabled.
 * @retval -ENOTSUP if the device has no PM support.
 * @retval -EBUSY if the device is marked busy.
 * @retval -errno other negative errno from the action callback.
 */
int pm_device_runtime_enable(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_enable, dev);

	if (pm == NULL) {
		ret = -ENOTSUP;
		goto end;
	}

	/* Nothing to do if already enabled. */
	if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		goto end;
	}

	if (pm_device_is_busy(dev)) {
		ret = -EBUSY;
		goto end;
	}

	/* ISR-safe devices use the spinlock-protected variant. */
	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		ret = runtime_enable_sync(dev);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		(void)k_sem_take(&pm->lock, K_FOREVER);
	}

	/* lazy init of PM fields */
	if (pm->dev == NULL) {
		pm->dev = dev;
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
		k_work_init_delayable(&pm->work, runtime_suspend_work);
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
	}

	if (pm->base.state == PM_DEVICE_STATE_ACTIVE) {
		ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			goto unlock;
		}
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	}

	pm->base.usage = 0U;

	atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_enable, dev, ret);
	return ret;
}
|
|
|
|
|
2024-01-10 11:01:11 +01:00
|
|
|
/**
 * @brief Disable runtime PM for an ISR-safe device.
 *
 * Under the device spinlock: resumes the device if it is suspended, then
 * clears the RUNTIME_ENABLED flag.
 */
static int runtime_disable_sync(const struct device *dev)
{
	struct pm_device_isr *pm = dev->pm_isr;
	int ret;
	k_spinlock_key_t k = k_spin_lock(&pm->lock);

	if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		ret = 0;
	}

	/* Flag write is done under the spinlock, no atomic op needed. */
	pm->base.flags &= ~BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED);
unlock:
	k_spin_unlock(&pm->lock, k);
	return ret;
}
|
|
|
|
|
2021-11-02 11:27:16 +01:00
|
|
|
/**
 * @brief Disable runtime PM for a device.
 *
 * Cancels a pending asynchronous suspend if it has not started yet, or
 * waits for an in-flight one to finish; then resumes the device if it is
 * suspended and clears the RUNTIME_ENABLED flag.
 *
 * @retval 0 on success or if already disabled.
 * @retval -ENOTSUP if the device has no PM support.
 * @retval -errno other negative errno from the action callback.
 */
int pm_device_runtime_disable(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_disable, dev);

	if (pm == NULL) {
		ret = -ENOTSUP;
		goto end;
	}

	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		goto end;
	}

	/* ISR-safe devices use the spinlock-protected variant. */
	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		ret = runtime_disable_sync(dev);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		(void)k_sem_take(&pm->lock, K_FOREVER);
	}

#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
	if (!k_is_pre_kernel()) {
		/* Pending (not yet running) suspend: cancel it and stay active. */
		if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) &&
		    ((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) {
			pm->base.state = PM_DEVICE_STATE_ACTIVE;
			goto clear_bit;
		}

		/* wait until possible async suspend is completed */
		while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) {
			k_event_clear(&pm->event, EVENT_MASK);
			k_sem_give(&pm->lock);

			k_event_wait(&pm->event, EVENT_MASK, false, K_FOREVER);

			(void)k_sem_take(&pm->lock, K_FOREVER);
		}
	}
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */

	/* wake up the device if suspended */
	if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	}
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
clear_bit:
#endif
	atomic_clear_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_disable, dev, ret);

	return ret;
}
|
2021-11-05 19:18:02 -07:00
|
|
|
|
|
|
|
bool pm_device_runtime_is_enabled(const struct device *dev)
|
|
|
|
{
|
2024-01-10 11:01:11 +01:00
|
|
|
struct pm_device_base *pm = dev->pm_base;
|
2021-11-05 19:18:02 -07:00
|
|
|
|
2022-05-14 17:18:54 +10:00
|
|
|
return pm && atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);
|
2021-11-05 19:18:02 -07:00
|
|
|
}
|
2024-04-23 17:47:31 +01:00
|
|
|
|
|
|
|
int pm_device_runtime_usage(const struct device *dev)
|
|
|
|
{
|
|
|
|
if (!pm_device_runtime_is_enabled(dev)) {
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
2024-11-15 15:35:56 +01:00
|
|
|
return dev->pm_base->usage;
|
2024-04-23 17:47:31 +01:00
|
|
|
}
|
2025-03-21 13:05:35 -07:00
|
|
|
|
2025-03-28 22:52:43 -07:00
|
|
|
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
|
2025-03-21 13:05:35 -07:00
|
|
|
#ifdef CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ
|
|
|
|
|
|
|
|
static int pm_device_runtime_wq_init(void)
|
|
|
|
{
|
|
|
|
const struct k_work_queue_config cfg = {.name = "PM DEVICE RUNTIME WQ"};
|
|
|
|
|
|
|
|
k_work_queue_init(&pm_device_runtime_wq);
|
|
|
|
|
|
|
|
k_work_queue_start(&pm_device_runtime_wq, pm_device_runtime_stack,
|
|
|
|
K_THREAD_STACK_SIZEOF(pm_device_runtime_stack),
|
|
|
|
CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_PRIO, &cfg);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
SYS_INIT(pm_device_runtime_wq_init, POST_KERNEL,
|
|
|
|
CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_INIT_PRIO);
|
|
|
|
|
|
|
|
#endif /* CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ */
|
2025-03-28 22:52:43 -07:00
|
|
|
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
|