pm: device: runtime: refactor API
This patch refactors the runtime API to make it clearer and simpler. Relevant changes:

- The API uses the action callback directly, avoiding unnecessary overhead.
- API documentation has been improved to include detailed return error codes.
- pm_device_disable() is now synchronous (to simplify possible error paths) and returns an error if it fails. It is also safe to call it in pre-kernel mode now.
- pm_device_put(_async)() returns -EALREADY if called with the usage count at zero (the result of an unbalanced get/put call sequence).
- A transitional state has been added back. This makes the code more readable and avoids using atomics (not required).

TODO:

- Solve the asynchronous suspend error path in a better manner (it is currently "solved" using asserts).

Signed-off-by: Gerard Marull-Paretas <gerard.marull@nordicsemi.no>
commit eed8bd9c61
parent e3fece5241
4 changed files with 243 additions and 219 deletions
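For reference, a minimal usage sketch of the refactored API from a driver's point of view. The driver functions below are hypothetical and only illustrate the calls declared in the runtime PM header shown further down:

#include <device.h>
#include <pm/device_runtime.h>

/* Hypothetical driver init: opt the device into runtime PM. The device
 * may stay suspended until it is first requested with pm_device_get().
 */
static int my_drv_init(const struct device *dev)
{
	pm_device_enable(dev);

	return 0;
}

/* Hypothetical transfer path: keep get/put calls balanced. */
static int my_drv_transfer(const struct device *dev)
{
	int ret;

	ret = pm_device_get(dev);
	if (ret < 0) {
		/* e.g. -ENOTSUP if runtime PM is not enabled for this device,
		 * or the error returned by the PM action callback on resume.
		 */
		return ret;
	}

	/* ... access the hardware ... */

	ret = pm_device_put(dev);
	if (ret == -EALREADY) {
		/* usage count was already zero: unbalanced get/put sequence */
	}

	return ret;
}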
@@ -35,6 +35,8 @@ enum pm_device_state {
* Device context may be lost.
*/
PM_DEVICE_STATE_SUSPENDED,
/** Device is being suspended. */
PM_DEVICE_STATE_SUSPENDING,
/**
* Device is turned off (power removed).
*

@@ -55,8 +57,6 @@ enum pm_device_flag {
PM_DEVICE_FLAGS_WS_CAPABLE,
/** Indicates if the device is being used as wakeup source. */
PM_DEVICE_FLAGS_WS_ENABLED,
/** Indicates that the device is changing its state */
PM_DEVICE_FLAG_TRANSITIONING,
/** Number of flags (internal use only). */
PM_DEVICE_FLAG_COUNT
};
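The two hunks above add a PM_DEVICE_STATE_SUSPENDING state and drop the PM_DEVICE_FLAG_TRANSITIONING flag, so "suspend in progress" is now tracked by the state machine instead of an atomic flag. As a hedged sketch only, assuming the existing pm_device_state_get(dev, &state) helper keeps that signature, outside code could detect an in-flight suspend like this:

#include <stdbool.h>

#include <device.h>
#include <pm/device.h>

/* Hypothetical helper: report whether an asynchronous suspend is still
 * in flight for the given device.
 */
static bool my_suspend_pending(const struct device *dev)
{
	enum pm_device_state state;

	if (pm_device_state_get(dev, &state) != 0) {
		return false;
	}

	return state == PM_DEVICE_STATE_SUSPENDING;
}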
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2015 Intel Corporation.
* Copyright (c) 2021 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/

@@ -8,107 +9,117 @@
#define ZEPHYR_INCLUDE_PM_DEVICE_RUNTIME_H_

#include <device.h>
#include <kernel.h>
#include <sys/atomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
* @brief Runtime Power Management API
*
* @defgroup runtime_power_management_api Runtime Power Management API
* @defgroup runtime_power_management_api Device Runtime Power Management API
* @ingroup power_management_api
* @{
*/

#ifdef CONFIG_PM_DEVICE_RUNTIME
#if defined(CONFIG_PM_DEVICE_RUNTIME) || defined(__DOXYGEN__)

/**
* @brief Enable device runtime PM
*
* Called by a device driver to enable device runtime power management.
* The device might be asynchronously suspended if runtime PM is enabled
* when the device is not in use.
*
* @funcprops \pre_kernel_ok
*
* @param dev Pointer to device structure of the specific device driver
* the caller is interested in.
* @param dev Device instance.
*/
void pm_device_enable(const struct device *dev);

/**
* @brief Disable device runtime PM
*
* Called by a device driver to disable device runtime power management.
* The device might be asynchronously resumed if runtime PM is disabled
* If the device is currently suspended it will be resumed.
*
* @funcprops \pre_kernel_ok
*
* @param dev Pointer to device structure of the specific device driver
* the caller is interested in.
* @param dev Device instance.
*
* @retval 0 If the device runtime PM is disabled successfully.
* @retval -ENOSYS If the functionality is not available.
* @retval -errno Other negative errno, result of resuming the device.
*/
void pm_device_disable(const struct device *dev);
int pm_device_disable(const struct device *dev);

/**
* @brief Call device resume synchronously based on usage count
* @brief Resume a device based on usage count.
*
* Called by a device driver to mark the device as being used. It
* will bring up or resume the device if it is in suspended state
* based on the device usage count. This call is blocked until the
* device PM state is changed to resume.
* This function will resume the device if the device is suspended (usage count
* equal to 0). In case of a resume failure, usage count and device state will
* be left unchanged. In all other cases, usage count will be incremented.
*
* @param dev Pointer to device structure of the specific device driver
* the caller is interested in.
* @retval 0 If successful.
* @retval Errno Negative errno code if failure.
* If the device is still being suspended as a result of calling
* pm_device_put_async(), this function will wait for the operation to finish to
* then resume the device.
*
* @funcprops \pre_kernel_ok
*
* @param dev Device instance.
*
* @retval 0 If the device has been resumed successfully.
* @retval -ENOTSUP If runtime PM is not enabled for the device.
* @retval -ENOSYS If the functionality is not available.
* @retval -errno Other negative errno, result of the PM action callback.
*/
int pm_device_get(const struct device *dev);

/**
* @brief Call device suspend asynchronously based on usage count
* @brief Suspend a device based on usage count.
*
* Called by a device driver to mark the device as being released.
* This API asynchronously put the device to suspend state if
* it not already in suspended state.
* This function will suspend the device if the device is no longer required
* (usage count equal to 0). In case of suspend failure, usage count and device
* state will be left unchanged. In all other cases, usage count will be
* decremented (down to 0).
*
* @funcprops \isr_ok, \pre_kernel_ok
* @funcprops \pre_kernel_ok
*
* @param dev Pointer to device structure of the specific device driver
* the caller is interested in.
* @retval 0 If successfully queued the Async request. If queued,
* the caller need to wait on the poll event linked to device pm
* signal mechanism to know the completion of suspend operation.
* @retval Errno Negative errno code if failure.
*/
int pm_device_put_async(const struct device *dev);

/**
* @brief Call device suspend synchronously based on usage count
* @param dev Device instance.
*
* Called by a device driver to mark the device as being released. It
* will put the device to suspended state if is is in active state
* based on the device usage count. This call is blocked until the
* device PM state is changed to resume.
* @retval 0 If device has been suspended successfully.
* @retval -ENOTSUP If runtime PM is not enabled for the device.
* @retval -ENOSYS If the functionality is not available.
* @retval -EALREADY If device is already suspended (can only happen if get/put
* calls are unbalanced).
* @retval -errno Other negative errno, result of the action callback.
*
* @param dev Pointer to device structure of the specific device driver
* the caller is interested in.
* @retval 0 If successful.
* @retval Errno Negative errno code if failure.
* @see pm_device_put_async()
*/
int pm_device_put(const struct device *dev);

/**
* @brief Suspend a device based on usage count (asynchronously).
*
* This function will schedule the device suspension if the device is no longer
* required (usage count equal to 0). In all other cases, usage count will be
* decremented (down to 0).
*
* @note Asynchronous operations are not supported when in pre-kernel mode. In
* this case, the function will be blocking (equivalent to pm_device_put()).
*
* @funcprops \pre_kernel_ok, \async
*
* @param dev Device instance.
*
* @retval 0 If device has queued for suspend.
* @retval -ENOTSUP If runtime PM is not enabled for the device.
* @retval -ENOSYS If the functionality is not available.
* @retval -EALREADY If device is already suspended (can only happen if get/put
* calls are unbalanced).
*
* @see pm_device_put()
*/
int pm_device_put_async(const struct device *dev);

#else
static inline void pm_device_enable(const struct device *dev) { }
static inline void pm_device_disable(const struct device *dev) { }
static inline int pm_device_disable(const struct device *dev) { return -ENOSYS; }
static inline int pm_device_get(const struct device *dev) { return -ENOSYS; }
static inline int pm_device_get_async(const struct device *dev) { return -ENOSYS; }
static inline int pm_device_put(const struct device *dev) { return -ENOSYS; }
static inline int pm_device_put_async(const struct device *dev) { return -ENOSYS; }
static inline int pm_device_wait(const struct device *dev,
k_timeout_t timeout) { return -ENOSYS; }
#endif

/** @} */
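As a complement to the documentation above, a hedged sketch of the asynchronous release path. The caller is hypothetical; the comments only restate the behaviour documented for pm_device_put_async() and pm_device_get():

#include <device.h>
#include <pm/device_runtime.h>

/* Hypothetical end-of-transaction hook: release the device without
 * blocking; the actual suspend runs later from the scheduled work item.
 */
static int my_drv_release(const struct device *dev)
{
	int ret;

	ret = pm_device_put_async(dev);
	if (ret == -EALREADY) {
		/* usage count was already zero: unbalanced get/put sequence */
	}

	/* A later pm_device_get() will wait for the queued suspend to
	 * complete before resuming the device.
	 */
	return ret;
}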
@@ -35,10 +35,6 @@ int pm_device_state_set(const struct device *dev,
return -ENOSYS;
}

if (atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_TRANSITIONING)) {
return -EBUSY;
}

switch (state) {
case PM_DEVICE_STATE_SUSPENDED:
if (pm->state == PM_DEVICE_STATE_SUSPENDED) {
@@ -1,169 +1,169 @@
/*
* Copyright (c) 2018 Intel Corporation.
* Copyright (c) 2021 Nordic Semiconductor ASA.
*
* SPDX-License-Identifier: Apache-2.0
*/

#include <sys/__assert.h>
#include <pm/device.h>
#include <pm/device_runtime.h>
#include <sys/__assert.h>

#include <logging/log.h>
LOG_MODULE_DECLARE(pm_device, CONFIG_PM_DEVICE_LOG_LEVEL);

/* Device PM request type */
#define PM_DEVICE_SYNC BIT(0)
#define PM_DEVICE_ASYNC BIT(1)

static void pm_device_runtime_state_set(struct pm_device *pm)
{
const struct device *dev = pm->dev;
int ret = 0;

/* Clear transitioning flags */
atomic_clear_bit(&pm->flags, PM_DEVICE_FLAG_TRANSITIONING);

switch (pm->state) {
case PM_DEVICE_STATE_ACTIVE:
if ((pm->usage == 0) && pm->enable) {
ret = pm_device_state_set(dev, PM_DEVICE_STATE_SUSPENDED);
}
break;
case PM_DEVICE_STATE_SUSPENDED:
if ((pm->usage > 0) || !pm->enable) {
ret = pm_device_state_set(dev, PM_DEVICE_STATE_ACTIVE);
}
break;
default:
LOG_ERR("Invalid state!!\n");
}

__ASSERT(ret == 0, "Set Power state error");

/*
* This function returns the number of woken threads on success. There
* is nothing we can do with this information. Just ignoring it.
/**
* @brief Suspend a device
*
* @note Asynchronous operations are not supported when in pre-kernel mode. In
* this case, the async flag will be always forced to be false, and so the
* the function will be blocking.
*
* @funcprops \pre_kernel_ok
*
* @param dev Device instance.
* @param async Perform operation asynchronously.
*
* @retval 0 If device has been suspended or queued for suspend.
* @retval -ENOTSUP If runtime PM is not enabled for the device.
* @retval -EALREADY If device is already suspended (can only happen if get/put
* calls are unbalanced).
* @retval -errno Other negative errno, result of the action callback.
*/
(void)k_condvar_broadcast(&pm->condvar);
}

static void pm_work_handler(struct k_work *work)
{
struct pm_device *pm = CONTAINER_OF(work,
struct pm_device, work);

(void)k_mutex_lock(&pm->lock, K_FOREVER);
pm_device_runtime_state_set(pm);
(void)k_mutex_unlock(&pm->lock);
}

static int pm_device_request(const struct device *dev,
enum pm_device_state state, uint32_t pm_flags)
static int runtime_suspend(const struct device *dev, bool async)
{
int ret = 0;
struct pm_device *pm = dev->pm;

SYS_PORT_TRACING_FUNC_ENTER(pm, device_request, dev, state);

__ASSERT((state == PM_DEVICE_STATE_ACTIVE) ||
(state == PM_DEVICE_STATE_SUSPENDED),
"Invalid device PM state requested");

if (k_is_pre_kernel()) {
if (state == PM_DEVICE_STATE_ACTIVE) {
pm->usage++;
async = false;
} else {
pm->usage--;
}

/* If we are being called before the kernel was initialized
* we can assume that the system took care of initialized
* devices properly. It means that all dependencies were
* satisfied and this call just incremented the reference count
* for this device.
*/

/* Unfortunately this is not what is happening yet. There are
* cases, for example, like the pinmux being initialized before
* the gpio. Lets just power on/off the device.
*/
if (pm->usage == 1) {
(void)pm_device_state_set(dev, PM_DEVICE_STATE_ACTIVE);
} else if (pm->usage == 0) {
(void)pm_device_state_set(dev, PM_DEVICE_STATE_SUSPENDED);
}
goto out;
}

(void)k_mutex_lock(&pm->lock, K_FOREVER);
}

if (!pm->enable) {
ret = -ENOTSUP;
goto out_unlock;
goto unlock;
}

if (state == PM_DEVICE_STATE_ACTIVE) {
pm->usage++;
if (pm->usage > 1) {
goto out_unlock;
}
} else {
/* Check if it is already 0 to avoid an underflow */
if (pm->usage == 0) {
goto out_unlock;
if (pm->usage == 0U) {
LOG_WRN("Unbalanced suspend");
ret = -EALREADY;
goto unlock;
}

pm->usage--;
if (pm->usage > 0) {
goto out_unlock;
}
if (pm->usage > 0U) {
goto unlock;
}

/* Return in case of Async request */
if (pm_flags & PM_DEVICE_ASYNC) {
atomic_set_bit(&pm->flags, PM_DEVICE_FLAG_TRANSITIONING);
if (async && !k_is_pre_kernel()) {
/* queue suspend */
pm->state = PM_DEVICE_STATE_SUSPENDING;
(void)k_work_schedule(&pm->work, K_NO_WAIT);
goto out_unlock;
} else {
/* suspend now */
ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
if (ret < 0) {
pm->usage++;
goto unlock;
}

while ((k_work_delayable_is_pending(&pm->work)) ||
atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_TRANSITIONING)) {
ret = k_condvar_wait(&pm->condvar, &pm->lock,
K_FOREVER);
if (ret != 0) {
break;
}
pm->state = PM_DEVICE_STATE_SUSPENDED;
}

pm_device_runtime_state_set(pm);
unlock:
if (!k_is_pre_kernel()) {
k_mutex_unlock(&pm->lock);
}

/*
* pm->state was set in pm_device_runtime_state_set(). As the
* device may not have been properly changed to the state or
* another thread we check it here before returning.
*/
ret = state == pm->state ? 0 : -EIO;

out_unlock:
(void)k_mutex_unlock(&pm->lock);
out:
SYS_PORT_TRACING_FUNC_EXIT(pm, device_request, dev, ret);
return ret;
}

static void runtime_suspend_work(struct k_work *work)
{
int ret;
struct pm_device *pm = CONTAINER_OF(work, struct pm_device, work);

ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);

(void)k_mutex_lock(&pm->lock, K_FOREVER);
if (ret == 0) {
pm->state = PM_DEVICE_STATE_SUSPENDED;
}
k_condvar_broadcast(&pm->condvar);
k_mutex_unlock(&pm->lock);

__ASSERT(ret == 0, "Could not suspend device (%d)", ret);
}

int pm_device_get(const struct device *dev)
{
return pm_device_request(dev, PM_DEVICE_STATE_ACTIVE, 0);
int ret = 0;
struct pm_device *pm = dev->pm;

SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_get, dev);

if (!k_is_pre_kernel()) {
(void)k_mutex_lock(&pm->lock, K_FOREVER);
}

if (!pm->enable) {
ret = -ENOTSUP;
goto unlock;
}

pm->usage++;

if (!k_is_pre_kernel()) {
/* wait until possible async suspend is completed */
while (pm->state == PM_DEVICE_STATE_SUSPENDING) {
(void)k_condvar_wait(&pm->condvar, &pm->lock, K_FOREVER);
}
}

if (pm->usage > 1U) {
goto unlock;
}

ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_RESUME);
if (ret < 0) {
pm->usage--;
goto unlock;
}

pm->state = PM_DEVICE_STATE_ACTIVE;

unlock:
if (!k_is_pre_kernel()) {
k_mutex_unlock(&pm->lock);
}

SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret);

return ret;
}

int pm_device_put(const struct device *dev)
{
return pm_device_request(dev, PM_DEVICE_STATE_SUSPENDED, 0);
int ret;

SYS_PORT_TRACING_FUNC_ENTER(pm, device_put, dev);
ret = runtime_suspend(dev, false);
SYS_PORT_TRACING_FUNC_EXIT(pm, device_put, dev, ret);

return ret;
}

int pm_device_put_async(const struct device *dev)
{
return pm_device_request(dev, PM_DEVICE_STATE_SUSPENDED, PM_DEVICE_ASYNC);
int ret;

SYS_PORT_TRACING_FUNC_ENTER(pm, device_put_async, dev);
ret = runtime_suspend(dev, true);
SYS_PORT_TRACING_FUNC_EXIT(pm, device_put_async, dev, ret);

return ret;
}

void pm_device_enable(const struct device *dev)

@@ -171,56 +171,73 @@ void pm_device_enable(const struct device *dev)
struct pm_device *pm = dev->pm;

SYS_PORT_TRACING_FUNC_ENTER(pm, device_enable, dev);
if (k_is_pre_kernel()) {
pm->dev = dev;
if (pm->action_cb != NULL) {
pm->enable = true;
pm->state = PM_DEVICE_STATE_SUSPENDED;
k_work_init_delayable(&pm->work, pm_work_handler);
}
goto out;
}

if (!k_is_pre_kernel()) {
(void)k_mutex_lock(&pm->lock, K_FOREVER);
if (pm->action_cb == NULL) {
pm->enable = false;
goto out_unlock;
}

if (pm->enable) {
goto unlock;
}

/* lazy init of PM fields */
if (pm->dev == NULL) {
pm->dev = dev;
pm->state = PM_DEVICE_STATE_SUSPENDED;
k_work_init_delayable(&pm->work, runtime_suspend_work);
}

pm->enable = true;

/* During the driver init, device can set the
* PM state accordingly. For later cases we need
* to check the usage and set the device PM state.
*/
if (!pm->dev) {
pm->dev = dev;
pm->state = PM_DEVICE_STATE_SUSPENDED;
k_work_init_delayable(&pm->work, pm_work_handler);
} else {
k_work_schedule(&pm->work, K_NO_WAIT);
unlock:
if (!k_is_pre_kernel()) {
k_mutex_unlock(&pm->lock);
}

out_unlock:
(void)k_mutex_unlock(&pm->lock);
out:
SYS_PORT_TRACING_FUNC_EXIT(pm, device_enable, dev);
}

void pm_device_disable(const struct device *dev)
int pm_device_disable(const struct device *dev)
{
int ret = 0;
struct pm_device *pm = dev->pm;

SYS_PORT_TRACING_FUNC_ENTER(pm, device_disable, dev);
__ASSERT(k_is_pre_kernel() == false, "Device should not be disabled "
"before kernel is initialized");

if (!k_is_pre_kernel()) {
(void)k_mutex_lock(&pm->lock, K_FOREVER);
if (pm->enable) {
}

if (!pm->enable) {
goto unlock;
}

/* wait until possible async suspend is completed */
if (!k_is_pre_kernel()) {
while (pm->state == PM_DEVICE_STATE_SUSPENDING) {
(void)k_condvar_wait(&pm->condvar, &pm->lock,
K_FOREVER);
}
}

/* wake up the device if suspended */
if (pm->state == PM_DEVICE_STATE_SUSPENDED) {
ret = pm->action_cb(pm->dev, PM_DEVICE_ACTION_RESUME);
if (ret < 0) {
goto unlock;
}

pm->state = PM_DEVICE_STATE_ACTIVE;
}

pm->enable = false;
/* Bring up the device before disabling the Idle PM */
k_work_schedule(&pm->work, K_NO_WAIT);

unlock:
if (!k_is_pre_kernel()) {
k_mutex_unlock(&pm->lock);
}
(void)k_mutex_unlock(&pm->lock);
SYS_PORT_TRACING_FUNC_EXIT(pm, device_disable, dev);

SYS_PORT_TRACING_FUNC_EXIT(pm, device_disable, dev, ret);

return ret;
}
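Finally, because pm_device_disable() is now synchronous and returns an error code, a caller can propagate a failed resume instead of ignoring it. A hedged sketch with a hypothetical teardown function:

#include <device.h>
#include <pm/device_runtime.h>

/* Hypothetical driver teardown: leave runtime PM with the device resumed. */
static int my_drv_teardown(const struct device *dev)
{
	int ret;

	/* Synchronously resumes the device if it was suspended. */
	ret = pm_device_disable(dev);
	if (ret < 0) {
		return ret;
	}

	/* ... rest of the teardown ... */

	return 0;
}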