pm: Deprecate z_pm_save_idle_exit

Deprecate z_pm_save_idle_exit and promote pm_system_resume to the public PM API.

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
Author:    Flavio Ceolin
Date:      2024-04-30 16:45:49 -07:00
Committer: Carles Cufí
Commit:    4d85f3d91c

7 changed files with 32 additions and 27 deletions
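
For downstream code that still calls the deprecated symbol, the migration shown in the hunks below is a one-line rename. A minimal sketch, assuming CONFIG_PM=y (the helper name is hypothetical and not part of this commit):

#include <zephyr/pm/pm.h>

/* Hypothetical out-of-tree idle-exit helper illustrating the rename.
 * z_pm_save_idle_exit() still exists but is now marked __deprecated,
 * so calling it triggers a compile-time deprecation warning.
 */
static inline void notify_idle_exit(void)
{
	pm_system_resume();   /* was: z_pm_save_idle_exit(); */
}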

@@ -98,7 +98,7 @@ void _arch_isr_direct_pm(void)
 	if (_kernel.idle) {
 		_kernel.idle = 0;
-		z_pm_save_idle_exit();
+		pm_system_resume();
 	}
 	irq_unlock(key);

@@ -131,7 +131,7 @@ void _arch_isr_direct_pm(void)
 	if (_kernel.idle) {
 		_kernel.idle = 0;
-		z_pm_save_idle_exit();
+		pm_system_resume();
 	}
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)

@@ -42,7 +42,7 @@ void _isr_wrapper(void)
 	 * idle, this ensures that the calculation and programming of the
 	 * device for the next timer deadline is not interrupted. For
 	 * non-tickless idle, this ensures that the clearing of the kernel idle
-	 * state is not interrupted. In each case, z_pm_save_idle_exit
+	 * state is not interrupted. In each case, pm_system_resume
 	 * is called with interrupts disabled.
 	 */
@@ -59,7 +59,7 @@ void _isr_wrapper(void)
 	if (_kernel.idle != 0) {
 		/* clear kernel idle state */
 		_kernel.idle = 0;
-		z_pm_save_idle_exit();
+		pm_system_resume();
 	}
 	/* re-enable interrupts */
 	__enable_irq();

@@ -112,7 +112,7 @@ void posix_irq_check_idle_exit(void)
 {
 	if (_kernel.idle) {
 		_kernel.idle = 0;
-		z_pm_save_idle_exit();
+		pm_system_resume();
 	}
 }
 #endif

@@ -250,7 +250,7 @@ static inline void arch_irq_direct_pm(void)
 {
 	if (_kernel.idle) {
 		_kernel.idle = 0;
-		z_pm_save_idle_exit();
+		pm_system_resume();
 	}
 }

@@ -116,8 +116,28 @@ int pm_notifier_unregister(struct pm_notifier *notifier);
  */
 const struct pm_state_info *pm_state_next_get(uint8_t cpu);
 
+/**
+ * @brief Notify exit from kernel sleep.
+ *
+ * This function would notify exit from kernel idling if a corresponding
+ * pm_system_suspend() notification was handled and did not return
+ * PM_STATE_ACTIVE.
+ *
+ * This function should be called from the ISR context of the event
+ * that caused the exit from kernel idling.
+ *
+ * This is required for cpu power states that would require
+ * interrupts to be enabled while entering low power states. e.g. C1 in x86. In
+ * those cases, the ISR would be invoked immediately after the event wakes up
+ * the CPU, before code following the CPU wait, gets a chance to execute. This
+ * can be ignored if no operation needs to be done at the wake event
+ * notification.
+ */
+void pm_system_resume(void);
+
 /** @cond INTERNAL_HIDDEN */
-void z_pm_save_idle_exit(void);
+__deprecated void z_pm_save_idle_exit(void);
 /** @endcond */
 
 /**
@@ -182,6 +202,11 @@ static inline const struct pm_state_info *pm_state_next_get(uint8_t cpu)
 static inline void z_pm_save_idle_exit(void)
 {
 }
 
+static inline void pm_system_resume(void)
+{
+}
+
 #endif /* CONFIG_PM */
 
 #ifdef __cplusplus
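
To make the wake-event contract in the new doc comment concrete, here is a minimal sketch, not taken from this commit, of what the in-tree idle-exit paths in the hunks above do for states that wait with interrupts enabled (such as C1 on x86). It assumes CONFIG_PM=y; access to _kernel assumes kernel-internal headers, and the hook name is illustrative:

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>  /* assumed include that exposes _kernel */
#include <zephyr/pm/pm.h>

/* Idle side: wait for an event with interrupts enabled. Execution only
 * continues past k_cpu_idle() after the wake-up ISR has already run,
 * which is why the exit notification cannot be issued from here.
 */
static void idle_wait(void)
{
	k_cpu_idle();
}

/* ISR-exit side (the pattern the arch wrappers above implement): if the
 * kernel was idling, clear the flag and notify the PM subsystem.
 */
static void isr_exit_idle_hook(void)
{
	if (_kernel.idle) {
		_kernel.idle = 0;
		pm_system_resume();
	}
}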

@@ -243,26 +243,6 @@ void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);
  */
 bool pm_system_suspend(int32_t ticks);
 
-/**
- * Notify exit from kernel idling after PM operations
- *
- * This function would notify exit from kernel idling if a corresponding
- * pm_system_suspend() notification was handled and did not return
- * PM_STATE_ACTIVE.
- *
- * This function would be called from the ISR context of the event
- * that caused the exit from kernel idling. This will be called immediately
- * after interrupts are enabled. This is called to give a chance to do
- * any operations before the kernel would switch tasks or processes nested
- * interrupts. This is required for cpu low power states that would require
- * interrupts to be enabled while entering low power states. e.g. C1 in x86. In
- * those cases, the ISR would be invoked immediately after the event wakes up
- * the CPU, before code following the CPU wait, gets a chance to execute. This
- * can be ignored if no operation needs to be done at the wake event
- * notification.
- */
-void pm_system_resume(void);
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM