From 07525a3d541ea1d5795063268c16d1e257a13cbf Mon Sep 17 00:00:00 2001
From: Andrew Boie
Date: Sat, 21 Sep 2019 16:17:23 -0700
Subject: [PATCH] kernel: add arch interface for idle functions

k_cpu_idle() and k_cpu_atomic_idle() were being directly implemented
by arch code.

Rename these implementations to z_arch_cpu_idle() and
z_arch_cpu_atomic_idle(), and call them from new inline function
definitions in kernel.h.

Signed-off-by: Andrew Boie
---
 arch/arc/core/cpu_idle.S                 | 10 +++++-----
 arch/arm/core/cpu_idle.S                 | 14 +++++++-------
 arch/arm/include/kernel_arch_func.h      |  2 +-
 arch/nios2/core/cpu_idle.c               |  6 +++---
 arch/nios2/include/kernel_arch_func.h    |  3 ---
 arch/posix/core/cpuhalt.c                | 10 +++++-----
 arch/riscv/core/cpu_idle.c               | 10 +++++-----
 arch/riscv/include/kernel_arch_func.h    |  3 ---
 arch/x86/core/cpuhalt.c                  |  8 ++++----
 arch/x86/include/ia32/kernel_arch_func.h |  2 +-
 arch/x86_64/core/x86_64.c                |  2 +-
 arch/xtensa/core/cpu_idle.c              |  8 ++++----
 arch/xtensa/include/kernel_arch_func.h   |  2 --
 doc/guides/porting/arch.rst              | 16 ++++++++--------
 include/arch/arc/v2/misc.h               |  2 --
 include/arch/arm/misc.h                  |  2 --
 include/arch/x86/ia32/arch.h             |  2 --
 include/kernel.h                         | 13 +++++++++++--
 soc/posix/inf_clock/soc.c                |  6 +++---
 soc/riscv/riscv-privilege/common/idle.c  |  6 +++---
 20 files changed, 61 insertions(+), 66 deletions(-)

diff --git a/arch/arc/core/cpu_idle.S b/arch/arc/core/cpu_idle.S
index f3660d67a3e..8efdd20eb5e 100644
--- a/arch/arc/core/cpu_idle.S
+++ b/arch/arc/core/cpu_idle.S
@@ -17,8 +17,8 @@
 #include
 #include
 
-GTEXT(k_cpu_idle)
-GTEXT(k_cpu_atomic_idle)
+GTEXT(z_arch_cpu_idle)
+GTEXT(z_arch_cpu_atomic_idle)
 GDATA(k_cpu_sleep_mode)
 
 SECTION_VAR(BSS, k_cpu_sleep_mode)
@@ -33,7 +33,7 @@ SECTION_VAR(BSS, k_cpu_sleep_mode)
  *
  * void nanCpuIdle(void)
  */
 
-SECTION_FUNC(TEXT, k_cpu_idle)
+SECTION_FUNC(TEXT, z_arch_cpu_idle)
 #ifdef CONFIG_TRACING
 	push_s blink
 
@@ -52,9 +52,9 @@ SECTION_FUNC(TEXT, k_cpu_idle)
  *
  * This function exits with interrupts restored to <key>.
  *
- * void k_cpu_atomic_idle(unsigned int key)
+ * void z_arch_cpu_atomic_idle(unsigned int key)
  */
-SECTION_FUNC(TEXT, k_cpu_atomic_idle)
+SECTION_FUNC(TEXT, z_arch_cpu_atomic_idle)
 #ifdef CONFIG_TRACING
 	push_s blink
 
diff --git a/arch/arm/core/cpu_idle.S b/arch/arm/core/cpu_idle.S
index c279bf53291..8933e8ae1aa 100644
--- a/arch/arm/core/cpu_idle.S
+++ b/arch/arm/core/cpu_idle.S
@@ -21,8 +21,8 @@ _ASM_FILE_PROLOGUE
 
 GTEXT(z_CpuIdleInit)
-GTEXT(k_cpu_idle)
-GTEXT(k_cpu_atomic_idle)
+GTEXT(z_arch_cpu_idle)
+GTEXT(z_arch_cpu_atomic_idle)
 
 #if defined(CONFIG_CPU_CORTEX_M)
 #define _SCB_SCR 0xE000ED10
@@ -68,10 +68,10 @@ SECTION_FUNC(TEXT, z_CpuIdleInit)
  *
  * C function prototype:
  *
- * void k_cpu_idle (void);
+ * void z_arch_cpu_idle (void);
  */
 
-SECTION_FUNC(TEXT, k_cpu_idle)
+SECTION_FUNC(TEXT, z_arch_cpu_idle)
 #ifdef CONFIG_TRACING
 	push {r0, lr}
 	bl sys_trace_idle
@@ -103,7 +103,7 @@ SECTION_FUNC(TEXT, k_cpu_idle)
  * @brief Atomically re-enable interrupts and enter low power mode
  *
  * INTERNAL
- * The requirements for k_cpu_atomic_idle() are as follows:
+ * The requirements for z_arch_cpu_atomic_idle() are as follows:
  * 1) The enablement of interrupts and entering a low-power mode needs to be
  *    atomic, i.e. there should be no period of time where interrupts are
  *    enabled before the processor enters a low-power mode. See the comments
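The two assembly ports above implement the same contract that the C ports
below spell out in their comments. For a porter, the shape of the new
interface is easiest to state in C. A minimal sketch, modeled on the no-op
Nios II and RISC-V implementations later in this patch; the key value that
means "interrupts enabled" is architecture-specific and shown here as a
hypothetical ARCH_IRQ_UNLOCKED_KEY:

```c
#include <irq.h>

/* Hypothetical "interrupts enabled" key; each arch defines its own. */
#define ARCH_IRQ_UNLOCKED_KEY 0

void z_arch_cpu_idle(void)
{
	/* Contract: may sleep, but must return with interrupts unlocked.
	 * A CPU with no power-saving instruction just unlocks and returns.
	 */
	irq_unlock(ARCH_IRQ_UNLOCKED_KEY);
}

void z_arch_cpu_atomic_idle(unsigned int key)
{
	/* Contract: atomically re-enable interrupts and (optionally) sleep,
	 * then return with the interrupt state described by <key>.
	 */
	irq_unlock(key);
}
```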
diff --git a/arch/arm/include/kernel_arch_func.h b/arch/arm/include/kernel_arch_func.h
index 034c6087465..7ed71ff582d 100644
--- a/arch/arm/include/kernel_arch_func.h
+++ b/arch/arm/include/kernel_arch_func.h
@@ -141,7 +141,7 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 	thread->arch.swap_return_value = value;
 }
 
-extern void k_cpu_atomic_idle(unsigned int key);
+extern void z_arch_cpu_atomic_idle(unsigned int key);
 
 #define z_arch_is_in_isr() z_IsInIsr()
 
diff --git a/arch/nios2/core/cpu_idle.c b/arch/nios2/core/cpu_idle.c
index 8368be5e26e..d895fcc9317 100644
--- a/arch/nios2/core/cpu_idle.c
+++ b/arch/nios2/core/cpu_idle.c
@@ -17,7 +17,7 @@
  *
  * @return N/A
  */
-void k_cpu_idle(void)
+void z_arch_cpu_idle(void)
 {
 	/* Do nothing but unconditionally unlock interrupts and return to the
 	 * caller. This CPU does not have any kind of power saving instruction.
@@ -30,7 +30,7 @@ void k_cpu_idle(void)
  * @brief Atomically re-enable interrupts and enter low power mode
  *
  * INTERNAL
- * The requirements for k_cpu_atomic_idle() are as follows:
+ * The requirements for z_arch_cpu_atomic_idle() are as follows:
  * 1) The enablement of interrupts and entering a low-power mode needs to be
  *    atomic, i.e. there should be no period of time where interrupts are
  *    enabled before the processor enters a low-power mode. See the comments
@@ -42,7 +42,7 @@ void k_cpu_idle(void)
  *
  * @return N/A
  */
-void k_cpu_atomic_idle(unsigned int key)
+void z_arch_cpu_atomic_idle(unsigned int key)
 {
 	/* Do nothing but restore IRQ state. This CPU does not have any
 	 * kind of power saving instruction.
 
diff --git a/arch/nios2/include/kernel_arch_func.h b/arch/nios2/include/kernel_arch_func.h
index 693d0fb41aa..861f0f417da 100644
--- a/arch/nios2/include/kernel_arch_func.h
+++ b/arch/nios2/include/kernel_arch_func.h
@@ -26,9 +26,6 @@ extern "C" {
 
 #ifndef _ASMLANGUAGE
 
-void k_cpu_idle(void);
-void k_cpu_atomic_idle(unsigned int key);
-
 static ALWAYS_INLINE void kernel_arch_init(void)
 {
 	_kernel.irq_stack =
 
diff --git a/arch/posix/core/cpuhalt.c b/arch/posix/core/cpuhalt.c
index 4d27a8143ba..44963d4c90e 100644
--- a/arch/posix/core/cpuhalt.c
+++ b/arch/posix/core/cpuhalt.c
@@ -10,11 +10,11 @@
  * This module provides:
  *
  * An implementation of the architecture-specific
- * k_cpu_idle() primitive required by the kernel idle loop component.
+ * z_arch_cpu_idle() primitive required by the kernel idle loop component.
  * It can be called within an implementation of _sys_power_save_idle(),
  * which is provided for the kernel by the platform.
  *
- * An implementation of k_cpu_atomic_idle(), which
+ * An implementation of z_arch_cpu_atomic_idle(), which
  * atomically re-enables interrupts and enters low power mode.
 *
 * A weak stub for sys_arch_reboot(), which does nothing
@@ -36,7 +36,7 @@
  *
  * @return N/A
  */
-void k_cpu_idle(void)
+void z_arch_cpu_idle(void)
 {
 	sys_trace_idle();
 	posix_irq_full_unlock();
@@ -48,7 +48,7 @@ void k_cpu_idle(void)
  * @brief Atomically re-enable interrupts and enter low power mode
  *
  * INTERNAL
- * The requirements for k_cpu_atomic_idle() are as follows:
+ * The requirements for z_arch_cpu_atomic_idle() are as follows:
  * 1) The enablement of interrupts and entering a low-power mode needs to be
  *    atomic, i.e. there should be no period of time where interrupts are
  *    enabled before the processor enters a low-power mode. See the comments
@@ -62,7 +62,7 @@ void k_cpu_idle(void)
  *
  * @return N/A
  */
-void k_cpu_atomic_idle(unsigned int key)
+void z_arch_cpu_atomic_idle(unsigned int key)
 {
 	sys_trace_idle();
 	posix_atomic_halt_cpu(key);
 
diff --git a/arch/riscv/core/cpu_idle.c b/arch/riscv/core/cpu_idle.c
index babab6a70f0..b1c97c7029d 100644
--- a/arch/riscv/core/cpu_idle.c
+++ b/arch/riscv/core/cpu_idle.c
@@ -9,11 +9,11 @@
 /*
  * In RISC-V there is no conventional way to handle CPU power save.
  * Each RISC-V SOC handles it in its own way.
- * Hence, by default, k_cpu_idle and k_cpu_atomic_idle functions just
+ * Hence, by default, z_arch_cpu_idle and z_arch_cpu_atomic_idle functions just
  * unlock interrupts and return to the caller, without issuing any CPU power
  * saving instruction.
  *
- * Nonetheless, define the default k_cpu_idle and k_cpu_atomic_idle
+ * Nonetheless, define the default z_arch_cpu_idle and z_arch_cpu_atomic_idle
  * functions as weak functions, so that they can be replaced at the SOC-level.
  */
 
@@ -27,7 +27,7 @@
  *
  * @return N/A
  */
-void __weak k_cpu_idle(void)
+void __weak z_arch_cpu_idle(void)
 {
 	irq_unlock(SOC_MSTATUS_IEN);
 }
@@ -37,7 +37,7 @@ void __weak k_cpu_idle(void)
  * @brief Atomically re-enable interrupts and enter low power mode
  *
  * INTERNAL
- * The requirements for k_cpu_atomic_idle() are as follows:
+ * The requirements for z_arch_cpu_atomic_idle() are as follows:
  * 1) The enablement of interrupts and entering a low-power mode needs to be
  *    atomic, i.e. there should be no period of time where interrupts are
  *    enabled before the processor enters a low-power mode. See the comments
@@ -49,7 +49,7 @@ void __weak k_cpu_idle(void)
  *
  * @return N/A
  */
-void __weak k_cpu_atomic_idle(unsigned int key)
+void __weak z_arch_cpu_atomic_idle(unsigned int key)
 {
 	irq_unlock(key);
 }
 
diff --git a/arch/riscv/include/kernel_arch_func.h b/arch/riscv/include/kernel_arch_func.h
index f6264411b91..6a7e6acdb6a 100644
--- a/arch/riscv/include/kernel_arch_func.h
+++ b/arch/riscv/include/kernel_arch_func.h
@@ -22,9 +22,6 @@ extern "C" {
 #endif
 
 #ifndef _ASMLANGUAGE
-void k_cpu_idle(void);
-void k_cpu_atomic_idle(unsigned int key);
-
 static ALWAYS_INLINE void kernel_arch_init(void)
 {
 	_kernel.irq_stack =
 
diff --git a/arch/x86/core/cpuhalt.c b/arch/x86/core/cpuhalt.c
index 317608175e2..5afa016a33a 100644
--- a/arch/x86/core/cpuhalt.c
+++ b/arch/x86/core/cpuhalt.c
@@ -18,7 +18,7 @@
  *
  * @return N/A
  */
-void k_cpu_idle(void)
+void z_arch_cpu_idle(void)
 {
 	sys_trace_idle();
 #if defined(CONFIG_BOOT_TIME_MEASUREMENT)
@@ -35,7 +35,7 @@
  * @brief Atomically re-enable interrupts and enter low power mode
  *
  * INTERNAL
- * The requirements for k_cpu_atomic_idle() are as follows:
+ * The requirements for z_arch_cpu_atomic_idle() are as follows:
  * 1) The enablement of interrupts and entering a low-power mode needs to be
  *    atomic, i.e. there should be no period of time where interrupts are
  *    enabled before the processor enters a low-power mode. See the comments
@@ -48,7 +48,7 @@ void k_cpu_idle(void)
  *
  * @return N/A
  */
-void k_cpu_atomic_idle(unsigned int key)
+void z_arch_cpu_atomic_idle(unsigned int key)
 {
 	sys_trace_idle();
 
@@ -63,7 +63,7 @@ void k_cpu_atomic_idle(unsigned int key)
 	 * external, maskable interrupts after the next instruction is
 	 * executed."
 	 *
-	 * Thus the IA-32 implementation of k_cpu_atomic_idle() will
+	 * Thus the IA-32 implementation of z_arch_cpu_atomic_idle() will
 	 * atomically re-enable interrupts and enter a low-power mode.
 	 */
 	"hlt\n\t");
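The SDM text quoted above is the whole trick on IA-32: `sti` does not take
effect until after the following instruction has executed, so `sti; hlt`
leaves no window in which a wakeup interrupt can be serviced before the
halt. In sketch form, where `broken_atomic_idle()` is an illustrative
non-example rather than code from this patch:

```c
/* Non-atomic version: the race the comment above warns about. */
void broken_atomic_idle(unsigned int key)
{
	irq_unlock(key);         /* interrupts are enabled again...       */
	                         /* <-- the wakeup interrupt can fire and */
	                         /*     be fully serviced right here      */
	__asm__ volatile("hlt"); /* ...and then we halt anyway, possibly  */
	                         /* sleeping through our own wakeup       */
}

/* Atomic version, as in the patch: "sti" takes effect only after the
 * next instruction, so no interrupt can land between the two.
 */
void atomic_idle_ia32(void)
{
	__asm__ volatile("sti\n\t"
			 "hlt\n\t");
}
```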
diff --git a/arch/x86/include/ia32/kernel_arch_func.h b/arch/x86/include/ia32/kernel_arch_func.h
index 9da0b074180..4bad01fbe15 100644
--- a/arch/x86/include/ia32/kernel_arch_func.h
+++ b/arch/x86/include/ia32/kernel_arch_func.h
@@ -67,7 +67,7 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 	*(unsigned int *)(thread->callee_saved.esp) = value;
 }
 
-extern void k_cpu_atomic_idle(unsigned int key);
+extern void z_arch_cpu_atomic_idle(unsigned int key);
 
 #ifdef CONFIG_USERSPACE
 extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry,
 
diff --git a/arch/x86_64/core/x86_64.c b/arch/x86_64/core/x86_64.c
index 9f193de4101..52d14e6a126 100644
--- a/arch/x86_64/core/x86_64.c
+++ b/arch/x86_64/core/x86_64.c
@@ -43,7 +43,7 @@ void z_arch_new_thread(struct k_thread *t, k_thread_stack_t *stack,
 		       nargs);
 }
 
-void k_cpu_idle(void)
+void z_arch_cpu_idle(void)
 {
 	sys_trace_idle();
 	__asm__ volatile("sti; hlt");
 
diff --git a/arch/xtensa/core/cpu_idle.c b/arch/xtensa/core/cpu_idle.c
index b0fce09e3fe..83d1355769e 100644
--- a/arch/xtensa/core/cpu_idle.c
+++ b/arch/xtensa/core/cpu_idle.c
@@ -10,9 +10,9 @@
  *
  * This function always exits with interrupts unlocked.
  *
- * void k_cpu_idle(void)
+ * void z_arch_cpu_idle(void)
  */
-void k_cpu_idle(void)
+void z_arch_cpu_idle(void)
 {
 	sys_trace_idle();
 	__asm__ volatile ("waiti 0");
@@ -22,9 +22,9 @@
  *
  * This function exits with interrupts restored to <key>.
  *
- * void k_cpu_atomic_idle(unsigned int key)
+ * void z_arch_cpu_atomic_idle(unsigned int key)
  */
-void k_cpu_atomic_idle(unsigned int key)
+void z_arch_cpu_atomic_idle(unsigned int key)
 {
 	sys_trace_idle();
 	__asm__ volatile ("waiti 0\n\t"
 
diff --git a/arch/xtensa/include/kernel_arch_func.h b/arch/xtensa/include/kernel_arch_func.h
index 91e21db811a..6a02fbf35be 100644
--- a/arch/xtensa/include/kernel_arch_func.h
+++ b/arch/xtensa/include/kernel_arch_func.h
@@ -83,8 +83,6 @@ static ALWAYS_INLINE void kernel_arch_init(void)
 #endif
 }
 
-extern void k_cpu_atomic_idle(unsigned int key);
-
 #ifdef __cplusplus
 }
 #endif
 
diff --git a/doc/guides/porting/arch.rst b/doc/guides/porting/arch.rst
index ab2da03cec4..ae51b57e85e 100644
--- a/doc/guides/porting/arch.rst
+++ b/doc/guides/porting/arch.rst
@@ -407,14 +407,14 @@ CPU Idling/Power Management
 ***************************
 
 The kernel provides support for CPU power management with two functions:
-:c:func:`k_cpu_idle` and :c:func:`k_cpu_atomic_idle`.
+:c:func:`z_arch_cpu_idle` and :c:func:`z_arch_cpu_atomic_idle`.
 
-:c:func:`k_cpu_idle` can be as simple as calling the power saving instruction
-for the architecture with interrupts unlocked, for example :code:`hlt` on x86,
-:code:`wfi` or :code:`wfe` on ARM, :code:`sleep` on ARC. This function can be
-called in a loop within a context that does not care if it get interrupted or
-not by an interrupt before going to sleep. There are basically two scenarios
-when it is correct to use this function:
+:c:func:`z_arch_cpu_idle` can be as simple as calling the power saving
+instruction for the architecture with interrupts unlocked, for example
+:code:`hlt` on x86, :code:`wfi` or :code:`wfe` on ARM, :code:`sleep` on ARC.
+This function can be called in a loop within a context that does not care if
+it gets interrupted or not by an interrupt before going to sleep. There are
+basically two scenarios when it is correct to use this function:
 
 * In a single-threaded system, in the only thread when the thread is not used
   for doing real work after initialization, i.e. it is sitting in a loop doing
 
@@ -422,7 +422,7 @@ when it is correct to use this function:
 
 * In the idle thread.
 
-:c:func:`k_cpu_atomic_idle`, on the other hand, must be able to atomically
+:c:func:`z_arch_cpu_atomic_idle`, on the other hand, must be able to atomically
 re-enable interrupts and invoke the power saving instruction. It can thus be
 used in real application code, again in single-threaded systems.
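The two functions map onto the two usage patterns the document describes.
A sketch of both, where `init_board()` and the `work_ready` flag are
hypothetical application names:

```c
#include <stdbool.h>
#include <kernel.h>

static volatile bool work_ready;   /* set by a hypothetical ISR */

/* Scenario: a single-threaded system whose only thread has nothing left
 * to do after initialization; it does not care whether an interrupt
 * fires just before it goes to sleep.
 */
void main(void)
{
	init_board();   /* hypothetical */

	for (;;) {
		k_cpu_idle();
	}
}

/* By contrast, code that checks a condition and then sleeps must use the
 * atomic variant, or the ISR could fire between the check and the sleep
 * and the wakeup would be lost.
 */
void wait_for_work(void)
{
	unsigned int key = irq_lock();

	while (!work_ready) {
		k_cpu_atomic_idle(key);  /* re-enables IRQs and sleeps atomically */
		key = irq_lock();        /* re-lock before re-checking the flag  */
	}
	irq_unlock(key);
}
```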
diff --git a/include/arch/arc/v2/misc.h b/include/arch/arc/v2/misc.h
index 00fa67daac3..7e90b2ece6e 100644
--- a/include/arch/arc/v2/misc.h
+++ b/include/arch/arc/v2/misc.h
@@ -20,8 +20,6 @@ extern "C" {
 #ifndef _ASMLANGUAGE
 
 extern unsigned int k_cpu_sleep_mode;
-extern void k_cpu_idle(void);
-extern void k_cpu_atomic_idle(unsigned int key);
 
 extern u32_t z_timer_cycle_get_32(void);
 #define z_arch_k_cycle_get_32() z_timer_cycle_get_32()
 
diff --git a/include/arch/arm/misc.h b/include/arch/arm/misc.h
index 6bccf2e1765..02e37a359c1 100644
--- a/include/arch/arm/misc.h
+++ b/include/arch/arm/misc.h
@@ -19,8 +19,6 @@ extern "C" {
 #endif
 
 #ifndef _ASMLANGUAGE
-extern void k_cpu_idle(void);
-
 extern u32_t z_timer_cycle_get_32(void);
 #define z_arch_k_cycle_get_32() z_timer_cycle_get_32()
 
diff --git a/include/arch/x86/ia32/arch.h b/include/arch/x86/ia32/arch.h
index f8f72eb2db0..d2116c57880 100644
--- a/include/arch/x86/ia32/arch.h
+++ b/include/arch/x86/ia32/arch.h
@@ -399,8 +399,6 @@ extern void k_float_enable(struct k_thread *thread, unsigned int options);
  * @}
  */
 
-extern void k_cpu_idle(void);
-
 #ifdef CONFIG_X86_ENABLE_TSS
 extern struct task_state_segment _main_tss;
 #endif
 
diff --git a/include/kernel.h b/include/kernel.h
index 02987822786..d3a3de4e48e 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -4630,6 +4630,9 @@ extern void z_handle_obj_poll_events(sys_dlist_t *events, u32_t state);
  * @{
  */
 
+extern void z_arch_cpu_idle(void);
+extern void z_arch_cpu_atomic_idle(unsigned int key);
+
 /**
  * @brief Make the CPU idle.
  *
@@ -4643,7 +4646,10 @@ extern void z_handle_obj_poll_events(sys_dlist_t *events, u32_t state);
  * @return N/A
  * @req K-CPU-IDLE-001
  */
-extern void k_cpu_idle(void);
+static inline void k_cpu_idle(void)
+{
+	z_arch_cpu_idle();
+}
 
 /**
  * @brief Make the CPU idle in an atomic fashion.
  *
@@ -4656,7 +4662,10 @@ extern void k_cpu_idle(void);
  * @return N/A
  * @req K-CPU-IDLE-002
  */
-extern void k_cpu_atomic_idle(unsigned int key);
+static inline void k_cpu_atomic_idle(unsigned int key)
+{
+	z_arch_cpu_atomic_idle(key);
+}
 
 /**
  * @}
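This hunk is the heart of the patch: the public names survive as
zero-overhead inline shims, so existing call sites compile unchanged, while
the two prototypes now live in exactly one header instead of being repeated
across per-arch headers (and removed from them above). An illustrative call
site, identical before and after:

```c
#include <kernel.h>

void idle_until_interrupt(void)
{
	unsigned int key = irq_lock();

	/* Before this patch: an extern call into arch code.
	 * After: an inline shim that expands to z_arch_cpu_atomic_idle(key).
	 * Either way, this caller is recompiled, not rewritten.
	 */
	k_cpu_atomic_idle(key);
}
```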
 *
 * Therefore, the code will always run until completion after each interrupt,
- * after which k_cpu_idle() will be called releasing the execution back to the
+ * after which z_arch_cpu_idle() will be called releasing the execution back to the
  * HW models.
  *
  * The HW models raising an interrupt will "awake the cpu" by calling
@@ -125,7 +125,7 @@ void posix_interrupt_raised(void)
 
 
 /**
- * Normally called from k_cpu_idle():
+ * Normally called from z_arch_cpu_idle():
  * the idle loop will call this function to set the CPU to "sleep".
  * Others may also call this function with care. The CPU will be set to sleep
  * until some interrupt awakes it.
@@ -156,7 +156,7 @@ void posix_halt_cpu(void)
 
 
 /**
- * Implementation of k_cpu_atomic_idle() for this SOC
+ * Implementation of z_arch_cpu_atomic_idle() for this SOC
  */
 void posix_atomic_halt_cpu(unsigned int imask)
 {
 
diff --git a/soc/riscv/riscv-privilege/common/idle.c b/soc/riscv/riscv-privilege/common/idle.c
index 1571fb81567..592f2725d8f 100644
--- a/soc/riscv/riscv-privilege/common/idle.c
+++ b/soc/riscv/riscv-privilege/common/idle.c
@@ -31,7 +31,7 @@ static ALWAYS_INLINE void riscv_idle(unsigned int key)
  *
  * @return N/A
  */
-void k_cpu_idle(void)
+void z_arch_cpu_idle(void)
 {
 	riscv_idle(SOC_MSTATUS_IEN);
 }
@@ -41,7 +41,7 @@ void k_cpu_idle(void)
  * @brief Atomically re-enable interrupts and enter low power mode
  *
  * INTERNAL
- * The requirements for k_cpu_atomic_idle() are as follows:
+ * The requirements for z_arch_cpu_atomic_idle() are as follows:
  * 1) The enablement of interrupts and entering a low-power mode needs to be
  *    atomic, i.e. there should be no period of time where interrupts are
  *    enabled before the processor enters a low-power mode. See the comments
@@ -53,7 +53,7 @@ void k_cpu_idle(void)
  *
  * @return N/A
  */
-void k_cpu_atomic_idle(unsigned int key)
+void z_arch_cpu_atomic_idle(unsigned int key)
 {
 	riscv_idle(key);
 }
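Because the RISC-V arch-level defaults are declared `__weak`, these strong
SoC-level definitions replace them at link time with no Kconfig plumbing.
The body of `riscv_idle()` sits outside these hunks; a plausible shape,
assuming it pairs an interrupt unlock with the RISC-V wait instruction,
would be:

```c
/* Sketch only: riscv_idle() as it would plausibly look given the callers
 * above; not taken verbatim from this patch.
 */
static ALWAYS_INLINE void riscv_idle(unsigned int key)
{
	/* Unlock interrupts to the state described by <key>... */
	irq_unlock(key);
	/* ...and wait for an interrupt. */
	__asm__ volatile("wfi");
}
```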