kernel: add arch interface for idle functions

k_cpu_idle() and k_cpu_atomic_idle() were being directly
implemented by arch code.

Rename these implementations to z_arch_cpu_idle() and
z_arch_cpu_atomic_idle(), and call them from new inline
function definitions in kernel.h.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
Andrew Boie 2019-09-21 16:17:23 -07:00 committed by Anas Nashif
commit 07525a3d54
20 changed files with 61 additions and 66 deletions

View file

@@ -17,8 +17,8 @@
#include <linker/sections.h> #include <linker/sections.h>
#include <arch/cpu.h> #include <arch/cpu.h>
GTEXT(k_cpu_idle) GTEXT(z_arch_cpu_idle)
GTEXT(k_cpu_atomic_idle) GTEXT(z_arch_cpu_atomic_idle)
GDATA(k_cpu_sleep_mode) GDATA(k_cpu_sleep_mode)
SECTION_VAR(BSS, k_cpu_sleep_mode) SECTION_VAR(BSS, k_cpu_sleep_mode)
@@ -33,7 +33,7 @@ SECTION_VAR(BSS, k_cpu_sleep_mode)
* void nanCpuIdle(void) * void nanCpuIdle(void)
*/ */
SECTION_FUNC(TEXT, k_cpu_idle) SECTION_FUNC(TEXT, z_arch_cpu_idle)
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
push_s blink push_s blink
@@ -52,9 +52,9 @@ SECTION_FUNC(TEXT, k_cpu_idle)
* *
* This function exits with interrupts restored to <key>. * This function exits with interrupts restored to <key>.
* *
* void k_cpu_atomic_idle(unsigned int key) * void z_arch_cpu_atomic_idle(unsigned int key)
*/ */
SECTION_FUNC(TEXT, k_cpu_atomic_idle) SECTION_FUNC(TEXT, z_arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
push_s blink push_s blink

View file

@@ -21,8 +21,8 @@
_ASM_FILE_PROLOGUE _ASM_FILE_PROLOGUE
GTEXT(z_CpuIdleInit) GTEXT(z_CpuIdleInit)
GTEXT(k_cpu_idle) GTEXT(z_arch_cpu_idle)
GTEXT(k_cpu_atomic_idle) GTEXT(z_arch_cpu_atomic_idle)
#if defined(CONFIG_CPU_CORTEX_M) #if defined(CONFIG_CPU_CORTEX_M)
#define _SCB_SCR 0xE000ED10 #define _SCB_SCR 0xE000ED10
@@ -68,10 +68,10 @@ SECTION_FUNC(TEXT, z_CpuIdleInit)
* *
* C function prototype: * C function prototype:
* *
* void k_cpu_idle (void); * void z_arch_cpu_idle (void);
*/ */
SECTION_FUNC(TEXT, k_cpu_idle) SECTION_FUNC(TEXT, z_arch_cpu_idle)
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
push {r0, lr} push {r0, lr}
bl sys_trace_idle bl sys_trace_idle
@@ -103,7 +103,7 @@ SECTION_FUNC(TEXT, k_cpu_idle)
* @brief Atomically re-enable interrupts and enter low power mode * @brief Atomically re-enable interrupts and enter low power mode
* *
* INTERNAL * INTERNAL
* The requirements for k_cpu_atomic_idle() are as follows: * The requirements for z_arch_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be * 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are * atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments * enabled before the processor enters a low-power mode. See the comments
@@ -117,10 +117,10 @@ SECTION_FUNC(TEXT, k_cpu_idle)
* *
* C function prototype: * C function prototype:
* *
* void k_cpu_atomic_idle (unsigned int key); * void z_arch_cpu_atomic_idle (unsigned int key);
*/ */
SECTION_FUNC(TEXT, k_cpu_atomic_idle) SECTION_FUNC(TEXT, z_arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
push {r0, lr} push {r0, lr}
bl sys_trace_idle bl sys_trace_idle

View file

@@ -141,7 +141,7 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
thread->arch.swap_return_value = value; thread->arch.swap_return_value = value;
} }
extern void k_cpu_atomic_idle(unsigned int key); extern void z_arch_cpu_atomic_idle(unsigned int key);
#define z_arch_is_in_isr() z_IsInIsr() #define z_arch_is_in_isr() z_IsInIsr()

View file

@@ -17,7 +17,7 @@
* *
* @return N/A * @return N/A
*/ */
void k_cpu_idle(void) void z_arch_cpu_idle(void)
{ {
/* Do nothing but unconditionally unlock interrupts and return to the /* Do nothing but unconditionally unlock interrupts and return to the
* caller. This CPU does not have any kind of power saving instruction. * caller. This CPU does not have any kind of power saving instruction.
@@ -30,7 +30,7 @@ void k_cpu_idle(void)
* @brief Atomically re-enable interrupts and enter low power mode * @brief Atomically re-enable interrupts and enter low power mode
* *
* INTERNAL * INTERNAL
* The requirements for k_cpu_atomic_idle() are as follows: * The requirements for z_arch_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be * 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are * atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments * enabled before the processor enters a low-power mode. See the comments
@@ -42,7 +42,7 @@ void k_cpu_idle(void)
* *
* @return N/A * @return N/A
*/ */
void k_cpu_atomic_idle(unsigned int key) void z_arch_cpu_atomic_idle(unsigned int key)
{ {
/* Do nothing but restore IRQ state. This CPU does not have any /* Do nothing but restore IRQ state. This CPU does not have any
* kind of power saving instruction. * kind of power saving instruction.

View file

@@ -26,9 +26,6 @@ extern "C" {
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE
void k_cpu_idle(void);
void k_cpu_atomic_idle(unsigned int key);
static ALWAYS_INLINE void kernel_arch_init(void) static ALWAYS_INLINE void kernel_arch_init(void)
{ {
_kernel.irq_stack = _kernel.irq_stack =

View file

@@ -10,11 +10,11 @@
* This module provides: * This module provides:
* *
* An implementation of the architecture-specific * An implementation of the architecture-specific
* k_cpu_idle() primitive required by the kernel idle loop component. * z_arch_cpu_idle() primitive required by the kernel idle loop component.
* It can be called within an implementation of _sys_power_save_idle(), * It can be called within an implementation of _sys_power_save_idle(),
* which is provided for the kernel by the platform. * which is provided for the kernel by the platform.
* *
* An implementation of k_cpu_atomic_idle(), which * An implementation of z_arch_cpu_atomic_idle(), which
* atomically re-enables interrupts and enters low power mode. * atomically re-enables interrupts and enters low power mode.
* *
* A weak stub for sys_arch_reboot(), which does nothing * A weak stub for sys_arch_reboot(), which does nothing
@@ -36,7 +36,7 @@
* *
* @return N/A * @return N/A
*/ */
void k_cpu_idle(void) void z_arch_cpu_idle(void)
{ {
sys_trace_idle(); sys_trace_idle();
posix_irq_full_unlock(); posix_irq_full_unlock();
@@ -48,7 +48,7 @@ void k_cpu_idle(void)
* @brief Atomically re-enable interrupts and enter low power mode * @brief Atomically re-enable interrupts and enter low power mode
* *
* INTERNAL * INTERNAL
* The requirements for k_cpu_atomic_idle() are as follows: * The requirements for z_arch_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be * 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are * atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments * enabled before the processor enters a low-power mode. See the comments
@@ -62,7 +62,7 @@ void k_cpu_idle(void)
* *
* @return N/A * @return N/A
*/ */
void k_cpu_atomic_idle(unsigned int key) void z_arch_cpu_atomic_idle(unsigned int key)
{ {
sys_trace_idle(); sys_trace_idle();
posix_atomic_halt_cpu(key); posix_atomic_halt_cpu(key);

View file

@@ -9,11 +9,11 @@
/* /*
* In RISC-V there is no conventional way to handle CPU power save. * In RISC-V there is no conventional way to handle CPU power save.
* Each RISC-V SOC handles it in its own way. * Each RISC-V SOC handles it in its own way.
* Hence, by default, k_cpu_idle and k_cpu_atomic_idle functions just * Hence, by default, z_arch_cpu_idle and z_arch_cpu_atomic_idle functions just
* unlock interrupts and return to the caller, without issuing any CPU power * unlock interrupts and return to the caller, without issuing any CPU power
* saving instruction. * saving instruction.
* *
* Nonetheless, define the default k_cpu_idle and k_cpu_atomic_idle * Nonetheless, define the default z_arch_cpu_idle and z_arch_cpu_atomic_idle
* functions as weak functions, so that they can be replaced at the SOC-level. * functions as weak functions, so that they can be replaced at the SOC-level.
*/ */
@@ -27,7 +27,7 @@
* *
* @return N/A * @return N/A
*/ */
void __weak k_cpu_idle(void) void __weak z_arch_cpu_idle(void)
{ {
irq_unlock(SOC_MSTATUS_IEN); irq_unlock(SOC_MSTATUS_IEN);
} }
@@ -37,7 +37,7 @@ void __weak k_cpu_idle(void)
* @brief Atomically re-enable interrupts and enter low power mode * @brief Atomically re-enable interrupts and enter low power mode
* *
* INTERNAL * INTERNAL
* The requirements for k_cpu_atomic_idle() are as follows: * The requirements for z_arch_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be * 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are * atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments * enabled before the processor enters a low-power mode. See the comments
@@ -49,7 +49,7 @@ void __weak k_cpu_idle(void)
* *
* @return N/A * @return N/A
*/ */
void __weak k_cpu_atomic_idle(unsigned int key) void __weak z_arch_cpu_atomic_idle(unsigned int key)
{ {
irq_unlock(key); irq_unlock(key);
} }

View file

@@ -22,9 +22,6 @@ extern "C" {
#endif #endif
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE
void k_cpu_idle(void);
void k_cpu_atomic_idle(unsigned int key);
static ALWAYS_INLINE void kernel_arch_init(void) static ALWAYS_INLINE void kernel_arch_init(void)
{ {
_kernel.irq_stack = _kernel.irq_stack =

View file

@@ -18,7 +18,7 @@
* *
* @return N/A * @return N/A
*/ */
void k_cpu_idle(void) void z_arch_cpu_idle(void)
{ {
sys_trace_idle(); sys_trace_idle();
#if defined(CONFIG_BOOT_TIME_MEASUREMENT) #if defined(CONFIG_BOOT_TIME_MEASUREMENT)
@@ -35,7 +35,7 @@ void k_cpu_idle(void)
* @brief Atomically re-enable interrupts and enter low power mode * @brief Atomically re-enable interrupts and enter low power mode
* *
* INTERNAL * INTERNAL
* The requirements for k_cpu_atomic_idle() are as follows: * The requirements for z_arch_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be * 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are * atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments * enabled before the processor enters a low-power mode. See the comments
@@ -48,7 +48,7 @@ void k_cpu_idle(void)
* @return N/A * @return N/A
*/ */
void k_cpu_atomic_idle(unsigned int key) void z_arch_cpu_atomic_idle(unsigned int key)
{ {
sys_trace_idle(); sys_trace_idle();
@@ -63,7 +63,7 @@ void k_cpu_atomic_idle(unsigned int key)
* external, maskable interrupts after the next instruction is * external, maskable interrupts after the next instruction is
* executed." * executed."
* *
* Thus the IA-32 implementation of k_cpu_atomic_idle() will * Thus the IA-32 implementation of z_arch_cpu_atomic_idle() will
* atomically re-enable interrupts and enter a low-power mode. * atomically re-enable interrupts and enter a low-power mode.
*/ */
"hlt\n\t"); "hlt\n\t");

View file

@@ -67,7 +67,7 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
*(unsigned int *)(thread->callee_saved.esp) = value; *(unsigned int *)(thread->callee_saved.esp) = value;
} }
extern void k_cpu_atomic_idle(unsigned int key); extern void z_arch_cpu_atomic_idle(unsigned int key);
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry, extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry,

View file

@@ -43,7 +43,7 @@ void z_arch_new_thread(struct k_thread *t, k_thread_stack_t *stack,
nargs); nargs);
} }
void k_cpu_idle(void) void z_arch_cpu_idle(void)
{ {
sys_trace_idle(); sys_trace_idle();
__asm__ volatile("sti; hlt"); __asm__ volatile("sti; hlt");

View file

@@ -10,9 +10,9 @@
* *
* This function always exits with interrupts unlocked. * This function always exits with interrupts unlocked.
* *
* void k_cpu_idle(void) * void z_arch_cpu_idle(void)
*/ */
void k_cpu_idle(void) void z_arch_cpu_idle(void)
{ {
sys_trace_idle(); sys_trace_idle();
__asm__ volatile ("waiti 0"); __asm__ volatile ("waiti 0");
@@ -22,9 +22,9 @@ void k_cpu_idle(void)
* *
* This function exits with interrupts restored to <key>. * This function exits with interrupts restored to <key>.
* *
* void k_cpu_atomic_idle(unsigned int key) * void z_arch_cpu_atomic_idle(unsigned int key)
*/ */
void k_cpu_atomic_idle(unsigned int key) void z_arch_cpu_atomic_idle(unsigned int key)
{ {
sys_trace_idle(); sys_trace_idle();
__asm__ volatile ("waiti 0\n\t" __asm__ volatile ("waiti 0\n\t"

View file

@@ -83,8 +83,6 @@ static ALWAYS_INLINE void kernel_arch_init(void)
#endif #endif
} }
extern void k_cpu_atomic_idle(unsigned int key);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View file

@@ -407,14 +407,14 @@ CPU Idling/Power Management
*************************** ***************************
The kernel provides support for CPU power management with two functions: The kernel provides support for CPU power management with two functions:
:c:func:`k_cpu_idle` and :c:func:`k_cpu_atomic_idle`. :c:func:`z_arch_cpu_idle` and :c:func:`z_arch_cpu_atomic_idle`.
:c:func:`k_cpu_idle` can be as simple as calling the power saving instruction :c:func:`z_arch_cpu_idle` can be as simple as calling the power saving
for the architecture with interrupts unlocked, for example :code:`hlt` on x86, instruction for the architecture with interrupts unlocked, for example
:code:`wfi` or :code:`wfe` on ARM, :code:`sleep` on ARC. This function can be :code:`hlt` on x86, :code:`wfi` or :code:`wfe` on ARM, :code:`sleep` on ARC.
called in a loop within a context that does not care if it get interrupted or This function can be called in a loop within a context that does not care if it
not by an interrupt before going to sleep. There are basically two scenarios get interrupted or not by an interrupt before going to sleep. There are
when it is correct to use this function: basically two scenarios when it is correct to use this function:
* In a single-threaded system, in the only thread when the thread is not used * In a single-threaded system, in the only thread when the thread is not used
for doing real work after initialization, i.e. it is sitting in a loop doing for doing real work after initialization, i.e. it is sitting in a loop doing
@@ -422,7 +422,7 @@ when it is correct to use this function:
* In the idle thread. * In the idle thread.
:c:func:`k_cpu_atomic_idle`, on the other hand, must be able to atomically :c:func:`z_arch_cpu_atomic_idle`, on the other hand, must be able to atomically
re-enable interrupts and invoke the power saving instruction. It can thus be re-enable interrupts and invoke the power saving instruction. It can thus be
used in real application code, again in single-threaded systems. used in real application code, again in single-threaded systems.

View file

@@ -20,8 +20,6 @@ extern "C" {
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE
extern unsigned int k_cpu_sleep_mode; extern unsigned int k_cpu_sleep_mode;
extern void k_cpu_idle(void);
extern void k_cpu_atomic_idle(unsigned int key);
extern u32_t z_timer_cycle_get_32(void); extern u32_t z_timer_cycle_get_32(void);
#define z_arch_k_cycle_get_32() z_timer_cycle_get_32() #define z_arch_k_cycle_get_32() z_timer_cycle_get_32()

View file

@@ -19,8 +19,6 @@ extern "C" {
#endif #endif
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE
extern void k_cpu_idle(void);
extern u32_t z_timer_cycle_get_32(void); extern u32_t z_timer_cycle_get_32(void);
#define z_arch_k_cycle_get_32() z_timer_cycle_get_32() #define z_arch_k_cycle_get_32() z_timer_cycle_get_32()

View file

@@ -399,8 +399,6 @@ extern void k_float_enable(struct k_thread *thread, unsigned int options);
* @} * @}
*/ */
extern void k_cpu_idle(void);
#ifdef CONFIG_X86_ENABLE_TSS #ifdef CONFIG_X86_ENABLE_TSS
extern struct task_state_segment _main_tss; extern struct task_state_segment _main_tss;
#endif #endif

View file

@@ -4630,6 +4630,9 @@ extern void z_handle_obj_poll_events(sys_dlist_t *events, u32_t state);
* @{ * @{
*/ */
extern void z_arch_cpu_idle(void);
extern void z_arch_cpu_atomic_idle(unsigned int key);
/** /**
* @brief Make the CPU idle. * @brief Make the CPU idle.
* *
@@ -4643,7 +4646,10 @@ extern void z_handle_obj_poll_events(sys_dlist_t *events, u32_t state);
* @return N/A * @return N/A
* @req K-CPU-IDLE-001 * @req K-CPU-IDLE-001
*/ */
extern void k_cpu_idle(void); static inline void k_cpu_idle(void)
{
z_arch_cpu_idle();
}
/** /**
* @brief Make the CPU idle in an atomic fashion. * @brief Make the CPU idle in an atomic fashion.
@@ -4656,7 +4662,10 @@ extern void k_cpu_idle(void);
* @return N/A * @return N/A
* @req K-CPU-IDLE-002 * @req K-CPU-IDLE-002
*/ */
extern void k_cpu_atomic_idle(unsigned int key); static inline void k_cpu_atomic_idle(unsigned int key)
{
z_arch_cpu_atomic_idle(key);
}
/** /**
* @} * @}

View file

@@ -9,7 +9,7 @@
* clock. * clock.
* *
* Therefore, the code will always run until completion after each interrupt, * Therefore, the code will always run until completion after each interrupt,
* after which k_cpu_idle() will be called releasing the execution back to the * after which z_arch_cpu_idle() will be called releasing the execution back to the
* HW models. * HW models.
* *
* The HW models raising an interrupt will "awake the cpu" by calling * The HW models raising an interrupt will "awake the cpu" by calling
@@ -125,7 +125,7 @@ void posix_interrupt_raised(void)
/** /**
* Normally called from k_cpu_idle(): * Normally called from z_arch_cpu_idle():
* the idle loop will call this function to set the CPU to "sleep". * the idle loop will call this function to set the CPU to "sleep".
* Others may also call this function with care. The CPU will be set to sleep * Others may also call this function with care. The CPU will be set to sleep
* until some interrupt awakes it. * until some interrupt awakes it.
@@ -156,7 +156,7 @@ void posix_halt_cpu(void)
/** /**
* Implementation of k_cpu_atomic_idle() for this SOC * Implementation of z_arch_cpu_atomic_idle() for this SOC
*/ */
void posix_atomic_halt_cpu(unsigned int imask) void posix_atomic_halt_cpu(unsigned int imask)
{ {

View file

@@ -31,7 +31,7 @@ static ALWAYS_INLINE void riscv_idle(unsigned int key)
* *
* @return N/A * @return N/A
*/ */
void k_cpu_idle(void) void z_arch_cpu_idle(void)
{ {
riscv_idle(SOC_MSTATUS_IEN); riscv_idle(SOC_MSTATUS_IEN);
} }
@@ -41,7 +41,7 @@ void k_cpu_idle(void)
* @brief Atomically re-enable interrupts and enter low power mode * @brief Atomically re-enable interrupts and enter low power mode
* *
* INTERNAL * INTERNAL
* The requirements for k_cpu_atomic_idle() are as follows: * The requirements for z_arch_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be * 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are * atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments * enabled before the processor enters a low-power mode. See the comments
@ -53,7 +53,7 @@ void k_cpu_idle(void)
* *
* @return N/A * @return N/A
*/ */
void k_cpu_atomic_idle(unsigned int key) void z_arch_cpu_atomic_idle(unsigned int key)
{ {
riscv_idle(key); riscv_idle(key);
} }