kernel: add k_cpu_idle/k_cpu_atomic_idle()

nano_cpu_idle/nano_cpu_atomic_idle were not ported to the unified
kernel, and only the old APIs were available. There was no real impact
since, in the unified kernel, only the idle thread should really be
doing power management. However, with a single-threaded kernel, these
functions can be useful again.

The kernel internals now make use of these APIs instead of the legacy
ones.

Change-Id: Ie8a6396ba378d3ddda27b8dd32fa4711bf53eb36
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
This commit is contained in:
Benjamin Walsh 2016-12-14 13:04:36 -05:00 committed by Benjamin Walsh
commit c3a2bbba16
21 changed files with 247 additions and 48 deletions

View file

@ -29,11 +29,11 @@
#include <sections.h>
#include <arch/cpu.h>
GTEXT(nano_cpu_idle)
GTEXT(nano_cpu_atomic_idle)
GDATA(nano_cpu_sleep_mode)
GTEXT(k_cpu_idle)
GTEXT(k_cpu_atomic_idle)
GDATA(k_cpu_sleep_mode)
SECTION_VAR(BSS, nano_cpu_sleep_mode)
SECTION_VAR(BSS, k_cpu_sleep_mode)
.word 0
/*
@ -44,7 +44,7 @@ SECTION_VAR(BSS, nano_cpu_sleep_mode)
* void nanCpuIdle(void)
*/
SECTION_FUNC(TEXT, nano_cpu_idle)
SECTION_FUNC(TEXT, k_cpu_idle)
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
push_s blink
@ -52,7 +52,7 @@ SECTION_FUNC(TEXT, nano_cpu_idle)
pop_s blink
#endif
ld r1, [nano_cpu_sleep_mode]
ld r1, [k_cpu_sleep_mode]
or r1, r1, (1 << 4) /* set IRQ-enabled bit */
sleep r1
j_s [blink]
@ -63,9 +63,9 @@ SECTION_FUNC(TEXT, nano_cpu_idle)
*
* This function exits with interrupts restored to <key>.
*
* void nano_cpu_atomic_idle(unsigned int key)
* void k_cpu_atomic_idle(unsigned int key)
*/
SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
SECTION_FUNC(TEXT, k_cpu_atomic_idle)
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
push_s blink
@ -73,7 +73,7 @@ SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
pop_s blink
#endif
ld r1, [nano_cpu_sleep_mode]
ld r1, [k_cpu_sleep_mode]
or r1, r1, (1 << 4) /* set IRQ-enabled bit */
sleep r1
j_s.d [blink]

View file

@ -57,7 +57,7 @@ static ALWAYS_INLINE void _irq_setup(void)
_ARC_V2_AUX_IRQ_CTRL_14_REGS /* save r0 -> r13 (caller-saved) */
);
nano_cpu_sleep_mode = _ARC_V2_WAKE_IRQ_LEVEL;
k_cpu_sleep_mode = _ARC_V2_WAKE_IRQ_LEVEL;
_arc_v2_aux_reg_write(_ARC_V2_AUX_IRQ_CTRL, aux_irq_ctrl_value);
_kernel.irq_stack = _interrupt_stack + CONFIG_ISR_STACK_SIZE;

View file

@ -37,8 +37,8 @@ GTEXT(_CpuIdleInit)
GTEXT(_NanoIdleValGet)
GTEXT(_NanoIdleValClear)
#endif
GTEXT(nano_cpu_idle)
GTEXT(nano_cpu_atomic_idle)
GTEXT(k_cpu_idle)
GTEXT(k_cpu_atomic_idle)
#define _SCR_INIT_BITS _SCB_SCR_SEVONPEND
@ -116,10 +116,10 @@ SECTION_FUNC(TEXT, _NanoIdleValClear)
*
* C function prototype:
*
* void nano_cpu_idle (void);
* void k_cpu_idle (void);
*/
SECTION_FUNC(TEXT, nano_cpu_idle)
SECTION_FUNC(TEXT, k_cpu_idle)
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
push {lr}
bl _sys_k_event_logger_enter_sleep
@ -148,7 +148,7 @@ SECTION_FUNC(TEXT, nano_cpu_idle)
* nano_task_stack_pop(), and nano_task_fifo_get().
*
* INTERNAL
* The requirements for nano_cpu_atomic_idle() are as follows:
* The requirements for k_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments
@ -162,10 +162,10 @@ SECTION_FUNC(TEXT, nano_cpu_idle)
*
* C function prototype:
*
* void nano_cpu_atomic_idle (unsigned int imask);
* void k_cpu_atomic_idle (unsigned int imask);
*/
SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
SECTION_FUNC(TEXT, k_cpu_atomic_idle)
#ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
push {lr}
bl _sys_k_event_logger_enter_sleep

View file

@ -96,7 +96,7 @@ _set_thread_return_value(struct k_thread *thread, unsigned int value)
thread->arch.swap_return_value = value;
}
extern void nano_cpu_atomic_idle(unsigned int);
extern void k_cpu_atomic_idle(unsigned int key);
#define _is_in_isr() _IsInIsr()

View file

@ -27,7 +27,7 @@
*
* @return N/A
*/
void nano_cpu_idle(void)
void k_cpu_idle(void)
{
/* Do nothing but unconditionally unlock interrupts and return to the
* caller. This CPU does not have any kind of power saving instruction.
@ -44,7 +44,7 @@ void nano_cpu_idle(void)
* nano_task_stack_pop(), and nano_task_fifo_get().
*
* INTERNAL
* The requirements for nano_cpu_atomic_idle() are as follows:
* The requirements for k_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments
@ -56,7 +56,7 @@ void nano_cpu_idle(void)
*
* @return N/A
*/
void nano_cpu_atomic_idle(unsigned int key)
void k_cpu_atomic_idle(unsigned int key)
{
/* Do nothing but restore IRQ state. This CPU does not have any
* kind of power saving instruction.

View file

@ -36,8 +36,8 @@ extern "C" {
#ifndef _ASMLANGUAGE
void nano_cpu_idle(void);
void nano_cpu_atomic_idle(unsigned int key);
void k_cpu_idle(void);
void k_cpu_atomic_idle(unsigned int key);
static ALWAYS_INLINE void nanoArchInit(void)
{

View file

@ -18,20 +18,20 @@
*
* DESCRIPTION
* This module provides an implementation of the architecture-specific
* nano_cpu_idle() primitive required by the nanokernel idle loop component.
* k_cpu_idle() primitive required by the nanokernel idle loop component.
* It can be called within an implementation of _sys_power_save_idle(),
* which is provided for the microkernel by the platform.
*
* The module also provides an implementation of nano_cpu_atomic_idle(), which
* The module also provides an implementation of k_cpu_atomic_idle(), which
* atomically re-enables interrupts and enters low power mode.
*
* INTERNAL
* These implementations of nano_cpu_idle() and nano_cpu_atomic_idle() could be
* These implementations of k_cpu_idle() and k_cpu_atomic_idle() could be
* used when operating as a Hypervisor guest. More specifically, the Hypervisor
* supports the execution of the 'hlt' instruction from a guest (results in a
* VM exit), and more importantly, the Hypervisor will respect the
* single instruction delay slot after the 'sti' instruction as required
* by nano_cpu_atomic_idle().
* by k_cpu_atomic_idle().
*/
#include <zephyr.h>
@ -53,7 +53,7 @@ extern uint64_t __idle_tsc; /* timestamp when CPU went idle */
*
* @return N/A
*/
void nano_cpu_idle(void)
void k_cpu_idle(void)
{
_int_latency_stop();
_sys_k_event_logger_enter_sleep();
@ -75,7 +75,7 @@ void nano_cpu_idle(void)
* nano_task_stack_pop(), and nano_task_fifo_get().
*
* INTERNAL
* The requirements for nano_cpu_atomic_idle() are as follows:
* The requirements for k_cpu_atomic_idle() are as follows:
* 1) The enablement of interrupts and entering a low-power mode needs to be
* atomic, i.e. there should be no period of time where interrupts are
* enabled before the processor enters a low-power mode. See the comments
@ -88,7 +88,7 @@ void nano_cpu_idle(void)
* @return N/A
*/
void nano_cpu_atomic_idle(unsigned int imask)
void k_cpu_atomic_idle(unsigned int imask)
{
_int_latency_stop();
_sys_k_event_logger_enter_sleep();
@ -104,7 +104,7 @@ void nano_cpu_atomic_idle(unsigned int imask)
* external, maskable interrupts after the next instruction is
* executed."
*
* Thus the IA-32 implementation of nano_cpu_atomic_idle() will
* Thus the IA-32 implementation of k_cpu_atomic_idle() will
* atomically re-enable interrupts and enter a low-power mode.
*/
"hlt\n\t");

View file

@ -92,7 +92,7 @@ _set_thread_return_value(struct k_thread *thread, unsigned int value)
*(unsigned int *)(thread->callee_saved.esp) = value;
}
extern void nano_cpu_atomic_idle(unsigned int imask);
extern void k_cpu_atomic_idle(unsigned int imask);
extern void _MsrWrite(unsigned int msr, uint64_t msrData);
extern uint64_t _MsrRead(unsigned int msr);

View file

@ -0,0 +1,143 @@
.. _cpu_idle:
CPU Idling
##########
Although normally reserved for the idle thread, in certain special
applications, a thread might want to make the CPU idle.
.. contents::
:local:
:depth: 2
Concepts
********
Making the CPU idle causes the kernel to pause all operations until an event,
normally an interrupt, wakes up the CPU. In a regular system, the idle thread
is responsible for this. However, in some constrained systems, it is possible
that another thread takes this duty.
Implementation
**************
Making the CPU idle
===================
Making the CPU idle is simple: call the k_cpu_idle() API. The CPU will stop
executing instructions until an event occurs. Make sure interrupts are not
locked before invoking it. Most likely, it will be called within a loop.
.. code-block:: c
static struct k_sem my_sem;
void my_isr(void *unused)
{
k_sem_give(&my_sem);
}
void main(void)
{
k_sem_init(&my_sem, 0, 1);
/* wait for semaphore from ISR, then do related work */
for (;;) {
/* wait for ISR to trigger work to perform */
if (k_sem_take(&my_sem, K_NO_WAIT) == 0) {
/* ... do processing */
}
/* put CPU to sleep to save power */
k_cpu_idle();
}
}
Making the CPU idle in an atomic fashion
========================================
It is possible that there is a need to do some work atomically before making
the CPU idle. In such a case, k_cpu_atomic_idle() should be used instead.
In fact, there is a race condition in the previous example: the interrupt could
occur between the time the semaphore is checked (and found unavailable) and the
time the CPU is made idle again. In some systems, this can cause the CPU to
idle until *another* interrupt occurs, which might be *never*, thus hanging the
system completely. To prevent this, k_cpu_atomic_idle() should be used instead,
as shown in this example.
.. code-block:: c
static struct k_sem my_sem;
void my_isr(void *unused)
{
k_sem_give(&my_sem);
}
void main(void)
{
k_sem_init(&my_sem, 0, 1);
for (;;) {
unsigned int key = irq_lock();
/*
* Wait for semaphore from ISR; if acquired, do related work, then
* go to next loop iteration (the semaphore might have been given
* again); else, make the CPU idle.
*/
if (k_sem_take(&my_sem, K_NO_WAIT) == 0) {
irq_unlock(key);
/* ... do processing */
} else {
/* put CPU to sleep to save power */
k_cpu_atomic_idle(key);
}
}
}
Suggested Uses
**************
Use k_cpu_atomic_idle() when a thread has to do some real work in addition to
idling the CPU to wait for an event. See example above.
Use k_cpu_idle() only when a thread is only responsible for idling the CPU,
i.e. not doing any real work, like in this example below.
.. code-block:: c
void main(void)
{
/* ... do some system/application initialization */
/* thread is only used for CPU idling from this point on */
for (;;) {
k_cpu_idle();
}
}
.. note::
**Do not use these APIs unless absolutely necessary.** In a normal system,
the idle thread takes care of power management, including CPU idling.
APIs
****
The following CPU idling APIs are provided by :file:`kernel.h`:
* :cpp:func:`k_cpu_idle()`
* :cpp:func:`k_cpu_atomic_idle()`

View file

@ -9,6 +9,7 @@ This section describes other services provided by the kernel.
:maxdepth: 1
interrupts.rst
cpu_idle.rst
atomic.rst
float.rst
ring_buffers.rst

View file

@ -372,7 +372,7 @@ consumption.
The microkernel has built-in support for going into tickless idle. However, in
nanokernel-only systems, part of the support has to be built in the
architecture (:c:func:`nano_cpu_idle` and :c:func:`nano_cpu_atomic_idle`).
architecture (:c:func:`k_cpu_idle` and :c:func:`k_cpu_atomic_idle`).
The interrupt entry stub (:code:`_interrupt_enter`, :code:`_isr_wrapper`) needs
to be adapted to handle exiting tickless idle. See examples in the code for
@ -412,9 +412,9 @@ CPU Idling/Power Management
***************************
The kernel provides support for CPU power management with two functions:
:c:func:`nano_cpu_idle` and :c:func:`nano_cpu_atomic_idle`.
:c:func:`k_cpu_idle` and :c:func:`k_cpu_atomic_idle`.
:c:func:`nano_cpu_idle` can be as simple as calling the power saving
:c:func:`k_cpu_idle` can be as simple as calling the power saving
instruction for the architecture with interrupts unlocked, for example :code:`hlt` on
x86, :code:`wfi` or :code:`wfe` on ARM, :code:`sleep` on ARC. This function can be called in a
loop within a context that does not care if it get interrupted or not by an interrupt
@ -427,7 +427,7 @@ use this function:
* In a microkernel system, in the idle task.
:c:func:`nano_cpu_atomic_idle`, on the other hand, must be able to atomically
:c:func:`k_cpu_atomic_idle`, on the other hand, must be able to atomically
re-enable interrupts and invoke the power saving instruction. It can thus be
used in real application code. For example, it is used in the implementation of
nanokernel objects when the task is polling an object, waiting for the object

View file

@ -29,9 +29,9 @@ extern "C" {
#endif
#ifndef _ASMLANGUAGE
extern unsigned int nano_cpu_sleep_mode;
extern void nano_cpu_idle(void);
extern void nano_cpu_atomic_idle(unsigned int key);
extern unsigned int k_cpu_sleep_mode;
extern void k_cpu_idle(void);
extern void k_cpu_atomic_idle(unsigned int key);
#endif
#ifdef __cplusplus

View file

@ -29,7 +29,7 @@ extern "C" {
#endif
#ifndef _ASMLANGUAGE
extern void nano_cpu_idle(void);
extern void k_cpu_idle(void);
#endif
#ifdef __cplusplus

View file

@ -45,7 +45,7 @@ extern "C" {
#ifdef _ASMLANGUAGE
/* needed by nano_cpu_atomic_idle() written in asm */
/* needed by k_cpu_atomic_idle() written in asm */
#define _SCB_SCR 0xE000ED10
#define _SCB_SCR_SEVONPEND (1 << 4)

View file

@ -456,7 +456,7 @@ extern void k_float_disable(k_tid_t thread);
#include <stddef.h> /* for size_t */
extern void nano_cpu_idle(void);
extern void k_cpu_idle(void);
/** Nanokernel provided routine to report any detected fatal error. */
extern FUNC_NORETURN void _NanoFatalErrorHandler(unsigned int reason,

View file

@ -2925,6 +2925,32 @@ extern void k_free(void *ptr);
* @} end defgroup heap_apis
*/
/**
* @brief Make the CPU idle.
*
* This function makes the CPU idle until an event wakes it up.
*
* In a regular system, the idle thread should be the only thread responsible
* for making the CPU idle and triggering any type of power management.
* However, in some more constrained systems, such as a single-threaded system,
* the only thread may need to take on this responsibility itself.
*
* @return N/A
*/
extern void k_cpu_idle(void);
/**
* @brief Make the CPU idle in an atomic fashion.
*
* Similar to k_cpu_idle(), but called with interrupts locked if operations
* must be done atomically before making the CPU idle.
*
* @param key Interrupt locking key obtained from irq_lock().
*
* @return N/A
*/
extern void k_cpu_atomic_idle(unsigned int key);
/*
* legacy.h must be before arch/cpu.h to allow the ioapic/loapic drivers to
* hook into the device subsystem, which itself uses nanokernel semaphores,

View file

@ -3288,6 +3288,37 @@ nano_timer_ticks_remain(struct nano_timer *timer)
return _ms_to_ticks(k_timer_remaining_get(timer));
}
/**
* @brief Make the CPU idle.
*
* <b> Legacy API </b>
*
* This function makes the CPU idle until an event wakes it up.
*
* @return N/A
*/
static inline __deprecated void nano_cpu_idle(void)
{
	/* Deprecated shim: forwards to the unified-kernel API. */
	k_cpu_idle();
}
/**
* @brief Make the CPU idle in an atomic fashion.
*
* <b> Legacy API </b>
*
* Similar to k_cpu_idle(), but called with interrupts locked if operations
* must be done atomically before making the CPU idle.
*
* @param key Interrupt locking key obtained from irq_lock().
*
* @return N/A
*/
static inline __deprecated void nano_cpu_atomic_idle(unsigned int key)
{
	/* Deprecated shim: forwards <key> (from irq_lock()) to the
	 * unified-kernel API, which re-enables interrupts atomically
	 * with entering low-power mode.
	 */
	k_cpu_atomic_idle(key);
}
#if CONFIG_X86
#if CONFIG_FP_SHARING

View file

@ -92,16 +92,16 @@ static void _sys_power_save_idle(int32_t ticks __unused)
* This function is entered with interrupts disabled. If a low power
* state was entered, then the hook function should enable interrupts
* before exiting. This is because the kernel does not do its own idle
* processing in those cases i.e. skips nano_cpu_idle(). The kernel's
* processing in those cases i.e. skips k_cpu_idle(). The kernel's
* idle processing re-enables interrupts which is essential for
* the kernel's scheduling logic.
*/
if (_sys_soc_suspend(ticks) == SYS_PM_NOT_HANDLED) {
_sys_pm_idle_exit_notify = 0;
nano_cpu_idle();
k_cpu_idle();
}
#else
nano_cpu_idle();
k_cpu_idle();
#endif
}

View file

@ -37,6 +37,6 @@ void sys_reboot(int type)
/* should never get here */
printk("Failed to reboot: spinning endlessly...\n");
for (;;) {
nano_cpu_idle();
k_cpu_idle();
}
}

View file

@ -494,7 +494,6 @@ int main(void)
/* wait forever */
while (1) {
extern void nano_cpu_idle(void);
nano_cpu_idle();
}
}

View file

@ -66,7 +66,6 @@ int main(void)
/* wait forever */
while (1) {
extern void nano_cpu_idle(void);
nano_cpu_idle();
}
}