unified: Add tickless idle support for x86 and ARM
Change-Id: I42d20355321f431900727768a0836ee18e96b667 Signed-off-by: Peter Mitsis <peter.mitsis@windriver.com>
This commit is contained in:
parent
a04c0d70e1
commit
96cb05ca50
8 changed files with 191 additions and 83 deletions
|
@ -274,23 +274,7 @@ void _TIMER_INT_HANDLER(void *unused)
|
|||
idle_original_ticks + 1; /* actual # of idle ticks */
|
||||
_sys_clock_tick_announce();
|
||||
} else {
|
||||
/*
|
||||
* Increment the tick because _timer_idle_exit does not
|
||||
* account for the tick due to the timer interrupt itself.
|
||||
* Also, if not in tickless mode, _sys_idle_elapsed_ticks will be 0.
|
||||
*/
|
||||
_sys_idle_elapsed_ticks++;
|
||||
|
||||
/*
|
||||
* If we transition from 0 elapsed ticks to 1 we need to
|
||||
* announce the
|
||||
* tick event to the microkernel. Other cases will be covered by
|
||||
* _timer_idle_exit.
|
||||
*/
|
||||
|
||||
if (_sys_idle_elapsed_ticks == 1) {
|
||||
_sys_clock_tick_announce();
|
||||
}
|
||||
_sys_clock_final_tick_announce();
|
||||
}
|
||||
|
||||
/* accumulate total counter value */
|
||||
|
|
|
@ -295,29 +295,7 @@ void _timer_int_handler(void *unused)
|
|||
*_HPET_TIMER0_COMPARATOR = counter_last_value + counter_load_value;
|
||||
programmed_ticks = 1;
|
||||
|
||||
/*
|
||||
* Increment the tick because _timer_idle_exit does not account
|
||||
* for the tick due to the timer interrupt itself. Also, if not in
|
||||
* tickless mode, _sys_idle_elapsed_ticks will be 0.
|
||||
*/
|
||||
#ifdef CONFIG_MICROKERNEL
|
||||
_sys_idle_elapsed_ticks++;
|
||||
#else
|
||||
_sys_idle_elapsed_ticks = 1;
|
||||
#endif /* CONFIG_MICROKERNEL */
|
||||
|
||||
/*
|
||||
* If we transition from 0 elapsed ticks to 1 we need to announce the
|
||||
* tick
|
||||
* event to the microkernel. Other cases will have already been covered
|
||||
* by
|
||||
* _timer_idle_exit
|
||||
*/
|
||||
|
||||
if (_sys_idle_elapsed_ticks == 1) {
|
||||
_sys_clock_tick_announce();
|
||||
}
|
||||
|
||||
_sys_clock_final_tick_announce();
|
||||
#endif /* !CONFIG_TICKLESS_IDLE */
|
||||
|
||||
}
|
||||
|
|
|
@ -310,28 +310,10 @@ void _timer_int_handler(void *unused /* parameter is not used */
|
|||
timer_mode = TIMER_MODE_PERIODIC;
|
||||
}
|
||||
|
||||
/*
|
||||
* Increment the tick because _timer_idle_exit() does not account
|
||||
* for the tick due to the timer interrupt itself. Also, if not in
|
||||
* one-shot mode, _sys_idle_elapsed_ticks will be 0.
|
||||
*/
|
||||
#ifdef CONFIG_MICROKERNEL
|
||||
_sys_idle_elapsed_ticks++;
|
||||
#else
|
||||
_sys_idle_elapsed_ticks = 1;
|
||||
#endif
|
||||
_sys_clock_final_tick_announce();
|
||||
|
||||
/* track the accumulated cycle count */
|
||||
accumulated_cycle_count += cycles_per_tick * _sys_idle_elapsed_ticks;
|
||||
|
||||
/*
|
||||
* If we transition from 0 elapsed ticks to 1 we need to announce the
|
||||
* tick event to the microkernel. Other cases will have already been
|
||||
* covered by _timer_idle_exit().
|
||||
*/
|
||||
|
||||
if (_sys_idle_elapsed_ticks == 1) {
|
||||
_sys_clock_tick_announce();
|
||||
}
|
||||
#else
|
||||
/* track the accumulated cycle count */
|
||||
accumulated_cycle_count += cycles_per_tick;
|
||||
|
|
|
@ -73,16 +73,51 @@ extern int sys_clock_device_ctrl(struct device *device,
|
|||
#define sys_clock_device_ctrl device_control_nop
|
||||
#endif
|
||||
|
||||
extern int32_t _sys_idle_elapsed_ticks;
|
||||
#if !defined(CONFIG_KERNEL_V2) && defined(CONFIG_MICROKERNEL)
|
||||
extern void (*_do_sys_clock_tick_announce)(kevent_t);
|
||||
#define _sys_clock_tick_announce() \
|
||||
_do_sys_clock_tick_announce(TICK_EVENT)
|
||||
#else
|
||||
extern int32_t _sys_idle_elapsed_ticks;
|
||||
#define _sys_clock_tick_announce() \
|
||||
_nano_sys_clock_tick_announce(_sys_idle_elapsed_ticks)
|
||||
#endif
|
||||
extern void (*_do_sys_clock_tick_announce)(kevent_t);
|
||||
|
||||
#define _sys_clock_tick_announce() _do_sys_clock_tick_announce(TICK_EVENT)
|
||||
|
||||
/**
|
||||
* @brief Account for the tick due to the timer interrupt
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
static inline void _sys_clock_final_tick_announce(void)
|
||||
{
|
||||
/*
|
||||
* Ticks are annnounced at interrupt level but processed later in
|
||||
* the kernel server fiber. Increment '_sys_idle_elapsed_ticks' as
|
||||
* some ticks may have previously been announced by _timer_idle_exit()
|
||||
* (if tickless idle is enabled) but not yet processed.
|
||||
*/
|
||||
_sys_idle_elapsed_ticks++;
|
||||
|
||||
/* If no ticks were previously announced, announce the tick now. */
|
||||
if (_sys_idle_elapsed_ticks == 1) {
|
||||
_sys_clock_tick_announce();
|
||||
}
|
||||
}
|
||||
#else
|
||||
#define _sys_clock_tick_announce() \
|
||||
_nano_sys_clock_tick_announce(_sys_idle_elapsed_ticks)
|
||||
|
||||
/**
|
||||
* @brief Account for the tick due to the timer interrupt
|
||||
*
|
||||
* @return N/A
|
||||
*/
|
||||
static inline void _sys_clock_final_tick_announce(void)
|
||||
{
|
||||
/*
|
||||
* Ticks are both announced and immediately processed at interrupt
|
||||
* level. Thus there is only one tick left to announce (and process).
|
||||
*/
|
||||
_sys_idle_elapsed_ticks = 1;
|
||||
_sys_clock_tick_announce();
|
||||
}
|
||||
#endif
|
||||
#endif /* _ASMLANGUAGE */
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -287,7 +287,8 @@ config TICKLESS_IDLE
|
|||
bool
|
||||
prompt "Tickless idle"
|
||||
default y
|
||||
depends on !KERNEL_V2 && (MICROKERNEL || NANOKERNEL_TICKLESS_IDLE_SUPPORTED)
|
||||
depends on MICROKERNEL || KERNEL_V2 || \
|
||||
NANOKERNEL_TICKLESS_IDLE_SUPPORTED
|
||||
help
|
||||
This option suppresses periodic system clock interrupts whenever the
|
||||
kernel becomes idle. This permits the system to remain in a power
|
||||
|
|
|
@ -15,6 +15,7 @@ lib-y += $(strip \
|
|||
sem.o \
|
||||
device.o \
|
||||
thread_abort.o \
|
||||
idle.o \
|
||||
)
|
||||
|
||||
lib-y += $(strip \
|
||||
|
@ -37,7 +38,6 @@ lib-y += $(strip \
|
|||
|
||||
lib-$(CONFIG_INT_LATENCY_BENCHMARK) += int_latency_bench.o
|
||||
lib-$(CONFIG_STACK_CANARIES) += compiler_stack_protect.o
|
||||
lib-$(CONFIG_SYS_POWER_MANAGEMENT) += idle.o
|
||||
lib-$(CONFIG_NANO_TIMERS) += timer.o
|
||||
lib-$(CONFIG_KERNEL_EVENT_LOGGER) += event_logger.o
|
||||
lib-$(CONFIG_KERNEL_EVENT_LOGGER) += kernel_event_logger.o
|
||||
|
|
|
@ -1 +1,139 @@
|
|||
#include "../nanokernel/idle.c"
|
||||
/*
|
||||
* Copyright (c) 2016 Wind River Systems, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <nanokernel.h>
|
||||
#include <nano_private.h>
|
||||
#include <toolchain.h>
|
||||
#include <sections.h>
|
||||
#include <drivers/system_timer.h>
|
||||
#include <wait_q.h>
|
||||
|
||||
#if defined(CONFIG_TICKLESS_IDLE)
|
||||
/*
|
||||
* Idle time must be this value or higher for timer to go into tickless idle
|
||||
* state.
|
||||
*/
|
||||
int32_t _sys_idle_threshold_ticks = CONFIG_TICKLESS_IDLE_THRESH;
|
||||
#endif /* CONFIG_TICKLESS_IDLE */
|
||||
|
||||
#ifdef CONFIG_SYS_POWER_MANAGEMENT
/**
 *
 * @brief Indicate that kernel is idling in tickless mode
 *
 * Records how long the kernel intends to idle in the nanokernel data
 * structure: either a positive number of ticks or K_FOREVER.
 *
 * @param ticks the number of ticks to idle
 *
 * @return N/A
 */
static void set_kernel_idle_time_in_ticks(int32_t ticks)
{
	_nanokernel.idle = ticks;
}
#else
/* Without power management there is nothing to record */
#define set_kernel_idle_time_in_ticks(x) do { } while (0)
#endif
|
||||
|
||||
/**
 *
 * @brief Put the system into a power-saving idle state
 *
 * @param ticks number of ticks until the next scheduled kernel timeout,
 *              or K_FOREVER if there is none
 *
 * @return N/A
 */
static void _sys_power_save_idle(int32_t ticks __unused)
{
#if defined(CONFIG_TICKLESS_IDLE)
	if ((ticks == K_FOREVER) || (ticks >= _sys_idle_threshold_ticks)) {
		/*
		 * The idle period is long enough to be worth suppressing
		 * periodic system timer interrupts until the next scheduled
		 * kernel timer is due to expire.
		 */
		_timer_idle_enter(ticks);
	}
#endif /* CONFIG_TICKLESS_IDLE */

	set_kernel_idle_time_in_ticks(ticks);
#if (defined(CONFIG_SYS_POWER_LOW_POWER_STATE) || \
	defined(CONFIG_SYS_POWER_DEEP_SLEEP) || \
	defined(CONFIG_DEVICE_POWER_MANAGEMENT))
	/*
	 * Ask the PM suspend hook what to do with this idle period.  It
	 * returns SYS_PM_NOT_HANDLED when the available time is too short
	 * for any PM operation, SYS_PM_DEVICE_SUSPEND_ONLY when it only
	 * suspended devices, or SYS_PM_LOW_POWER_STATE/SYS_PM_DEEP_SLEEP
	 * when it handled the idling itself.  In the first two cases the
	 * kernel performs its normal idle processing below.
	 *
	 * The hook is entered with interrupts disabled.  When it returns
	 * SYS_PM_LOW_POWER_STATE or SYS_PM_DEEP_SLEEP it must have
	 * re-enabled interrupts itself, because nano_cpu_idle() -- whose
	 * processing re-enables interrupts for the kernel's scheduling
	 * logic -- is skipped in those cases.
	 */
	int pm_operation = _sys_soc_suspend(ticks);

	if ((pm_operation & (SYS_PM_DEEP_SLEEP | SYS_PM_LOW_POWER_STATE)) == 0) {
		nano_cpu_idle();
	}
#else
	nano_cpu_idle();
#endif
}
|
||||
|
||||
/**
 *
 * @brief Perform power-management processing when idling ends
 *
 * Called from ISR context when an interrupt terminates an idle period.
 *
 * @param ticks the tick count that was passed when idling began
 *
 * @return N/A
 */
void _sys_power_save_idle_exit(int32_t ticks)
{
#if (defined(CONFIG_SYS_POWER_LOW_POWER_STATE) || \
	defined(CONFIG_SYS_POWER_DEEP_SLEEP) || \
	defined(CONFIG_DEVICE_POWER_MANAGEMENT))
	/*
	 * An idle wait based on a CPU low power state ends with an
	 * interrupt, and this function runs within that interrupt's ISR.
	 * Call _sys_soc_resume() here so that any device state altered in
	 * _sys_soc_suspend() is restored before the kernel schedules
	 * another thread.  Deep sleep exit is deliberately not handled
	 * here: its recovery happens on the cold boot path.
	 */
	_sys_soc_resume();
#endif
#ifdef CONFIG_TICKLESS_IDLE
	if ((ticks == K_FOREVER) || (ticks >= _sys_idle_threshold_ticks)) {
		/* Resume normal periodic system timer interrupts */
		_timer_idle_exit();
	}
#else
	ARG_UNUSED(ticks);
#endif /* CONFIG_TICKLESS_IDLE */
}
|
||||
|
||||
|
||||
/**
 *
 * @brief System idle thread
 *
 * Loops forever: idles in a power-saving state sized by the time until the
 * next kernel timeout expires, then yields so any ready thread may run.
 *
 * @param unused1 unused
 * @param unused2 unused
 * @param unused3 unused
 *
 * @return This function never returns
 */
void idle(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

	while (1) {
		_sys_power_save_idle(_timeout_get_next_expiry());
		k_yield();
	}
}
|
||||
|
|
|
@ -110,6 +110,8 @@ char __noinit __stack _interrupt_stack[CONFIG_ISR_STACK_SIZE];
|
|||
#define initialize_timeouts() do { } while ((0))
|
||||
#endif
|
||||
|
||||
extern void idle(void *unused1, void *unused2, void *unused3);
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Clear BSS
|
||||
|
@ -191,18 +193,6 @@ void __weak main(void)
|
|||
/* NOP default main() if the application does not provide one. */
|
||||
}
|
||||
|
||||
/* Idle thread: halt the CPU until an interrupt, then let others run. */
static void idle(void *unused1, void *unused2, void *unused3)
{
	ARG_UNUSED(unused1);
	ARG_UNUSED(unused2);
	ARG_UNUSED(unused3);

	while (1) {
		nano_cpu_idle();
		k_yield();
	}
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @brief Initializes nanokernel data structures
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue