kernel: tickless: Add tickless kernel support
Adds event-based scheduling logic to the kernel. Timeouts, timers, idling, etc. are now managed based on time tracked at events rather than on periodic ticks. Provides interfaces for timer drivers to announce elapsed time and to query the next timer expiry based on kernel scheduling decisions involving thread time slicing, timeouts, and idling. Uses wall-time units instead of ticks in all scheduling activities.

The implementation involves changes in the following areas:

1. Management of time in wall units (ms/us) instead of ticks. The existing implementation already had an option to configure the number of ticks per second. The new implementation builds on that feature and adds an option to set the scheduling granularity to milliseconds or microseconds, which allows most of the current implementation to be reused. Because of this reuse and the coexistence with the tick-based kernel, variable names may still contain the word "tick"; in the tickless kernel they represent the currently configured time unit, which is milliseconds or microseconds. APIs that take time as a parameter are not affected and continue to take time in milliseconds.

2. Timers are no longer programmed in periodic mode to generate ticks. Instead, they are programmed in one-shot mode to generate events at the times the kernel scheduler needs to regain control for its scheduling activities (timers, timeouts, time slicing, idling, etc.).

3. The scheduler provides interfaces that timer drivers use to announce elapsed time and to get the next time the scheduler needs a timer event. The scheduler may not need another timer event at all, in which case the system waits for a non-timer event to wake it up if it is idling.

4. New APIs are defined to be implemented by timer drivers, which also need to handle timer events differently. These changes have been made in the HPET timer driver; other timers that support the tickless kernel should implement these APIs as well. The APIs reprogram the timer and update and announce elapsed time.

5. The philosopher and timer_api applications have been enabled to test the tickless kernel. Separate configuration files define the necessary CONFIG flags. Run these apps using the following command:

   make pristine && make BOARD=qemu_x86 CONF_FILE=prj_tickless.conf qemu

Jira: ZEP-339 ZEP-1946 ZEP-948

Change-Id: I7d950c31bf1ff929a9066fad42c2f0559a2e5983
Signed-off-by: Ramesh Thomas <ramesh.thomas@intel.com>
Parent: 62eea121b3
Commit: 89ffd44dfb
16 changed files with 307 additions and 16 deletions
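For orientation before the diff: the sketch below is not part of this commit; it only illustrates how a one-shot timer ISR is expected to interact with the tickless scheduler. _nano_sys_clock_tick_announce() and _get_next_timeout_expiry() appear in the changes below; read_elapsed_units() and program_one_shot() are hypothetical hardware helpers standing in for driver-specific code such as the HPET driver's.

    /* Illustrative sketch only -- not code from this commit. */
    static void tickless_timer_isr(void *arg)
    {
        ARG_UNUSED(arg);

        /* Time passed since the timer was last programmed, in the
         * configured kernel time unit (hypothetical helper).
         */
        s32_t elapsed = read_elapsed_units();

        /* Announce the elapsed time so timeouts and time slicing advance. */
        _nano_sys_clock_tick_announce(elapsed);

        /* Ask when the scheduler next needs a timer event. */
        s32_t next = _get_next_timeout_expiry();

        if (next == K_FOREVER) {
            /* Nothing scheduled: leave the timer unprogrammed; a
             * non-timer event will wake the system if it idles.
             */
            return;
        }

        /* Re-arm the hardware in one-shot mode (hypothetical helper). */
        program_one_shot(next);
    }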
@@ -15,7 +15,7 @@
#include <toolchain.h>
#include <kernel_structs.h>
extern s64_t _sys_clock_tick_count;
extern volatile u64_t _sys_clock_tick_count;
extern int sys_clock_hw_cycles_per_tick;
/*
@@ -24,6 +24,9 @@ _ASM_FILE_PROLOGUE
GTEXT(_ExcExit)
GTEXT(_IntExit)
GDATA(_kernel)
#ifdef CONFIG_TICKLESS_KERNEL
GTEXT(_update_time_slice_before_swap)
#endif
/**
 *

@@ -55,6 +58,11 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
/* _IntExit falls through to _ExcExit (they are aliases of each other) */
#ifdef CONFIG_TICKLESS_KERNEL
	push {lr}
	bl _update_time_slice_before_swap
	pop {lr}
#endif
/**
 *
@@ -30,6 +30,9 @@
/* externs */
GTEXT(__swap)
#if defined(CONFIG_TICKLESS_KERNEL) && defined(CONFIG_TIMESLICING)
GTEXT(_update_time_slice_before_swap)
#endif
#ifdef CONFIG_SYS_POWER_MANAGEMENT
GTEXT(_sys_power_save_idle_exit)

@@ -315,6 +318,9 @@ alreadyOnIntStack:
	popl %esi
#endif
#if defined(CONFIG_TICKLESS_KERNEL) && defined(CONFIG_TIMESLICING)
	call _update_time_slice_before_swap
#endif
	pushfl /* push KERNEL_LOCK_KEY argument */
#ifdef CONFIG_X86_IAMCU
	/* IAMCU first argument goes into a register, not the stack.
@@ -43,6 +43,13 @@ config UART_PIPE_ON_DEV_NAME
endif

if SYS_POWER_MANAGEMENT
config BUSY_WAIT_USES_ALTERNATE_CLOCK
	default y
endif

config BLUETOOTH_MONITOR_ON_DEV_NAME
	default UART_QMSI_1_NAME if BLUETOOTH_DEBUG_MONITOR
@@ -9,6 +9,7 @@
#include <system_timer.h>
#include <drivers/clock_control/nrf5_clock_control.h>
#include <arch/arm/cortex_m/cmsis.h>
#include <sys_clock.h>
/*
 * Convenience defines.

@@ -32,7 +33,6 @@
		CONFIG_SYS_CLOCK_TICKS_PER_SEC) * \
		1000000000UL) / 30517578125UL) & RTC_MASK)
extern s64_t _sys_clock_tick_count;
extern s32_t _sys_idle_elapsed_ticks;
/*
@@ -38,9 +38,19 @@ extern void sys_clock_disable(void);
#ifdef CONFIG_TICKLESS_IDLE
extern void _timer_idle_enter(s32_t ticks);
extern void _timer_idle_exit(void);
#else
#define _timer_idle_enter(ticks) do { } while ((0))
#define _timer_idle_exit() do { } while ((0))
#endif /* CONFIG_TICKLESS_IDLE */

extern void _nano_sys_clock_tick_announce(s32_t ticks);
#ifdef CONFIG_TICKLESS_KERNEL
extern void _set_time(u32_t time);
extern u32_t _get_program_time(void);
extern u32_t _get_remaining_program_time(void);
extern u32_t _get_elapsed_program_time(void);
extern u64_t _get_elapsed_clock_time(void);
#endif

extern int sys_clock_device_ctrl(struct device *device,
				 u32_t ctrl_command, void *context);
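The declarations above are prototypes only. As a rough sketch (an assumption, not taken from this commit), a driver built around a one-shot down-counter could back the program-time accessors roughly as follows; programmed_units, counter_remaining() and counter_start_one_shot() are hypothetical:

    /* Hypothetical driver-side backing for the tickless accessors. */
    static u32_t programmed_units;      /* duration handed to _set_time() */

    void _set_time(u32_t time)
    {
        programmed_units = time;
        counter_start_one_shot(time);   /* arm hardware for 'time' units */
    }

    u32_t _get_program_time(void)
    {
        return programmed_units;        /* last programmed duration */
    }

    u32_t _get_remaining_program_time(void)
    {
        return counter_remaining();     /* units left until expiry */
    }

    u32_t _get_elapsed_program_time(void)
    {
        return programmed_units - counter_remaining();
    }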
@@ -864,7 +864,11 @@ static ALWAYS_INLINE s32_t _ms_to_ticks(s32_t ms)
#endif

/* added tick needed to account for tick in progress */
#ifdef CONFIG_TICKLESS_KERNEL
#define _TICK_ALIGN 0
#else
#define _TICK_ALIGN 1
#endif

static inline s64_t __ticks_to_ms(s64_t ticks)
{

@@ -1131,6 +1135,44 @@ static inline void *k_timer_user_data_get(struct k_timer *timer)
 */
extern s64_t k_uptime_get(void);

#ifdef CONFIG_TICKLESS_KERNEL
/**
 * @brief Enable clock always on in tickless kernel
 *
 * This routine enables keeping the clock running when
 * there are no timer events programmed in tickless kernel
 * scheduling. This is necessary if the clock is used to track
 * passage of time.
 *
 * @retval prev_status Previous status of always on flag
 */
static inline int k_enable_sys_clock_always_on(void)
{
	int prev_status = _sys_clock_always_on;

	_sys_clock_always_on = 1;
	_enable_sys_clock();

	return prev_status;
}

/**
 * @brief Disable clock always on in tickless kernel
 *
 * This routine disables keeping the clock running when
 * there are no timer events programmed in tickless kernel
 * scheduling. To save power, this routine should be called
 * immediately when the clock is not used to track time.
 */
static inline void k_disable_sys_clock_always_on(void)
{
	_sys_clock_always_on = 0;
}
#else
#define k_enable_sys_clock_always_on() do { } while ((0))
#define k_disable_sys_clock_always_on() do { } while ((0))
#endif

/**
 * @brief Get system uptime (32-bit version).
 *
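A small usage sketch for the helpers above (not from this commit), assuming a tickless build (CONFIG_TICKLESS_KERNEL=y) where an application wants wall time while no timeouts are programmed; do_work() is a hypothetical workload:

    #include <kernel.h>
    #include <misc/printk.h>

    void timed_work(void)
    {
        /* Keep the clock running so k_uptime_get_32() stays valid even
         * when no timer events are programmed.
         */
        int was_on = k_enable_sys_clock_always_on();
        u32_t start = k_uptime_get_32();

        do_work();

        printk("took %u ms\n", k_uptime_get_32() - start);

        /* Drop back to power-saving behaviour only if we enabled it. */
        if (!was_on) {
            k_disable_sys_clock_always_on();
        }
    }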
@@ -28,7 +28,14 @@ extern "C" {
#error "SYS_CLOCK_HW_CYCLES_PER_SEC must be non-zero!"
#endif

#ifdef CONFIG_TICKLESS_KERNEL
#define sys_clock_ticks_per_sec \
	(1000000 / (CONFIG_TICKLESS_KERNEL_TIME_UNIT_IN_MICRO_SECS))
extern int _sys_clock_always_on;
extern void _enable_sys_clock(void);
#else
#define sys_clock_ticks_per_sec CONFIG_SYS_CLOCK_TICKS_PER_SEC
#endif

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
extern int sys_clock_hw_cycles_per_sec;

@@ -100,7 +107,7 @@ extern int sys_clock_hw_cycles_per_tick;
 * @} end defgroup clock_apis
 */
extern s64_t _sys_clock_tick_count;
extern volatile u64_t _sys_clock_tick_count;

/*
 * Number of ticks for x seconds. NOTE: With MSEC() or USEC(),
@@ -76,4 +76,40 @@ config TICKLESS_IDLE_THRESH
	  ticks that must occur before the next kernel timer expires in order
	  for suppression to happen.

config TICKLESS_KERNEL
	bool
	prompt "Tickless kernel"
	default n
	depends on TICKLESS_IDLE
	help
	  This option enables a fully event driven kernel. Periodic system
	  clock interrupt generation is stopped at all times. This option
	  requires the Tickless Idle option to be enabled.

config TICKLESS_KERNEL_TIME_UNIT_IN_MICRO_SECS
	int
	prompt "Tickless kernel time unit in microseconds"
	default 1000
	depends on TICKLESS_KERNEL
	help
	  This option sets the system clock and scheduling granularity.
	  The default is one millisecond. This option also determines
	  the time unit passed in functions like _sys_soc_suspend. The
	  value should be chosen based on what the timer hardware and driver
	  can support. Specifying a smaller time unit than the overall
	  system speed can support will cause scheduling errors.

config BUSY_WAIT_USES_ALTERNATE_CLOCK
	bool
	prompt "Busy wait uses alternate clock in tickless kernel mode"
	default n
	help
	  In tickless kernel mode, the system clock will be stopped when
	  there are no timer events programmed. If the system clock is to
	  be used to keep time, e.g. to get a delta of time cycles, then it
	  needs to be turned on using the provided APIs. Some platforms have
	  alternate clocks which can be used instead; in that case this flag
	  would be set to true. This flag is checked before turning on the
	  system clock in APIs that busy wait reading clock cycles.
endif
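For reference, with the default unit of 1000 microseconds, sys_clock_ticks_per_sec works out to 1000000 / 1000 = 1000, i.e. one scheduling unit per millisecond. A minimal prj_tickless.conf fragment enabling these options might look like the following (a sketch based on the option names above, not the exact file added by this commit):

    # Hypothetical prj_tickless.conf fragment
    CONFIG_TICKLESS_IDLE=y
    CONFIG_TICKLESS_KERNEL=y
    CONFIG_TICKLESS_KERNEL_TIME_UNIT_IN_MICRO_SECS=1000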
@@ -18,6 +18,15 @@
 * state.
 */
s32_t _sys_idle_threshold_ticks = CONFIG_TICKLESS_IDLE_THRESH;

#if defined(CONFIG_TICKLESS_KERNEL)
#define _must_enter_tickless_idle(ticks) (1)
#else
#define _must_enter_tickless_idle(ticks) \
	((ticks == K_FOREVER) || (ticks >= _sys_idle_threshold_ticks))
#endif
#else
#define _must_enter_tickless_idle(ticks) ((void)ticks, (0))
#endif /* CONFIG_TICKLESS_IDLE */

#ifdef CONFIG_SYS_POWER_MANAGEMENT

@@ -54,18 +63,37 @@ static void set_kernel_idle_time_in_ticks(s32_t ticks)
#define set_kernel_idle_time_in_ticks(x) do { } while (0)
#endif

static void _sys_power_save_idle(s32_t ticks __unused)
static void _sys_power_save_idle(s32_t ticks)
{
#if defined(CONFIG_TICKLESS_IDLE)
	if ((ticks == K_FOREVER) || ticks >= _sys_idle_threshold_ticks) {
#ifdef CONFIG_TICKLESS_KERNEL
	if (ticks != K_FOREVER) {
		ticks -= _get_elapsed_program_time();
		if (!ticks) {
			/*
			 * Timer has expired or about to expire
			 * No time for power saving operations
			 *
			 * Note that it will never be zero unless some time
			 * had elapsed since timer was last programmed.
			 */
			k_cpu_idle();
			return;
		}
	}
#endif
	if (_must_enter_tickless_idle(ticks)) {
		/*
		 * Stop generating system timer interrupts until it's time for
		 * the next scheduled kernel timer to expire.
		 */

		/*
		 * In the case of tickless kernel, timer driver should
		 * reprogram timer only if the currently programmed time
		 * duration is smaller than the idle time.
		 */
		_timer_idle_enter(ticks);
	}
#endif /* CONFIG_TICKLESS_IDLE */

	set_kernel_idle_time_in_ticks(ticks);
#if (defined(CONFIG_SYS_POWER_LOW_POWER_STATE) || \

@@ -108,15 +136,11 @@ void _sys_power_save_idle_exit(s32_t ticks)
		_sys_soc_resume();
	}
#endif
#ifdef CONFIG_TICKLESS_IDLE
	if ((ticks == K_FOREVER) || ticks >= _sys_idle_threshold_ticks) {
		/* Resume normal periodic system timer interrupts */
	if (_must_enter_tickless_idle(ticks)) {
		/* Resume normal periodic system timer interrupts */
		_timer_idle_exit();
	}
#else
	ARG_UNUSED(ticks);
#endif /* CONFIG_TICKLESS_IDLE */
}
@@ -25,6 +25,8 @@ extern void _pend_thread(struct k_thread *thread,
extern void _pend_current_thread(_wait_q_t *wait_q, s32_t timeout);
extern void _move_thread_to_end_of_prio_q(struct k_thread *thread);
extern int __must_switch_threads(void);
extern int _is_thread_time_slicing(struct k_thread *thread);
extern void _update_time_slice_before_swap(void);
#ifdef _NON_OPTIMIZED_TICKS_PER_SEC
extern s32_t _ms_to_ticks(s32_t ms);
#endif

@@ -42,6 +44,11 @@ static inline int _is_idle_thread(void *entry_point)
	return entry_point == idle;
}

static inline int _is_idle_thread_ptr(k_tid_t thread)
{
	return thread == _idle_thread;
}

#ifdef CONFIG_MULTITHREADING
#define _ASSERT_VALID_PRIO(prio, entry_point) do { \
	__ASSERT(((prio) == K_IDLE_PRIO && _is_idle_thread(entry_point)) || \
@@ -52,8 +52,19 @@ extern void _new_thread(char *pStack, size_t stackSize,
extern unsigned int __swap(unsigned int key);

#define _Swap(x) __swap(x)
#if defined(CONFIG_TICKLESS_KERNEL) && defined(CONFIG_TIMESLICING)
extern void _update_time_slice_before_swap(void);

static inline unsigned int _time_slice_swap(unsigned int key)
{
	_update_time_slice_before_swap();
	return __swap(key);
}

#define _Swap(x) _time_slice_swap(x)
#else
#define _Swap(x) __swap(x)
#endif

/* set and clear essential fiber/task flag */

extern void _thread_essential_set(void);
@@ -15,6 +15,7 @@
 */

#include <misc/dlist.h>
#include <drivers/system_timer.h>

#ifdef __cplusplus
extern "C" {

@@ -209,6 +210,22 @@ static inline void _add_timeout(struct k_thread *thread,
	s32_t *delta = &timeout->delta_ticks_from_prev;
	struct _timeout *in_q;

#ifdef CONFIG_TICKLESS_KERNEL
	/*
	 * If some time has already passed since the timer was last
	 * programmed, then that time needs to be accounted for when
	 * inserting the new timeout. We account for this
	 * by adding the already elapsed time to the new timeout.
	 * This is like adding this timeout back in history.
	 */
	u32_t adjusted_timeout;
	u32_t program_time = _get_program_time();

	if (program_time > 0) {
		*delta += _get_elapsed_program_time();
	}
	adjusted_timeout = *delta;
#endif
	SYS_DLIST_FOR_EACH_CONTAINER(&_timeout_q, in_q, node) {
		if (*delta <= in_q->delta_ticks_from_prev) {
			in_q->delta_ticks_from_prev -= *delta;

@@ -226,6 +243,12 @@ inserted:
	K_DEBUG("after adding timeout %p\n", timeout);
	_dump_timeout(timeout, 0);
	_dump_timeout_q();

#ifdef CONFIG_TICKLESS_KERNEL
	if (!program_time || (adjusted_timeout < program_time)) {
		_set_time(adjusted_timeout);
	}
#endif
}

/*
@@ -390,6 +390,49 @@ void k_sched_time_slice_set(s32_t duration_in_ms, int prio)
	_time_slice_elapsed = 0;
	_time_slice_prio_ceiling = prio;
}

#ifdef CONFIG_TICKLESS_KERNEL
int _is_thread_time_slicing(struct k_thread *thread)
{
	/*
	 * Time slicing is done on the thread if following conditions are met
	 *
	 * Time slice duration should be set > 0
	 * Should not be the idle thread
	 * Priority should be higher than time slice priority ceiling
	 * There should be multiple threads active with same priority
	 */

	if (!(_time_slice_duration > 0) || (_is_idle_thread_ptr(thread))
	    || _is_prio_higher(thread->base.prio, _time_slice_prio_ceiling)) {
		return 0;
	}

	int q_index = _get_ready_q_q_index(thread->base.prio);
	sys_dlist_t *q = &_ready_q.q[q_index];

	return sys_dlist_has_multiple_nodes(q);
}

/* Must be called with interrupts locked */
/* Should be called only immediately before a thread switch */
void _update_time_slice_before_swap(void)
{
	if (!_is_thread_time_slicing(_get_next_ready_thread())) {
		return;
	}

	/* Restart time slice count at new thread switch */
	_time_slice_elapsed = 0;

	u32_t remaining = _get_remaining_program_time();

	if (!remaining || (_time_slice_duration < remaining)) {
		_set_time(_time_slice_duration);
	}
}
#endif

#endif /* CONFIG_TIMESLICING */

int k_is_preempt_thread(void)
@@ -38,8 +38,19 @@ int sys_clock_hw_cycles_per_sec;
/* updated by timer driver for tickless, stays at 1 for non-tickless */
s32_t _sys_idle_elapsed_ticks = 1;

s64_t _sys_clock_tick_count;
volatile u64_t _sys_clock_tick_count;

#ifdef CONFIG_TICKLESS_KERNEL
/*
 * If this flag is set, system clock will run continuously even if
 * there are no timer events programmed. This allows using the
 * system clock to track passage of time without interruption.
 * To save power, this should be turned on only when required.
 */
int _sys_clock_always_on;

static u32_t next_ts;
#endif
/**
 *
 * @brief Return the lower part of the current system tick count

@@ -49,12 +60,20 @@ s64_t _sys_clock_tick_count;
 */
u32_t _tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (u32_t)_get_elapsed_clock_time();
#else
	return (u32_t)_sys_clock_tick_count;
#endif
}
FUNC_ALIAS(_tick_get_32, sys_tick_get_32, u32_t);

u32_t k_uptime_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(_tick_get_32());
}

@@ -76,7 +95,11 @@ s64_t _tick_get(void)
	 */
	unsigned int imask = irq_lock();

#ifdef CONFIG_TICKLESS_KERNEL
	tmp_sys_clock_tick_count = _get_elapsed_clock_time();
#else
	tmp_sys_clock_tick_count = _sys_clock_tick_count;
#endif
	irq_unlock(imask);
	return tmp_sys_clock_tick_count;
}

@@ -84,6 +107,10 @@ FUNC_ALIAS(_tick_get, sys_tick_get, s64_t);

s64_t k_uptime_get(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(_tick_get());
}

@@ -128,7 +155,11 @@ static ALWAYS_INLINE s64_t _nano_tick_delta(s64_t *reftime)
	 */
	unsigned int imask = irq_lock();

#ifdef CONFIG_TICKLESS_KERNEL
	saved = _get_elapsed_clock_time();
#else
	saved = _sys_clock_tick_count;
#endif
	irq_unlock(imask);
	delta = saved - (*reftime);
	*reftime = saved;

@@ -274,6 +305,12 @@ int _time_slice_prio_ceiling = CONFIG_TIMESLICE_PRIORITY;
 */
static void handle_time_slicing(s32_t ticks)
{
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts = 0;
	if (!_is_thread_time_slicing(_current)) {
		return;
	}
#else
	if (_time_slice_duration == 0) {
		return;
	}

@@ -281,6 +318,7 @@ static void handle_time_slicing(s32_t ticks)
	if (_is_prio_higher(_current->base.prio, _time_slice_prio_ceiling)) {
		return;
	}
#endif

	_time_slice_elapsed += __ticks_to_ms(ticks);
	if (_time_slice_elapsed >= _time_slice_duration) {

@@ -293,10 +331,15 @@ static void handle_time_slicing(s32_t ticks)
		_move_thread_to_end_of_prio_q(_current);
		irq_unlock(key);
	}
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts =
		_ms_to_ticks(_time_slice_duration - _time_slice_elapsed);
#endif
}
#else
#define handle_time_slicing(ticks) do { } while (0)
#endif

/**
 *
 * @brief Announce a tick to the kernel

@@ -309,6 +352,7 @@ static void handle_time_slicing(s32_t ticks)
 */
void _nano_sys_clock_tick_announce(s32_t ticks)
{
#ifndef CONFIG_TICKLESS_KERNEL
	unsigned int key;

	K_DEBUG("ticks: %d\n", ticks);

@@ -317,9 +361,24 @@ void _nano_sys_clock_tick_announce(s32_t ticks)
	key = irq_lock();
	_sys_clock_tick_count += ticks;
	irq_unlock(key);
#endif
	handle_timeouts(ticks);

	/* time slicing is basically handled like just yet another timeout */
	handle_time_slicing(ticks);

#ifdef CONFIG_TICKLESS_KERNEL
	u32_t next_to = _get_next_timeout_expiry();

	next_to = next_to == K_FOREVER ? 0 : next_to;
	next_to = !next_to || (next_ts
			       && next_to) > next_ts ? next_ts : next_to;

	u32_t remaining = _get_remaining_program_time();

	if ((!remaining && next_to) || (next_to < remaining)) {
		/* Clears current program if next_to = 0 and remaining > 0 */
		_set_time(next_to);
	}
#endif
}
@@ -69,6 +69,10 @@ int _is_thread_essential(void)

void k_busy_wait(u32_t usec_to_wait)
{
#if defined(CONFIG_TICKLESS_KERNEL) && \
	!defined(CONFIG_BUSY_WAIT_USES_ALTERNATE_CLOCK)
	int saved_always_on = k_enable_sys_clock_always_on();
#endif
	/* use 64-bit math to prevent overflow when multiplying */
	u32_t cycles_to_wait = (u32_t)(
		(u64_t)usec_to_wait *

@@ -85,6 +89,10 @@ void k_busy_wait(u32_t usec_to_wait)
			break;
		}
	}
#if defined(CONFIG_TICKLESS_KERNEL) && \
	!defined(CONFIG_BUSY_WAIT_USES_ALTERNATE_CLOCK)
	_sys_clock_always_on = saved_always_on;
#endif
}

#ifdef CONFIG_THREAD_CUSTOM_DATA