The system tick count is a 64-bit quantity that gets updated from interrupt context, meaning that it's dangerously non-atomic and has to be locked. The core kernel clock code did this right, but the value was also exposed to the rest of the universe as a global variable, and virtually nothing else was accessing it correctly. Even in the timer ISRs themselves, the interrupts may themselves be preempted (most of our architectures support nested interrupts) by code that wants to set timeouts and inspect system uptime. Define a z_tick_{get,set}() API, eliminate the old variable, and make sure everyone uses the right mechanism.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
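As a quick sketch of the intended call pattern (hypothetical caller code; the saved_ticks variable and helper names are invented for illustration), uptime bookkeeping now goes through the accessors rather than a shared global:

static s64_t saved_ticks;

static void checkpoint_uptime(void)
{
	/* z_tick_get() reads the full 64-bit count under irq_lock */
	saved_ticks = z_tick_get();
}

static s64_t ticks_since_checkpoint(void)
{
	return z_tick_get() - saved_ticks;
}

/* code that needs to preset or rewind the count uses the setter,
 * which takes the same lock
 */
static void reset_uptime(void)
{
	z_tick_set(0);
}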
/* system clock support */

/*
 * Copyright (c) 1997-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel_structs.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <wait_q.h>
#include <drivers/system_timer.h>
#include <syscall_handler.h>

#ifdef CONFIG_SYS_CLOCK_EXISTS
#ifdef _NON_OPTIMIZED_TICKS_PER_SEC
#warning "non-optimized system clock frequency chosen: performance may suffer"
#endif
#endif

#ifdef CONFIG_SYS_CLOCK_EXISTS
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif
#else
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec;
#endif
#endif

/* updated by timer driver for tickless, stays at 1 for non-tickless */
s32_t _sys_idle_elapsed_ticks = 1;

/* Note that this value is 64 bits, and thus non-atomic on almost all
 * Zephyr architectures.  And of course it's routinely updated inside
 * timer interrupts.  Access to it must be locked.
 */
static volatile u64_t tick_count;

#ifdef CONFIG_TICKLESS_KERNEL
/*
 * If this flag is set, the system clock will run continuously even if
 * there are no timer events programmed. This allows using the
 * system clock to track the passage of time without interruption.
 * To save power, this should be turned on only when required.
 */
int _sys_clock_always_on = 1;

static u32_t next_ts;
#endif

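/* Cheap accessor: returns only the low 32 bits of the tick count (or of the
 * driver-reported elapsed time in tickless mode), so callers that can
 * tolerate 32-bit wraparound avoid the locking that the full 64-bit
 * z_tick_get() below performs in the non-tickless case.
 */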
u32_t z_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (u32_t)_get_elapsed_clock_time();
#else
	return (u32_t)tick_count;
#endif
}

u32_t _impl_k_uptime_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(z_tick_get_32());
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_uptime_get_32)
{
#ifdef CONFIG_TICKLESS_KERNEL
	Z_OOPS(Z_SYSCALL_VERIFY(_sys_clock_always_on));
#endif
	return _impl_k_uptime_get_32();
}
#endif

s64_t z_tick_get(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return _get_elapsed_clock_time();
#else
	unsigned int key = irq_lock();
	s64_t ret = tick_count;

	irq_unlock(key);
	return ret;
#endif
}

void z_tick_set(s64_t val)
{
	unsigned int key = irq_lock();

	tick_count = val;
	irq_unlock(key);
}

s64_t _impl_k_uptime_get(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	__ASSERT(_sys_clock_always_on,
		 "Call k_enable_sys_clock_always_on to use clock API");
#endif
	return __ticks_to_ms(z_tick_get());
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_uptime_get, ret_p)
{
	u64_t *ret = (u64_t *)ret_p;

	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(ret, sizeof(*ret)));
	*ret = _impl_k_uptime_get();
	return 0;
}
#endif

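/* Illustrative usage sketch: seed a reference with k_uptime_get(), then each
 * k_uptime_delta(&ref) call returns the milliseconds elapsed since the
 * previous call and refreshes the reference.
 *
 *	s64_t ref = k_uptime_get();
 *	...
 *	s64_t elapsed_ms = k_uptime_delta(&ref);
 */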
s64_t k_uptime_delta(s64_t *reftime)
{
	s64_t uptime, delta;

	uptime = k_uptime_get();
	delta = uptime - *reftime;
	*reftime = uptime;

	return delta;
}

u32_t k_uptime_delta_32(s64_t *reftime)
{
	return (u32_t)k_uptime_delta(reftime);
}

/* handle the expired timeouts in the nano timeout queue */

#ifdef CONFIG_SYS_CLOCK_EXISTS
/*
 * Handle timeouts by dequeuing the expired ones from _timeout_q and queuing
 * them on a local one, then doing the real handling from that queue. This
 * allows going through the second queue without needing to have the
 * interrupts locked since it is a local queue. Each expired timeout is marked
 * as _EXPIRED so that an ISR preempting us and releasing an object on which
 * a thread was timing out and expired will not give the object to that thread.
 *
 * Always called from interrupt level, and always only from the system clock
 * interrupt.
 */

static inline void handle_timeouts(s32_t ticks)
{
	sys_dlist_t expired;
	unsigned int key;

	/* init before locking interrupts */
	sys_dlist_init(&expired);

	key = irq_lock();

	sys_dnode_t *next = sys_dlist_peek_head(&_timeout_q);
	struct _timeout *timeout = (struct _timeout *)next;

	K_DEBUG("head: %p, delta: %d\n",
		timeout, timeout ? timeout->delta_ticks_from_prev : -2112);

	if (next == NULL) {
		irq_unlock(key);
		return;
	}

	/*
	 * Dequeue all expired timeouts from _timeout_q, relieving irq lock
	 * pressure between each of them, allowing handling of higher priority
	 * interrupts. We know that no new timeout will be prepended in front
	 * of a timeout whose delta is 0, since timeouts of 0 ticks are
	 * prohibited.
	 */

	while (next != NULL) {

		/*
		 * If the number of ticks is greater than the first timeout
		 * delta in the list, the lag produced by this initial
		 * difference must also be applied to the other timeouts in
		 * the list until it is entirely consumed.
		 */

		s32_t tmp = timeout->delta_ticks_from_prev;

		if (timeout->delta_ticks_from_prev < ticks) {
			timeout->delta_ticks_from_prev = 0;
		} else {
			timeout->delta_ticks_from_prev -= ticks;
		}

		ticks -= tmp;

		next = sys_dlist_peek_next(&_timeout_q, next);

		if (timeout->delta_ticks_from_prev == 0) {
			sys_dnode_t *node = &timeout->node;

			sys_dlist_remove(node);

			/*
			 * Reverse the order in which they were queued in the
			 * timeout_q: timeouts expiring on the same tick are
			 * queued in the reverse order, time-wise, that they
			 * are added, to shorten the amount of time with
			 * interrupts locked while walking the timeout_q. By
			 * reversing the order _again_ when building the
			 * expired queue, they end up being processed in the
			 * same order they were added, time-wise.
			 */

			sys_dlist_prepend(&expired, node);

			timeout->delta_ticks_from_prev = _EXPIRED;

		} else {
			break;
		}

		irq_unlock(key);
		key = irq_lock();

		timeout = (struct _timeout *)next;
	}

	irq_unlock(key);

	_handle_expired_timeouts(&expired);
}
#else
#define handle_timeouts(ticks) do { } while (false)
#endif

#ifdef CONFIG_TIMESLICING
s32_t _time_slice_elapsed;
s32_t _time_slice_duration;
int _time_slice_prio_ceiling;

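/* The slice duration and priority ceiling above are configured from thread
 * level (normally via the k_sched_time_slice_set() API); this file only
 * updates and consumes the bookkeeping from the tick handler below.
 */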
/*
 * Always called from interrupt level, and always only from the system clock
 * interrupt, thus:
 * - _current does not have to be protected, since it only changes at thread
 *   level or when exiting a non-nested interrupt
 * - _time_slice_elapsed does not have to be protected, since it can only
 *   change in this function and at thread level
 * - _time_slice_duration does not have to be protected, since it can only
 *   change at thread level
 */
static void handle_time_slicing(s32_t ticks)
{
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts = 0;
#endif
	if (!_is_thread_time_slicing(_current)) {
		return;
	}

	_time_slice_elapsed += ticks;
	if (_time_slice_elapsed >= _time_slice_duration) {

		unsigned int key;

		_time_slice_elapsed = 0;

		key = irq_lock();
		_move_thread_to_end_of_prio_q(_current);
		irq_unlock(key);
	}
#ifdef CONFIG_TICKLESS_KERNEL
	next_ts = _time_slice_duration - _time_slice_elapsed;
#endif
}
#else
#define handle_time_slicing(ticks) do { } while (false)
#endif

/**
 *
 * @brief Announce a tick to the kernel
 *
 * This function is only to be called by the system clock timer driver when a
 * tick is to be announced to the kernel. It takes care of dequeuing the
 * timers that have expired and waking up the threads pending on them.
 *
 * @return N/A
 */
void _nano_sys_clock_tick_announce(s32_t ticks)
{
#ifdef CONFIG_SMP
	/* sys_clock timekeeping happens only on the main CPU */
	if (_arch_curr_cpu()->id) {
		return;
	}
#endif

#ifndef CONFIG_TICKLESS_KERNEL
	unsigned int key;

	K_DEBUG("ticks: %d\n", ticks);

	key = irq_lock();
	tick_count += ticks;
	irq_unlock(key);
#endif
	handle_timeouts(ticks);

	/* time slicing is basically handled like just yet another timeout */
	handle_time_slicing(ticks);

#ifdef CONFIG_TICKLESS_KERNEL
	u32_t next_to = _get_next_timeout_expiry();

	next_to = next_to == K_FOREVER ? 0 : next_to;

	/* take the nearer of the next timeout and the next time slice
	 * expiry (next_ts == 0 means no slice deadline is pending)
	 */
	if (next_to == 0 || (next_ts != 0 && next_to > next_ts)) {
		next_to = next_ts;
	}

	u32_t remaining = _get_remaining_program_time();

	if ((!remaining && next_to) || (next_to < remaining)) {
		/* Clears current program if next_to = 0 and remaining > 0 */
		_set_time(next_to);
	}
#endif
}

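/* Illustrative sketch of a hypothetical timer driver ISR: it acknowledges
 * the hardware interrupt and then announces the ticks that have elapsed
 * since the last announcement, e.g.
 *
 *	static void timer_isr(void *arg)
 *	{
 *		ARG_UNUSED(arg);
 *		... acknowledge the hardware interrupt ...
 *		_nano_sys_clock_tick_announce(_sys_idle_elapsed_ticks);
 *	}
 *
 * where _sys_idle_elapsed_ticks stays at 1 for non-tickless builds (see the
 * definition at the top of this file).
 */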
int k_enable_sys_clock_always_on(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	int prev_status = _sys_clock_always_on;

	_sys_clock_always_on = 1;
	_enable_sys_clock();

	return prev_status;
#else
	return -ENOTSUP;
#endif
}

void k_disable_sys_clock_always_on(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	_sys_clock_always_on = 0;
#endif
}
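/* Illustrative application-side usage (hypothetical code): on a tickless
 * kernel the uptime APIs assert unless the clock is kept running, so an
 * application that needs continuous timekeeping brackets its measurement
 * with these calls:
 *
 *	k_enable_sys_clock_always_on();
 *	s64_t start_ms = k_uptime_get();
 *	... long-running work ...
 *	s64_t elapsed_ms = k_uptime_get() - start_ms;
 *	k_disable_sys_clock_always_on();
 */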