kernel: New timeout implementation
Now that the API has been fixed up, replace the existing timeout queue with a much smaller version. The basic algorithm is unchanged: timeouts are stored in a sorted dlist, with each node holding a delta time from the previous node in the list; the announce call just walks this list, pulling off the heads as needed.

Advantages:

* Properly spinlocked and SMP-aware. The earlier timer implementation relied on only CPU 0 doing timeout work, and on an irq_lock() being taken before entry (something that was violated in a few spots). Now any CPU can wake up for an event (or all of them) and everything works correctly.

* The *_thread_timeout() API is now expressible as a clean wrapping (just one-liners) around the lower-level interface based on function pointer callbacks. As a result, the timeout objects no longer need to store backpointers to the thread and wait_q and have shrunk by 33%.

* MUCH smaller, to the tune of hundreds of lines of code removed.

* Future proof, in that all operations on the queue are now fronted by just two entry points (_add_timeout() and z_clock_announce()), which can easily be augmented with fancier data structures.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent 52e444bc05, commit 987c0e5fc1

11 changed files with 315 additions and 637 deletions
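For readers unfamiliar with delta lists, here is a minimal, self-contained sketch of the data structure the commit message describes. It is not the kernel code itself: it uses plain C, a hand-rolled singly linked list instead of sys_dnode_t, and no spinlocks, and add_timeout(), announce(), and expired() are illustrative names. Only the delta encoding and the way the announce path pops expired heads mirror the real _add_timeout()/z_clock_announce() pair.

#include <stdio.h>

/* Each node stores the delta from the previous node in the sorted list,
 * so a node's absolute expiry is the sum of all deltas up to and
 * including it.
 */
struct timeout {
        struct timeout *next;
        int dticks;
        void (*fn)(struct timeout *t);
};

static struct timeout *head;

/* Insert keeping the list sorted by cumulative delta. */
static void add_timeout(struct timeout *to, void (*fn)(struct timeout *), int ticks)
{
        struct timeout **pp = &head;

        to->fn = fn;
        to->dticks = ticks;
        for (struct timeout *t = *pp; t != NULL; t = *pp) {
                if (t->dticks > to->dticks) {
                        t->dticks -= to->dticks; /* new node absorbs part of the delta */
                        break;
                }
                to->dticks -= t->dticks;
                pp = &t->next;
        }
        to->next = *pp;
        *pp = to;
}

/* Announce elapsed ticks: pop and fire every head whose delta has been
 * consumed, then charge the remainder to the new head.
 */
static void announce(int ticks)
{
        while (head != NULL && head->dticks <= ticks) {
                struct timeout *t = head;

                head = t->next;
                ticks -= t->dticks;
                t->dticks = 0;
                t->fn(t);
        }
        if (head != NULL) {
                head->dticks -= ticks;
        }
}

static void expired(struct timeout *t)
{
        printf("timeout %p fired\n", (void *)t);
}

int main(void)
{
        struct timeout a, b;

        add_timeout(&a, expired, 3);
        add_timeout(&b, expired, 5); /* stored as a delta of 2 behind 'a' */
        announce(4);                 /* fires only 'a' */
        announce(1);                 /* fires 'b' */
        return 0;
}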
@@ -43,7 +43,11 @@ static u32_t driver_uptime;

u32_t z_clock_elapsed(void)
{
#ifdef TICKLESS_KERNEL
        return (u32_t)(z_clock_uptime() - driver_uptime);
#else
        return 0;
#endif
}

static void wrapped_announce(s32_t ticks)
@@ -54,4 +58,12 @@ static void wrapped_announce(s32_t ticks)

#define z_clock_announce(t) wrapped_announce(t)

#define _sys_clock_always_on (0)

static inline void z_tick_set(s64_t val)
{
        /* noop with current kernel code, use z_clock_announce() */
        ARG_UNUSED(val);
}

#endif /* ZEPHYR_LEGACY_SET_TIME_H__ */
@@ -1330,9 +1330,8 @@ struct k_timer {

#define _K_TIMER_INITIALIZER(obj, expiry, stop) \
        { \
        .timeout.delta_ticks_from_prev = _INACTIVE, \
        .timeout.thread = NULL, \
        .timeout.func = _timer_expiration_handler, \
        .timeout.dticks = _INACTIVE, \
        .timeout.fn = _timer_expiration_handler, \
        .wait_q = _WAIT_Q_INIT(&obj.wait_q), \
        .expiry_fn = expiry, \
        .stop_fn = stop, \
@@ -1608,7 +1607,16 @@ __syscall u32_t k_uptime_get_32(void);
 *
 * @return Elapsed time.
 */
extern s64_t k_uptime_delta(s64_t *reftime);
static inline s64_t k_uptime_delta(s64_t *reftime)
{
        s64_t uptime, delta;

        uptime = k_uptime_get();
        delta = uptime - *reftime;
        *reftime = uptime;

        return delta;
}

/**
 * @brief Get elapsed time (32-bit version).

@@ -1626,7 +1634,10 @@ extern s64_t k_uptime_delta(s64_t *reftime);
 *
 * @return Elapsed time.
 */
extern u32_t k_uptime_delta_32(s64_t *reftime);
static inline u32_t k_uptime_delta_32(s64_t *reftime)
{
        return (u32_t)k_uptime_delta(reftime);
}

/**
 * @brief Read the hardware clock.
@@ -199,14 +199,10 @@ u32_t z_tick_get_32(void);
 */
s64_t z_tick_get(void);

/**
 *
 * @brief Sets the current system tick count
 *
 * @param ticks Ticks since system start
 *
 */
void z_tick_set(s64_t ticks);
#ifndef CONFIG_SYS_CLOCK_EXISTS
#define z_tick_get() (0)
#define z_tick_get_32() (0)
#endif

/* timeouts */
@@ -215,9 +211,8 @@ typedef void (*_timeout_func_t)(struct _timeout *t);

struct _timeout {
        sys_dnode_t node;
        struct k_thread *thread;
        s32_t delta_ticks_from_prev;
        _timeout_func_t func;
        s32_t dticks;
        _timeout_func_t fn;
};

/*
@@ -16,7 +16,6 @@ add_library(kernel
  sched.c
  sem.c
  stack.c
  sys_clock.c
  system_work_q.c
  thread.c
  thread_abort.c

@@ -36,7 +35,7 @@ set_target_properties(

target_sources_ifdef(CONFIG_INT_LATENCY_BENCHMARK kernel PRIVATE int_latency_bench.c)
target_sources_ifdef(CONFIG_STACK_CANARIES kernel PRIVATE compiler_stack_protect.c)
target_sources_ifdef(CONFIG_SYS_CLOCK_EXISTS kernel PRIVATE timer.c)
target_sources_ifdef(CONFIG_SYS_CLOCK_EXISTS kernel PRIVATE timeout.c timer.c)
target_sources_ifdef(CONFIG_ATOMIC_OPERATIONS_C kernel PRIVATE atomic_c.c)
target_sources_if_kconfig( kernel PRIVATE poll.c)
@@ -84,7 +84,7 @@ static inline int _is_thread_prevented_from_running(struct k_thread *thread)
static inline bool _is_thread_timeout_active(struct k_thread *thread)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
        return thread->base.timeout.delta_ticks_from_prev != _INACTIVE;
        return thread->base.timeout.dticks != _INACTIVE;
#else
        return false;
#endif

@@ -266,7 +266,7 @@ static ALWAYS_INLINE void _sched_unlock_no_reschedule(void)
static ALWAYS_INLINE bool _is_thread_timeout_expired(struct k_thread *thread)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
        return thread->base.timeout.delta_ticks_from_prev == _EXPIRED;
        return thread->base.timeout.dticks == _EXPIRED;
#else
        return 0;
#endif
@@ -20,22 +20,31 @@ extern "C" {

#ifdef CONFIG_SYS_CLOCK_EXISTS

struct _thread_base;
static inline void _init_timeout(struct _timeout *t, _timeout_func_t fn)
{
        t->dticks = _INACTIVE;
}

extern u64_t z_last_tick_announced;
void _add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks);

void _init_timeout(struct _timeout *t, _timeout_func_t fn);
int _abort_timeout(struct _timeout *to);

void _add_timeout(struct _timeout *timeout, _timeout_func_t func,
                  s32_t timeout_in_ticks);
static inline void _init_thread_timeout(struct _thread_base *thread_base)
{
        _init_timeout(&thread_base->timeout, NULL);
}

int _abort_timeout(struct _timeout *timeout);
extern void z_thread_timeout(struct _timeout *to);

void _init_thread_timeout(struct _thread_base *thread_base);
static inline void _add_thread_timeout(struct k_thread *th, s32_t ticks)
{
        _add_timeout(&th->base.timeout, z_thread_timeout, ticks);
}

void _add_thread_timeout(struct k_thread *thread, s32_t timeout_in_ticks);

int _abort_thread_timeout(struct k_thread *thread);
static inline int _abort_thread_timeout(struct k_thread *thread)
{
        return _abort_timeout(&thread->base.timeout);
}

s32_t _get_next_timeout_expiry(void);
@@ -364,6 +364,20 @@ void _unpend_thread_no_timeout(struct k_thread *thread)
        thread->base.pended_on = NULL;
}

#ifdef CONFIG_SYS_CLOCK_EXISTS
/* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *to)
{
        struct k_thread *th = CONTAINER_OF(to, struct k_thread, base.timeout);

        if (th->base.pended_on != NULL) {
                _unpend_thread_no_timeout(th);
        }
        _mark_thread_as_started(th);
        _ready_thread(th);
}
#endif

int _pend_current_thread(int key, _wait_q_t *wait_q, s32_t timeout)
{
        pend(_current, wait_q, timeout);
@@ -1,604 +0,0 @@
/* system clock support */

/*
 * Copyright (c) 1997-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */


#include <kernel_structs.h>
#include <toolchain.h>
#include <linker/sections.h>
#include <wait_q.h>
#include <drivers/system_timer.h>
#include <syscall_handler.h>

#ifdef CONFIG_SYS_CLOCK_EXISTS
#ifdef _NON_OPTIMIZED_TICKS_PER_SEC
#warning "non-optimized system clock frequency chosen: performance may suffer"
#endif
#endif

#ifdef CONFIG_SYS_CLOCK_EXISTS
static void _handle_expired_timeouts(sys_dlist_t *expired);
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif
#else
#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec;
#endif
#endif

extern u64_t z_clock_uptime(void);

/* Note that this value is 64 bits, and thus non-atomic on almost all
 * Zephyr archtictures. And of course it's routinely updated inside
 * timer interrupts. Access to it must be locked.
 */
static volatile u64_t tick_count;

u64_t z_last_tick_announced;

#ifdef CONFIG_TICKLESS_KERNEL
/*
 * If this flag is set, system clock will run continuously even if
 * there are no timer events programmed. This allows using the
 * system clock to track passage of time without interruption.
 * To save power, this should be turned on only when required.
 */
int _sys_clock_always_on = 1;

static u32_t next_ts;
#endif

u32_t z_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
        return (u32_t)z_clock_uptime();
#else
        return (u32_t)tick_count;
#endif
}

u32_t _impl_k_uptime_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
        __ASSERT(_sys_clock_always_on,
                 "Call k_enable_sys_clock_always_on to use clock API");
#endif
        return __ticks_to_ms(z_tick_get_32());
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_uptime_get_32)
{
#ifdef CONFIG_TICKLESS_KERNEL
        Z_OOPS(Z_SYSCALL_VERIFY(_sys_clock_always_on));
#endif
        return _impl_k_uptime_get_32();
}
#endif

s64_t z_tick_get(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
        return z_clock_uptime();
#else
        unsigned int key = irq_lock();
        s64_t ret = tick_count;

        irq_unlock(key);
        return ret;
#endif
}

void z_tick_set(s64_t val)
{
        unsigned int key = irq_lock();

        __ASSERT_NO_MSG(val > tick_count);
        __ASSERT_NO_MSG(val > z_last_tick_announced);

        tick_count = val;
        irq_unlock(key);
}

s64_t _impl_k_uptime_get(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
        __ASSERT(_sys_clock_always_on,
                 "Call k_enable_sys_clock_always_on to use clock API");
#endif
        return __ticks_to_ms(z_tick_get());
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_uptime_get, ret_p)
{
        u64_t *ret = (u64_t *)ret_p;

        Z_OOPS(Z_SYSCALL_MEMORY_WRITE(ret, sizeof(*ret)));
        *ret = _impl_k_uptime_get();
        return 0;
}
#endif

s64_t k_uptime_delta(s64_t *reftime)
{
        s64_t uptime, delta;

        uptime = k_uptime_get();
        delta = uptime - *reftime;
        *reftime = uptime;

        return delta;
}

u32_t k_uptime_delta_32(s64_t *reftime)
{
        return (u32_t)k_uptime_delta(reftime);
}

/* handle the expired timeouts in the nano timeout queue */

#ifdef CONFIG_SYS_CLOCK_EXISTS
/*
 * Handle timeouts by dequeuing the expired ones from _timeout_q and queue
 * them on a local one, then doing the real handling from that queue. This
 * allows going through the second queue without needing to have the
 * interrupts locked since it is a local queue. Each expired timeout is marked
 * as _EXPIRED so that an ISR preempting us and releasing an object on which
 * a thread was timing out and expired will not give the object to that thread.
 *
 * Always called from interrupt level, and always only from the system clock
 * interrupt.
 */

static inline void handle_timeouts(s32_t ticks)
{
        sys_dlist_t expired;
        unsigned int key;

        /* init before locking interrupts */
        sys_dlist_init(&expired);

        key = irq_lock();

        sys_dnode_t *next = sys_dlist_peek_head(&_timeout_q);
        struct _timeout *timeout = (struct _timeout *)next;

        K_DEBUG("head: %p, delta: %d\n",
                timeout, timeout ? timeout->delta_ticks_from_prev : -2112);

        if (next == NULL) {
                irq_unlock(key);
                return;
        }

        /*
         * Dequeue all expired timeouts from _timeout_q, relieving irq lock
         * pressure between each of them, allowing handling of higher priority
         * interrupts. We know that no new timeout will be prepended in front
         * of a timeout which delta is 0, since timeouts of 0 ticks are
         * prohibited.
         */

        while (next != NULL) {

                /*
                 * In the case where ticks number is greater than the first
                 * timeout delta of the list, the lag produced by this initial
                 * difference must also be applied to others timeouts in list
                 * until it was entirely consumed.
                 */

                s32_t tmp = timeout->delta_ticks_from_prev;

                if (timeout->delta_ticks_from_prev < ticks) {
                        timeout->delta_ticks_from_prev = 0;
                } else {
                        timeout->delta_ticks_from_prev -= ticks;
                }

                ticks -= tmp;

                next = sys_dlist_peek_next(&_timeout_q, next);

                if (timeout->delta_ticks_from_prev == 0) {
                        sys_dnode_t *node = &timeout->node;

                        sys_dlist_remove(node);

                        /*
                         * Reverse the order that that were queued in the
                         * timeout_q: timeouts expiring on the same ticks are
                         * queued in the reverse order, time-wise, that they are
                         * added to shorten the amount of time with interrupts
                         * locked while walking the timeout_q. By reversing the
                         * order _again_ when building the expired queue, they
                         * end up being processed in the same order they were
                         * added, time-wise.
                         */

                        sys_dlist_prepend(&expired, node);

                        timeout->delta_ticks_from_prev = _EXPIRED;

                } else {
                        break;
                }

                irq_unlock(key);
                key = irq_lock();

                timeout = (struct _timeout *)next;
        }

        irq_unlock(key);

        _handle_expired_timeouts(&expired);
}
#else
#define handle_timeouts(ticks) do { } while (false)
#endif

/**
 *
 * @brief Announce ticks to the kernel
 *
 * This function is only to be called by the system clock timer driver when a
 * tick is to be announced to the kernel. It takes care of dequeuing the
 * timers that have expired and wake up the threads pending on them.
 *
 * @return N/A
 */
void z_clock_announce(s32_t ticks)
{
        z_last_tick_announced += ticks;

        __ASSERT_NO_MSG(z_last_tick_announced >= tick_count);

#ifdef CONFIG_SMP
        /* sys_clock timekeeping happens only on the main CPU */
        if (_arch_curr_cpu()->id) {
                return;
        }
#endif

#ifndef CONFIG_TICKLESS_KERNEL
        unsigned int key;

        K_DEBUG("ticks: %d\n", ticks);

        key = irq_lock();
        tick_count += ticks;
        irq_unlock(key);
#endif
        handle_timeouts(ticks);

#ifdef CONFIG_TIMESLICING
        z_time_slice(ticks);
#endif

#ifdef CONFIG_TICKLESS_KERNEL
        u32_t next_to = _get_next_timeout_expiry();

        next_to = next_to == K_FOREVER ? 0 : next_to;
        next_to = !next_to || (next_ts
                               && next_to) > next_ts ? next_ts : next_to;

        if (next_to) {
                /* Clears current program if next_to = 0 and remaining > 0 */
                int dt = next_to ? next_to : (_sys_clock_always_on ? INT_MAX : K_FOREVER);
                z_clock_set_timeout(dt, false);
        }
#endif
}

int k_enable_sys_clock_always_on(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
        int prev_status = _sys_clock_always_on;

        _sys_clock_always_on = 1;
        _enable_sys_clock();

        return prev_status;
#else
        return -ENOTSUP;
#endif
}

void k_disable_sys_clock_always_on(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
        _sys_clock_always_on = 0;
#endif
}

#ifdef CONFIG_SYS_CLOCK_EXISTS

extern u64_t z_last_tick_announced;

/* initialize the timeouts part of k_thread when enabled in the kernel */

void _init_timeout(struct _timeout *t, _timeout_func_t func)
{
        /*
         * Must be initialized here and when dequeueing a timeout so that code
         * not dealing with timeouts does not have to handle this, such as when
         * waiting forever on a semaphore.
         */
        t->delta_ticks_from_prev = _INACTIVE;

        /*
         * Must be initialized here, so the _handle_one_timeout()
         * routine can check if there is a thread waiting on this timeout
         */
        t->thread = NULL;

        /*
         * Function must be initialized before being potentially called.
         */
        t->func = func;

        /*
         * These are initialized when enqueing on the timeout queue:
         *
         * thread->timeout.node.next
         * thread->timeout.node.prev
         */
}

void _init_thread_timeout(struct _thread_base *thread_base)
{
        _init_timeout(&thread_base->timeout, NULL);
}

/* remove a thread timing out from kernel object's wait queue */

static inline void _unpend_thread_timing_out(struct k_thread *thread,
                                             struct _timeout *timeout_obj)
{
        if (thread->base.pended_on) {
                _unpend_thread_no_timeout(thread);
        }
}


/*
 * Handle one timeout from the expired timeout queue. Removes it from the wait
 * queue it is on if waiting for an object; in this case, the return value is
 * kept as -EAGAIN, set previously in _Swap().
 */

static inline void _handle_one_expired_timeout(struct _timeout *timeout)
{
        struct k_thread *thread = timeout->thread;
        unsigned int key = irq_lock();

        timeout->delta_ticks_from_prev = _INACTIVE;

        K_DEBUG("timeout %p\n", timeout);
        if (thread) {
                _unpend_thread_timing_out(thread, timeout);
                _mark_thread_as_started(thread);
                _ready_thread(thread);
                irq_unlock(key);
        } else {
                irq_unlock(key);
                if (timeout->func) {
                        timeout->func(timeout);
                }
        }
}

/*
 * Loop over all expired timeouts and handle them one by one. Should be called
 * with interrupts unlocked: interrupts will be locked on each interation only
 * for the amount of time necessary.
 */

static void _handle_expired_timeouts(sys_dlist_t *expired)
{
        struct _timeout *timeout, *next;

        SYS_DLIST_FOR_EACH_CONTAINER_SAFE(expired, timeout, next, node) {
                _handle_one_expired_timeout(timeout);
        }
}

/* returns _INACTIVE if the timer is not active */
int _abort_timeout(struct _timeout *timeout)
{
        if (timeout->delta_ticks_from_prev == _INACTIVE) {
                return _INACTIVE;
        }

        if (!sys_dlist_is_tail(&_timeout_q, &timeout->node)) {
                sys_dnode_t *next_node =
                        sys_dlist_peek_next(&_timeout_q, &timeout->node);
                struct _timeout *next = (struct _timeout *)next_node;

                next->delta_ticks_from_prev += timeout->delta_ticks_from_prev;
        }
        sys_dlist_remove(&timeout->node);
        timeout->delta_ticks_from_prev = _INACTIVE;

        return 0;
}

/* returns _INACTIVE if the timer has already expired */
int _abort_thread_timeout(struct k_thread *thread)
{
        return _abort_timeout(&thread->base.timeout);
}

static inline void _dump_timeout(struct _timeout *timeout, int extra_tab)
{
#ifdef CONFIG_KERNEL_DEBUG
        char *tab = extra_tab ? "\t" : "";

        K_DEBUG("%stimeout %p, prev: %p, next: %p\n"
                "%s\tthread: %p\n"
                "%s\tticks remaining: %d\n"
                "%s\tfunction: %p\n",
                tab, timeout, timeout->node.prev, timeout->node.next,
                tab, timeout->thread,
                tab, timeout->delta_ticks_from_prev,
                tab, timeout->func);
#endif
}

static inline void _dump_timeout_q(void)
{
#ifdef CONFIG_KERNEL_DEBUG
        struct _timeout *timeout;

        K_DEBUG("_timeout_q: %p, head: %p, tail: %p\n",
                &_timeout_q, _timeout_q.head, _timeout_q.tail);

        SYS_DLIST_FOR_EACH_CONTAINER(&_timeout_q, timeout, node) {
                _dump_timeout(timeout, 1);
        }
#endif
}

/* find the closest deadline in the timeout queue */

s32_t _get_next_timeout_expiry(void)
{
        struct _timeout *t = (struct _timeout *)
                             sys_dlist_peek_head(&_timeout_q);

        return t ? t->delta_ticks_from_prev : K_FOREVER;
}

/*
 * Add timeout to timeout queue. Record waiting thread and wait queue if any.
 *
 * Cannot handle timeout == 0 and timeout == K_FOREVER.
 *
 * If the new timeout is expiring on the same system clock tick as other
 * timeouts already present in the _timeout_q, it is be _prepended_ to these
 * timeouts. This allows exiting the loop sooner, which is good, since
 * interrupts are locked while trying to find the insert point. Note that the
 * timeouts are then processed in the _reverse order_ if they expire on the
 * same tick.
 *
 * This should not cause problems to applications, unless they really expect
 * two timeouts queued very close to one another to expire in the same order
 * they were queued. This could be changed at the cost of potential longer
 * interrupt latency.
 *
 * Must be called with interrupts locked.
 */

void _add_timeout(struct _timeout *timeout,
                  _timeout_func_t fn, s32_t timeout_in_ticks)
{
        __ASSERT(timeout_in_ticks >= 0, "");
        __ASSERT(fn == timeout->func, "");

        timeout->delta_ticks_from_prev = timeout_in_ticks;

        K_DEBUG("before adding timeout %p\n", timeout);
        _dump_timeout(timeout, 0);
        _dump_timeout_q();

        /* If timer is submitted to expire ASAP with
         * timeout_in_ticks (duration) as zero value,
         * then handle timeout immedately without going
         * through timeout queue.
         */
        if (!timeout_in_ticks) {
                _handle_one_expired_timeout(timeout);
                return;
        }

        s32_t *delta = &timeout->delta_ticks_from_prev;
        struct _timeout *in_q;

#ifdef CONFIG_TICKLESS_KERNEL
        /*
         * If some time has already passed since timer was last
         * programmed, then that time needs to be accounted when
         * inserting the new timeout. We account for this
         * by adding the already elapsed time to the new timeout.
         * This is like adding this timout back in history.
         */
        u32_t adjusted_timeout;

        *delta += (int)(z_clock_uptime() - z_last_tick_announced);

        adjusted_timeout = *delta;
#endif
        SYS_DLIST_FOR_EACH_CONTAINER(&_timeout_q, in_q, node) {
                if (*delta <= in_q->delta_ticks_from_prev) {
                        in_q->delta_ticks_from_prev -= *delta;
                        sys_dlist_insert_before(&_timeout_q, &in_q->node,
                                                &timeout->node);
                        goto inserted;
                }

                *delta -= in_q->delta_ticks_from_prev;
        }

        sys_dlist_append(&_timeout_q, &timeout->node);

inserted:
        K_DEBUG("after adding timeout %p\n", timeout);
        _dump_timeout(timeout, 0);
        _dump_timeout_q();

#ifdef CONFIG_TICKLESS_KERNEL
        if (adjusted_timeout < _get_next_timeout_expiry()) {
                z_clock_set_timeout(adjusted_timeout, false);
        }
#endif
}

/*
 * Put thread on timeout queue. Record wait queue if any.
 *
 * Cannot handle timeout == 0 and timeout == K_FOREVER.
 *
 * Must be called with interrupts locked.
 */

void _add_thread_timeout(struct k_thread *thread, s32_t timeout_in_ticks)
{
        thread->base.timeout.thread = thread;
        _add_timeout(&thread->base.timeout, NULL, timeout_in_ticks);
}

s32_t z_timeout_remaining(struct _timeout *timeout)
{
        unsigned int key = irq_lock();
        s32_t remaining_ticks;

        if (timeout->delta_ticks_from_prev == _INACTIVE) {
                remaining_ticks = 0;
        } else {
                /*
                 * compute remaining ticks by walking the timeout list
                 * and summing up the various tick deltas involved
                 */
                struct _timeout *t =
                        (struct _timeout *)sys_dlist_peek_head(&_timeout_q);

                remaining_ticks = t->delta_ticks_from_prev;
                while (t != timeout) {
                        t = (struct _timeout *)sys_dlist_peek_next(&_timeout_q,
                                                                   &t->node);
                        remaining_ticks += t->delta_ticks_from_prev;
                }
        }

        irq_unlock(key);
        return remaining_ticks;
}

#endif /* CONFIG_SYS_CLOCK_EXISTS */
kernel/timeout.c (new file, 243 lines)
@@ -0,0 +1,243 @@
/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <timeout_q.h>
#include <drivers/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <ksched.h>
#include <syscall_handler.h>

#define LOCKED(lck) for (k_spinlock_key_t __i = {}, \
                         __key = k_spin_lock(lck); \
                         !__i.key; \
                         k_spin_unlock(lck, __key), __i.key = 1)

static u64_t curr_tick;

static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

static struct k_spinlock timeout_lock;

static bool can_wait_forever;

/* During a call to z_clock_announce(), the "current" time is "ahead"
 * of the reference used by timeout_list by this amount.
 */
static int announce_advance;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;
#endif

static struct _timeout *first(void)
{
        sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

        return t == NULL ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

static struct _timeout *next(struct _timeout *t)
{
        sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

        return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node);
}

static void remove(struct _timeout *t)
{
        if (next(t) != NULL) {
                next(t)->dticks += t->dticks;
        }

        sys_dlist_remove(&t->node);
        t->dticks = _INACTIVE;
}

static s32_t adjust_elapsed(s32_t ticks)
{
        ticks -= z_clock_elapsed();
        return ticks < 0 ? 0 : ticks;
}

void _add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks)
{
        __ASSERT(to->dticks < 0, "");
        to->fn = fn;

        LOCKED(&timeout_lock) {
                struct _timeout *t;

                to->dticks = adjust_elapsed(ticks) + announce_advance;
                for (t = first(); t != NULL; t = next(t)) {
                        __ASSERT(t->dticks >= 0, "");

                        if (t->dticks > to->dticks) {
                                t->dticks -= to->dticks;
                                sys_dlist_insert_before(&timeout_list,
                                                        &t->node, &to->node);
                                break;
                        }
                        to->dticks -= t->dticks;
                }

                if (t == NULL) {
                        sys_dlist_append(&timeout_list, &to->node);
                }
        }

        z_clock_set_timeout(_get_next_timeout_expiry(), false);
}

int _abort_timeout(struct _timeout *to)
{
        int ret = _INACTIVE;

        LOCKED(&timeout_lock) {
                if (to->dticks != _INACTIVE) {
                        remove(to);
                        ret = 0;
                }
        }

        return ret;
}

s32_t z_timeout_remaining(struct _timeout *to)
{
        s32_t ticks = 0;

        if (to->dticks == _INACTIVE) {
                return 0;
        }

        LOCKED(&timeout_lock) {
                for (struct _timeout *t = first(); t != NULL; t = next(t)) {
                        ticks += t->dticks;
                        if (to == t) {
                                break;
                        }
                }
        }

        return ticks;
}

void z_clock_announce(s32_t ticks)
{
        struct _timeout *t = NULL;

#ifdef CONFIG_TIMESLICING
        z_time_slice(ticks);
#endif

        LOCKED(&timeout_lock) {
                curr_tick += ticks;
                announce_advance = ticks;
        }

        while (true) {
                LOCKED(&timeout_lock) {
                        t = first();
                        if (t != NULL) {
                                if (t->dticks <= announce_advance) {
                                        announce_advance -= t->dticks;
                                        t->dticks = 0;
                                        remove(t);
                                } else {
                                        t->dticks -= announce_advance;
                                        t = NULL;
                                }
                        }
                }

                if (t == NULL) {
                        break;
                }

                t->fn(t);
        }

        announce_advance = 0;
        z_clock_set_timeout(_get_next_timeout_expiry(), false);
}

s32_t _get_next_timeout_expiry(void)
{
        s32_t ret = 0;
        int max = can_wait_forever ? K_FOREVER : INT_MAX;

        LOCKED(&timeout_lock) {
                struct _timeout *to = first();

                ret = to == NULL ? max : adjust_elapsed(to->dticks);
        }

#ifdef CONFIG_TIMESLICING
        if (_current_cpu->slice_ticks && _current_cpu->slice_ticks < ret) {
                ret = _current_cpu->slice_ticks;
        }
#endif
        return ret;
}

int k_enable_sys_clock_always_on(void)
{
        int ret = !can_wait_forever;

        can_wait_forever = 0;
        return ret;
}

void k_disable_sys_clock_always_on(void)
{
        can_wait_forever = 1;
}

s64_t z_tick_get(void)
{
        u64_t t = 0;

        LOCKED(&timeout_lock) {
                t = curr_tick + z_clock_elapsed();
        }
        return t;
}

u32_t z_tick_get_32(void)
{
        /* Returning just the low word doesn't require locking as the
         * API is by definition at risk of overflow
         */
        return z_clock_elapsed() + (u32_t)curr_tick;
}

u32_t _impl_k_uptime_get_32(void)
{
        return __ticks_to_ms(z_tick_get_32());
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_uptime_get_32)
{
        return _impl_k_uptime_get_32();
}
#endif

s64_t _impl_k_uptime_get(void)
{
        return __ticks_to_ms(z_tick_get());
}

#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(k_uptime_get, ret_p)
{
        u64_t *ret = (u64_t *)ret_p;

        Z_OOPS(Z_SYSCALL_MEMORY_WRITE(ret, sizeof(*ret)));
        *ret = _impl_k_uptime_get();
        return 0;
}
#endif
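A side note on the LOCKED() helper defined at the top of kernel/timeout.c above, since the idiom is easy to misread: the for-loop header takes the spinlock, the body runs exactly once, and the unlock sits in the loop's "increment" expression, so it only executes when control falls out of the bottom of the block (the body is not supposed to break or return out of it, and the code above never does). A minimal usage sketch, assuming the LOCKED() macro and struct k_spinlock from the file above; my_lock, shared_count, and bump() are illustrative names, not part of the patch:

static struct k_spinlock my_lock;
static unsigned int shared_count;

void bump(void)
{
        /* Body executes once with my_lock held; k_spin_unlock() runs as the
         * for-loop increment clause when the block completes normally.
         */
        LOCKED(&my_lock) {
                shared_count++;
        }
}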
@@ -202,7 +202,7 @@ u32_t _impl_k_timer_status_sync(struct k_timer *timer)
        u32_t result = timer->status;

        if (result == 0) {
                if (timer->timeout.delta_ticks_from_prev != _INACTIVE) {
                if (timer->timeout.dticks != _INACTIVE) {
                        /* wait for timer to expire or stop */
                        (void)_pend_current_thread(key, &timer->wait_q, K_FOREVER);
@@ -362,8 +362,7 @@ void thread_producer_get_msgq_w_cxt_switch(void *p1, void *p2, void *p3)

void thread_consumer_get_msgq_w_cxt_switch(void *p1, void *p2, void *p3)
{
        producer_get_w_cxt_switch_tid->base.timeout.delta_ticks_from_prev =
                _EXPIRED;
        producer_get_w_cxt_switch_tid->base.timeout.dticks =_EXPIRED;
        __read_swap_end_time_value = 1;
        TIMING_INFO_PRE_READ();
        __msg_q_get_w_cxt_start_time = TIMING_INFO_OS_GET_TIME();