kernel: enhance realtime-ness when handling timeouts
The number of timeouts that expire on a given tick is arbitrary. While they
were being handled, interrupts were locked, which prevented higher-priority
interrupts from preempting the system clock timer handler.

Instead of looping over the list of timeouts with interrupts locked
(necessary because the list can be manipulated at interrupt level), timeouts
are now dequeued one by one, with interrupts unlocked between each, and put
on a local 'expired' queue that is subsequently drained with interrupts
unlocked. This scheme relies on the fact that no timeout can be prepended
onto the timeout queue while expired timeouts are being removed from it,
since adding a timeout of 0 ticks is prohibited.

Timer handlers now run with interrupts unlocked: the previous behaviour added
potentially horrible non-determinism to the handling of timeouts.

Change-Id: I709085134029ea2ad73e167dc915b956114e14c2
Signed-off-by: Benjamin Walsh <walsh.benj@gmail.com>
parent 5596f78c08
commit b889fa8b20
5 changed files with 175 additions and 59 deletions
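The core pattern introduced by this commit, shown as a minimal standalone
sketch: timeouts are moved one at a time from the shared timeout queue to a
local list, opening an interrupt window between each move, and the handlers
then run entirely with interrupts unlocked. Everything below other than the
sys_dlist/irq_lock APIs is an illustrative placeholder (drain_expired,
is_expired, handle_one are not the kernel's actual identifiers; see the diff
for the real code).

/*
 * Sketch only: is_expired() and handle_one() stand in for the real
 * per-timeout test and handler.
 */
static void drain_expired(sys_dlist_t *timeout_q)
{
	sys_dlist_t expired;
	sys_dnode_t *node, *next;
	unsigned int key;

	sys_dlist_init(&expired);	/* local queue, private to this ISR */

	key = irq_lock();
	while ((node = sys_dlist_peek_head(timeout_q)) != NULL &&
	       is_expired(node)) {
		/* move one node to the local queue while still locked */
		sys_dlist_remove(node);
		sys_dlist_append(&expired, node);

		/* open a window for higher-priority interrupts */
		irq_unlock(key);
		key = irq_lock();
	}
	irq_unlock(key);

	/* real handling happens with interrupts unlocked */
	SYS_DLIST_FOR_EACH_NODE_SAFE(&expired, node, next) {
		sys_dlist_remove(node);
		handle_one(node);
	}
}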
@@ -264,6 +264,15 @@ static inline void _mark_thread_as_not_suspended(struct k_thread *thread)
 	thread->base.flags &= ~K_SUSPENDED;
 }

+static ALWAYS_INLINE int _is_thread_timeout_expired(struct k_thread *thread)
+{
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	return thread->base.timeout.delta_ticks_from_prev == _EXPIRED;
+#else
+	return 0;
+#endif
+}
+
 /* check if a thread is on the timeout queue */
 static inline int _is_thread_timeout_active(struct k_thread *thread)
 {
@@ -382,10 +391,36 @@ static inline struct k_thread *_peek_first_pending_thread(_wait_q_t *wait_q)
 	return (struct k_thread *)sys_dlist_peek_head(wait_q);
 }

+static inline struct k_thread *_get_thread_to_unpend(_wait_q_t *wait_q)
+{
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	if (_is_in_isr()) {
+		/* skip threads that have an expired timeout */
+		sys_dlist_t *q = (sys_dlist_t *)wait_q;
+		sys_dnode_t *cur, *next;
+
+		SYS_DLIST_FOR_EACH_NODE_SAFE(q, cur, next) {
+			struct k_thread *thread = (struct k_thread *)cur;
+
+			if (_is_thread_timeout_expired(thread)) {
+				continue;
+			}
+
+			sys_dlist_remove(cur);
+			return thread;
+		}
+		return NULL;
+	}
+#endif
+
+	return (struct k_thread *)sys_dlist_get(wait_q);
+}
+
 /* unpend the first thread from a wait queue */
 /* must be called with interrupts locked */
 static inline struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
 {
-	struct k_thread *thread = (struct k_thread *)sys_dlist_get(wait_q);
+	struct k_thread *thread = _get_thread_to_unpend(wait_q);

 	if (thread) {
 		_mark_thread_as_not_pending(thread);
@@ -83,67 +83,63 @@ static inline void _unpend_thread_timing_out(struct k_thread *thread,
 }

 /*
- * Handle one expired timeout.
- *
- * This removes the thread from the timeout queue head, and also removes it
- * from the wait queue it is on if waiting for an object. In that case,
- * the return value is kept as -EAGAIN, set previously in _Swap().
- *
- * Must be called with interrupts locked.
+ * Handle one timeout from the expired timeout queue. Removes it from the wait
+ * queue it is on if waiting for an object; in this case, the return value is
+ * kept as -EAGAIN, set previously in _Swap().
  */

-static inline struct _timeout *_handle_one_timeout(
-	sys_dlist_t *timeout_q)
+static inline void _handle_one_expired_timeout(struct _timeout *timeout)
 {
-	struct _timeout *t = (void *)sys_dlist_get(timeout_q);
-	struct k_thread *thread = t->thread;
+	struct k_thread *thread = timeout->thread;
+	unsigned int key = irq_lock();

-	t->delta_ticks_from_prev = _INACTIVE;
+	timeout->delta_ticks_from_prev = _INACTIVE;

-	K_DEBUG("timeout %p\n", t);
-	if (thread != NULL) {
-		_unpend_thread_timing_out(thread, t);
+	K_DEBUG("timeout %p\n", timeout);
+	if (thread) {
+		_unpend_thread_timing_out(thread, timeout);
 		_ready_thread(thread);
-	} else if (t->func) {
-		t->func(t);
+		irq_unlock(key);
+	} else {
+		irq_unlock(key);
+		if (timeout->func) {
+			timeout->func(timeout);
+		}
 	}
-
-	return (struct _timeout *)sys_dlist_peek_head(timeout_q);
 }

 /*
- * Loop over all expired timeouts and handle them one by one.
- * Must be called with interrupts locked.
+ * Loop over all expired timeouts and handle them one by one. Should be called
+ * with interrupts unlocked: interrupts will be locked on each iteration only
+ * for the amount of time necessary.
  */

-static inline void _handle_timeouts(void)
+static inline void _handle_expired_timeouts(sys_dlist_t *expired)
 {
-	sys_dlist_t *timeout_q = &_timeout_q;
-	struct _timeout *next;
+	sys_dnode_t *timeout, *next;

-	next = (struct _timeout *)sys_dlist_peek_head(timeout_q);
-	while (next && next->delta_ticks_from_prev == 0) {
-		next = _handle_one_timeout(timeout_q);
+	SYS_DLIST_FOR_EACH_NODE_SAFE(expired, timeout, next) {
+		sys_dlist_remove(timeout);
+		_handle_one_expired_timeout((struct _timeout *)timeout);
 	}
 }

-/* returns _INACTIVE if the timer has already expired */
-static inline int _abort_timeout(struct _timeout *t)
+/* returns _INACTIVE if the timer is not active */
+static inline int _abort_timeout(struct _timeout *timeout)
 {
-	sys_dlist_t *timeout_q = &_timeout_q;
-
-	if (t->delta_ticks_from_prev == _INACTIVE) {
+	if (timeout->delta_ticks_from_prev == _INACTIVE) {
 		return _INACTIVE;
 	}

-	if (!sys_dlist_is_tail(timeout_q, &t->node)) {
-		struct _timeout *next =
-			(struct _timeout *)sys_dlist_peek_next(timeout_q,
-							       &t->node);
-		next->delta_ticks_from_prev += t->delta_ticks_from_prev;
+	if (!sys_dlist_is_tail(&_timeout_q, &timeout->node)) {
+		sys_dnode_t *next_node =
+			sys_dlist_peek_next(&_timeout_q, &timeout->node);
+		struct _timeout *next = (struct _timeout *)next_node;
+
+		next->delta_ticks_from_prev += timeout->delta_ticks_from_prev;
 	}
-	sys_dlist_remove(&t->node);
-	t->delta_ticks_from_prev = _INACTIVE;
+	sys_dlist_remove(&timeout->node);
+	timeout->delta_ticks_from_prev = _INACTIVE;

 	return 0;
 }
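A note on the delta bookkeeping that _abort_timeout preserves: the timeout
queue stores each entry's expiry as ticks relative to its predecessor, so
removing an entry mid-queue must fold its remaining delta into its successor.
A hypothetical worked example (the tick values are invented for illustration):

/*
 * Queue with absolute expiries at ticks 3, 5 and 9, stored as deltas:
 *
 *     A(delta 3) -> B(delta 2) -> C(delta 4)     absolute: 3, 5, 9
 *
 * Aborting B adds its remaining delta to C, keeping C at tick 9:
 *
 *     A(delta 3) -> C(delta 6)                   absolute: 3, 9
 *
 * This is exactly the
 * next->delta_ticks_from_prev += timeout->delta_ticks_from_prev
 * adjustment in the hunk above.
 */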
@@ -185,6 +181,8 @@ static int _is_timeout_insert_point(sys_dnode_t *test, void *timeout)
  * Add timeout to timeout queue. Record waiting thread and wait queue if any.
  *
+ * Cannot handle timeout == 0 and timeout == K_FOREVER.
+ *
  * Must be called with interrupts locked.
  */

 static inline void _add_timeout(struct k_thread *thread,
@@ -226,6 +224,8 @@ static inline void _add_timeout(struct k_thread *thread,
  * Put thread on timeout queue. Record wait queue if any.
  *
+ * Cannot handle timeout == 0 and timeout == K_FOREVER.
+ *
  * Must be called with interrupts locked.
  */

 static inline void _add_thread_timeout(struct k_thread *thread,
@@ -199,6 +199,9 @@ static int handle_sem_group(struct k_sem *sem, struct k_thread *thread)
 		struct k_thread *dummy_thread =
 			(struct k_thread *)&sem_thread->dummy;

+		if (_is_thread_timeout_expired(dummy_thread)) {
+			continue;
+		}
 		_abort_thread_timeout(dummy_thread);
 		_unpend_thread(dummy_thread);

@@ -178,21 +178,69 @@ uint32_t k_uptime_delta_32(int64_t *reftime)
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 #include <wait_q.h>

-static inline void handle_expired_timeouts(int32_t ticks)
+/*
+ * Handle timeouts by dequeuing the expired ones from _timeout_q and queuing
+ * them on a local one, then doing the real handling from that queue. This
+ * allows going through the second queue without needing to have the
+ * interrupts locked since it is a local queue. Each expired timeout is marked
+ * as _EXPIRED so that an ISR preempting us and releasing an object on which
+ * a thread was timing out and expired will not give the object to that
+ * thread.
+ *
+ * Always called from interrupt level, and always only from the system clock
+ * interrupt.
+ */
+static inline void handle_timeouts(int32_t ticks)
 {
+	sys_dlist_t expired;
+	unsigned int key;
+
+	/* init before locking interrupts */
+	sys_dlist_init(&expired);
+
+	key = irq_lock();
+
 	struct _timeout *head =
 		(struct _timeout *)sys_dlist_peek_head(&_timeout_q);

 	K_DEBUG("head: %p, delta: %d\n",
 		head, head ? head->delta_ticks_from_prev : -2112);

-	if (head) {
-		head->delta_ticks_from_prev -= ticks;
-		_handle_timeouts();
+	if (!head) {
+		irq_unlock(key);
+		return;
 	}
+
+	head->delta_ticks_from_prev -= ticks;
+
+	/*
+	 * Dequeue all expired timeouts from _timeout_q, relieving irq lock
+	 * pressure between each of them, allowing handling of higher priority
+	 * interrupts. We know that no new timeout will be prepended in front
+	 * of a timeout whose delta is 0, since timeouts of 0 ticks are
+	 * prohibited.
+	 */
+	sys_dnode_t *next = &head->node;
+	struct _timeout *timeout = (struct _timeout *)next;
+
+	while (timeout && timeout->delta_ticks_from_prev == 0) {
+
+		sys_dlist_remove(next);
+		sys_dlist_append(&expired, next);
+		timeout->delta_ticks_from_prev = _EXPIRED;
+
+		irq_unlock(key);
+		key = irq_lock();
+
+		next = sys_dlist_peek_head(&_timeout_q);
+		timeout = (struct _timeout *)next;
+	}
+
+	irq_unlock(key);
+
+	_handle_expired_timeouts(&expired);
 }
 #else
-	#define handle_expired_timeouts(ticks) do { } while ((0))
+	#define handle_timeouts(ticks) do { } while ((0))
 #endif

 #ifdef CONFIG_TIMESLICING
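To make the role of the _EXPIRED marking concrete, here is a walkthrough of
the race it closes. The semaphore scenario is an assumed example, not taken
verbatim from the tree:

/*
 * 1. The system clock ISR moves thread T's timed-out timeout onto the
 *    local 'expired' list, marks it _EXPIRED, and briefly unlocks irqs.
 * 2. A higher-priority ISR preempts and gives the semaphore T was
 *    pending on.
 * 3. _get_thread_to_unpend() sees T's timeout is _EXPIRED, skips T, and
 *    gives the semaphore to the next waiter (or leaves it available).
 * 4. Back in the clock ISR, T is made ready and completes its wait with
 *    -EAGAIN, as expected for a timed-out wait.
 */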
@@ -200,6 +248,16 @@ int32_t _time_slice_elapsed;
 int32_t _time_slice_duration = CONFIG_TIMESLICE_SIZE;
 int _time_slice_prio_ceiling = CONFIG_TIMESLICE_PRIORITY;

+/*
+ * Always called from interrupt level, and always only from the system clock
+ * interrupt, thus:
+ * - _current does not have to be protected, since it only changes at thread
+ *   level or when exiting a non-nested interrupt
+ * - _time_slice_elapsed does not have to be protected, since it can only
+ *   change in this function and at thread level
+ * - _time_slice_duration does not have to be protected, since it can only
+ *   change at thread level
+ */
 static void handle_time_slicing(int32_t ticks)
 {
 	if (_time_slice_duration == 0) {
@@ -212,8 +270,14 @@ static void handle_time_slicing(int32_t ticks)

 	_time_slice_elapsed += _ticks_to_ms(ticks);
 	if (_time_slice_elapsed >= _time_slice_duration) {
+
+		unsigned int key;
+
 		_time_slice_elapsed = 0;
+
+		key = irq_lock();
 		_move_thread_to_end_of_prio_q(_current);
+		irq_unlock(key);
 	}
 }
 #else
@@ -235,11 +299,13 @@ void _nano_sys_clock_tick_announce(int32_t ticks)

 	K_DEBUG("ticks: %d\n", ticks);

+	/* 64-bit value, ensure atomic access with irq lock */
 	key = irq_lock();
 	_sys_clock_tick_count += ticks;
-	handle_expired_timeouts(ticks);
-
-	handle_time_slicing(ticks);
-
 	irq_unlock(key);
+
+	handle_timeouts(ticks);
+
+	/* time slicing is basically handled like just yet another timeout */
+	handle_time_slicing(ticks);
 }
@@ -54,17 +54,19 @@ SYS_INIT(init_timer_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
  */
 void _timer_expiration_handler(struct _timeout *t)
 {
-	int key = irq_lock();
 	struct k_timer *timer = CONTAINER_OF(t, struct k_timer, timeout);
-	struct k_thread *pending_thread;
+	struct k_thread *thread;
+	unsigned int key;

 	/*
 	 * if the timer is periodic, start it again; don't add _TICK_ALIGN
 	 * since we're already aligned to a tick boundary
 	 */
 	if (timer->period > 0) {
+		key = irq_lock();
 		_add_timeout(NULL, &timer->timeout, &timer->wait_q,
 			     timer->period);
+		irq_unlock(key);
 	}

 	/* update timer's status */
@@ -74,17 +76,27 @@ void _timer_expiration_handler(struct _timeout *t)
 	if (timer->expiry_fn) {
 		timer->expiry_fn(timer);
 	}
-	/*
-	 * wake up the (only) thread waiting on the timer, if there is one;
-	 * don't invoke _Swap() since the timeout ISR called us, not a thread
-	 */
-	pending_thread = _unpend_first_thread(&timer->wait_q);
-	if (pending_thread) {
-		_ready_thread(pending_thread);
-		_set_thread_return_value(pending_thread, 0);
+
+	thread = (struct k_thread *)sys_dlist_peek_head(&timer->wait_q);
+
+	if (!thread) {
+		return;
 	}
+
+	/*
+	 * Interrupts _DO NOT_ have to be locked in this specific instance of
+	 * calling _unpend_thread() because a) this is the only place a thread
+	 * can be taken off this pend queue, and b) the only place a thread
+	 * can be put on the pend queue is at thread level, which of course
+	 * cannot interrupt the current context.
+	 */
+	_unpend_thread(thread);
+
+	key = irq_lock();
+	_ready_thread(thread);
+	irq_unlock(key);
+
+	_set_thread_return_value(thread, 0);
 }