kernel: remove K_TIMING thread flag

Whether a thread had a timeout in progress was tracked in two places: the
K_TIMING thread flag bit, and whether the thread's timeout's
delta_ticks_from_prev field was -1. This duplication could cause
discrepancies if the two got out of sync, and there was no benefit to
having both.

Since timeouts that are not part of a thread already rely on the value of
delta_ticks_from_prev, standardize on it.

With the K_TIMING bit removed from the thread's flags, K_READY no longer
reflects reality. It is removed and replaced by
_is_thread_prevented_from_running(), which looks only at the relevant state
flags. A thread is now ready when it is not prevented from running and does
not have an active timeout.

Change-Id: I902ef9fb7801b00626df491f5108971817750daa
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
Authored by Benjamin Walsh on 2016-11-23 22:15:44 -05:00; committed by Anas Nashif
commit a36e0cf651
8 changed files with 63 additions and 64 deletions
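
Condensed, the checks that replace K_TIMING and the old K_READY mask test
look like this (a restatement of the hunks below, for orientation only, not
additional code):

/* "has an active timeout" is now derived from the timeout itself */
static inline int _is_thread_timeout_active(struct k_thread *thread)
{
#ifdef CONFIG_SYS_CLOCK_EXISTS
	return thread->base.timeout.delta_ticks_from_prev != -1;
#else
	return 0;
#endif
}

/* only the state flags that actually block execution are consulted */
static inline int _is_thread_prevented_from_running(struct k_thread *thread)
{
	return thread->base.flags & (K_PENDING | K_PRESTART | K_DEAD |
				     K_DUMMY | K_SUSPENDED);
}

/* ready == not blocked by a state flag and no timeout in flight */
static inline int _is_thread_ready(struct k_thread *thread)
{
	return !(_is_thread_prevented_from_running(thread) ||
		 _is_thread_timeout_active(thread));
}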

View file

@@ -33,11 +33,6 @@
/* thread is defined statically */
#define K_STATIC (1 << 8)
#define K_READY 0
/* Thread is waiting on a timeout */
#define K_TIMING (1 << 12)
/* Thread is waiting on an object */
#define K_PENDING (1 << 13)
@@ -53,9 +48,6 @@
/* Not a real thread */
#define K_DUMMY (1 << 17)
#define K_EXECUTION_MASK \
(K_TIMING | K_PENDING | K_PRESTART | K_DEAD | K_SUSPENDED | K_DUMMY)
#if defined(CONFIG_FP_SHARING)
/* thread uses floating point registers */
#define K_FP_REGS (1 << 4)

View file

@@ -258,22 +258,14 @@ static inline void _mark_thread_as_not_suspended(struct k_thread *thread)
thread->base.flags &= ~K_SUSPENDED;
}
/* mark a thread as being in the timer queue */
static inline void _mark_thread_as_timing(struct k_thread *thread)
/* check if a thread is on the timeout queue */
static inline int _is_thread_timeout_active(struct k_thread *thread)
{
thread->base.flags |= K_TIMING;
}
/* mark a thread as not being in the timer queue */
static inline void _mark_thread_as_not_timing(struct k_thread *thread)
{
thread->base.flags &= ~K_TIMING;
}
/* check if a thread is on the timer queue */
static inline int _is_thread_timing(struct k_thread *thread)
{
return !!(thread->base.flags & K_TIMING);
#ifdef CONFIG_SYS_CLOCK_EXISTS
return thread->base.timeout.delta_ticks_from_prev != -1;
#else
return 0;
#endif
}
static inline int _has_thread_started(struct k_thread *thread)
@@ -281,10 +273,19 @@ static inline int _has_thread_started(struct k_thread *thread)
return !(thread->base.flags & K_PRESTART);
}
static inline int _is_thread_prevented_from_running(struct k_thread *thread)
{
return thread->base.flags & (K_PENDING | K_PRESTART |
K_DEAD | K_DUMMY |
K_SUSPENDED);
}
/* check if a thread is ready */
static inline int _is_thread_ready(struct k_thread *thread)
{
return (thread->base.flags & K_EXECUTION_MASK) == K_READY;
return !(_is_thread_prevented_from_running(thread) ||
_is_thread_timeout_active(thread));
}
/* mark a thread as pending in its TCS */
@@ -305,11 +306,22 @@ static inline int _is_thread_pending(struct k_thread *thread)
return !!(thread->base.flags & K_PENDING);
}
/*
* Mark the thread as not being in the timer queue. If this makes it ready,
* then add it to the ready queue according to its priority.
/**
* @brief Mark a thread as started
*
* This routine must be called with interrupts locked.
*/
static inline void _mark_thread_as_started(struct k_thread *thread)
{
thread->base.flags &= ~K_PRESTART;
}
/*
* Put the thread in the ready queue according to its priority if it is not
* blocked for another reason (eg. suspended).
*
* Must be called with interrupts locked.
*/
/* must be called with interrupts locked */
static inline void _ready_thread(struct k_thread *thread)
{
__ASSERT(_is_prio_higher(thread->base.prio, K_LOWEST_THREAD_PRIO) ||
@@ -324,24 +336,14 @@ static inline void _ready_thread(struct k_thread *thread)
"thread %p prio too high (id %d, cannot be higher than %d)",
thread, thread->base.prio, K_HIGHEST_THREAD_PRIO);
/* K_PRESTART is needed to handle the start-with-delay case */
_reset_thread_states(thread, K_TIMING|K_PRESTART);
/* needed to handle the start-with-delay case */
_mark_thread_as_started(thread);
if (_is_thread_ready(thread)) {
_add_thread_to_ready_q(thread);
}
}
/**
* @brief Mark a thread as started
*
* This routine must be called with interrupts locked.
*/
static inline void _mark_thread_as_started(struct k_thread *thread)
{
thread->base.flags &= ~K_PRESTART;
}
/**
* @brief Mark thread as dead
*

View file

@@ -98,6 +98,8 @@ static inline struct _timeout *_handle_one_timeout(
struct _timeout *t = (void *)sys_dlist_get(timeout_q);
struct k_thread *thread = t->thread;
t->delta_ticks_from_prev = -1;
K_DEBUG("timeout %p\n", t);
if (thread != NULL) {
_unpend_thread_timing_out(thread, t);
@@ -105,14 +107,6 @@ static inline struct _timeout *_handle_one_timeout(
} else if (t->func) {
t->func(t);
}
/*
* Note: t->func() may add timeout again. Make sure that
* delta_ticks_from_prev is set to -1 only if timeout is
* still expired (delta_ticks_from_prev == 0)
*/
if (t->delta_ticks_from_prev == 0) {
t->delta_ticks_from_prev = -1;
}
return (struct _timeout *)sys_dlist_peek_head(timeout_q);
}
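
Why the trailing conditional can go away: delta_ticks_from_prev is now
cleared before any handler runs, so if the handler re-queues the timeout,
the field is simply overwritten with a fresh non-negative delta and nothing
needs to be patched up afterwards. Restated with the reasoning as comments
(the elision stands for lines not shown in the hunk above):

t->delta_ticks_from_prev = -1;   /* timeout marked inactive before handlers run */

if (thread != NULL) {
	_unpend_thread_timing_out(thread, t);
	/* ... unchanged thread handling elided in the hunk ... */
} else if (t->func) {
	t->func(t);   /* may re-queue the timeout; if it does, whatever queues
	               * it stores a fresh delta >= 0, so no fix-up is needed */
}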

View file

@@ -34,8 +34,21 @@ static ALWAYS_INLINE void _init_thread_timeout(struct _thread_base *thread_base)
{
ARG_UNUSED(thread_base);
}
#define _add_thread_timeout(thread, wait_q, timeout) do { } while (0)
static inline int _abort_thread_timeout(struct k_thread *thread) { return 0; }
static ALWAYS_INLINE void
_add_thread_timeout(struct k_thread *thread, _wait_q_t *wait_q, int32_t timeout)
{
ARG_UNUSED(thread);
ARG_UNUSED(wait_q);
ARG_UNUSED(timeout);
}
static ALWAYS_INLINE int _abort_thread_timeout(struct k_thread *thread)
{
ARG_UNUSED(thread);
return 0;
}
#define _get_next_timeout_expiry() (K_FOREVER)
#endif
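
A note on the hunk above: replacing the do-nothing macro with static
ALWAYS_INLINE stubs presumably keeps the same prototypes in both
configurations, so callers get argument type checking even when
CONFIG_SYS_CLOCK_EXISTS is off, while the ARG_UNUSED() calls silence
unused-parameter warnings (the rationale is not stated in the commit). A
call like the one in the scheduler compiles against the real signature
either way:

	int32_t ticks = _TICK_ALIGN + _ms_to_ticks(timeout);
	_add_thread_timeout(thread, wait_q, ticks);   /* type-checked in both
	                                               * configurations; expands
	                                               * to a no-op without a
	                                               * system clock */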

View file

@@ -33,7 +33,6 @@ void _legacy_sleep(int32_t ticks)
int key = irq_lock();
_mark_thread_as_timing(_current);
_remove_thread_from_ready_q(_current);
_add_thread_timeout(_current, NULL, ticks);

View file

@@ -168,9 +168,9 @@ void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, int32_t timeout)
_mark_thread_as_pending(thread);
if (timeout != K_FOREVER) {
_mark_thread_as_timing(thread);
_add_thread_timeout(thread, wait_q,
_TICK_ALIGN + _ms_to_ticks(timeout));
int32_t ticks = _TICK_ALIGN + _ms_to_ticks(timeout);
_add_thread_timeout(thread, wait_q, ticks);
}
}
@@ -306,12 +306,11 @@ void k_sleep(int32_t duration)
return;
}
int32_t ticks = _TICK_ALIGN + _ms_to_ticks(duration);
int key = irq_lock();
_mark_thread_as_timing(_current);
_remove_thread_from_ready_q(_current);
_add_thread_timeout(_current, NULL,
_TICK_ALIGN + _ms_to_ticks(duration));
_add_thread_timeout(_current, NULL, ticks);
_Swap(key);
}

View file

@@ -222,8 +222,8 @@ static int handle_sem_group(struct k_sem *sem, struct k_thread *thread)
*/
if (!_is_thread_ready(desc->thread)) {
_reset_thread_states(desc->thread, K_PENDING | K_TIMING);
_abort_thread_timeout(desc->thread);
_mark_thread_as_not_pending(desc->thread);
if (_is_thread_ready(desc->thread)) {
_add_thread_to_ready_q(desc->thread);
}

View file

@@ -220,9 +220,9 @@ static void schedule_new_thread(struct k_thread *thread, int32_t delay)
if (delay == 0) {
start_thread(thread);
} else {
_mark_thread_as_timing(thread);
_add_thread_timeout(thread, NULL,
_TICK_ALIGN + _ms_to_ticks(delay));
int32_t ticks = _TICK_ALIGN + _ms_to_ticks(delay);
_add_thread_timeout(thread, NULL, ticks);
}
#else
ARG_UNUSED(delay);
@@ -252,7 +252,8 @@ int k_thread_cancel(k_tid_t tid)
int key = irq_lock();
if (_has_thread_started(thread) || !_is_thread_timing(thread)) {
if (_has_thread_started(thread) ||
!_is_thread_timeout_active(thread)) {
irq_unlock(key);
return -EINVAL;
}
@@ -365,9 +366,8 @@ void _k_thread_single_abort(struct k_thread *thread)
if (_is_thread_pending(thread)) {
_unpend_thread(thread);
}
if (_is_thread_timing(thread)) {
if (_is_thread_timeout_active(thread)) {
_abort_thread_timeout(thread);
_mark_thread_as_not_timing(thread);
}
}
_mark_thread_as_dead(thread);