kernel/timeout: Prepare unification of timeout/thread wait_q fields

The existing timeout API wants to store a wait_q on which the thread
is waiting, but it only uses that value in one spot (and there only as
a boolean flag indicating "this thread is waiting on a wait_q").

As it happens threads can already store their own backpointers to a
wait_q (needed for the SCALABLE scheduler backend), so we should use
that instead.

This patch doesn't actually perform that unification yet.  It
reorganizes things such that the pended_on field is always set at the
point of timeout interaction, and adds a bunch of asserts to make 100%
sure the logic is correct.  The next patch will modify the API.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
Andy Ross 2018-09-26 13:19:31 -07:00 committed by Anas Nashif
commit 15d520819d
3 changed files with 8 additions and 16 deletions

View file

@@ -393,12 +393,10 @@ struct _thread_base {
struct rbnode qnode_rb;
};
#ifdef CONFIG_WAITQ_SCALABLE
/* wait queue on which the thread is pended (needed only for
* trees, not dumb lists)
*/
_wait_q_t *pended_on;
#endif
/* user facing 'thread options'; values defined in include/kernel.h */
u8_t user_options;

View file

@@ -307,6 +307,11 @@ static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
_remove_thread_from_ready_q(thread);
_mark_thread_as_pending(thread);
if (wait_q != NULL) {
thread->base.pended_on = wait_q;
_priq_wait_add(&wait_q->waitq, thread);
}
/* The timeout handling is currently synchronized external to
* the scheduler using the legacy global lock. Should fix
* that.
@@ -319,13 +324,6 @@ static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
irq_unlock(key);
}
if (wait_q != NULL) {
#ifdef CONFIG_WAITQ_SCALABLE
thread->base.pended_on = wait_q;
#endif
_priq_wait_add(&wait_q->waitq, thread);
}
sys_trace_thread_pend(thread);
}
@@ -337,14 +335,9 @@ void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
static _wait_q_t *pended_on(struct k_thread *thread)
{
#ifdef CONFIG_WAITQ_SCALABLE
__ASSERT_NO_MSG(thread->base.pended_on);
return thread->base.pended_on;
#else
ARG_UNUSED(thread);
return NULL;
#endif
}
struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
@@ -368,9 +361,7 @@ void _unpend_thread_no_timeout(struct k_thread *thread)
_mark_thread_as_not_pending(thread);
}
#if defined(CONFIG_ASSERT) && defined(CONFIG_WAITQ_SCALABLE)
thread->base.pended_on = NULL;
#endif
}
int _pend_current_thread(int key, _wait_q_t *wait_q, s32_t timeout)

View file

@@ -366,6 +366,7 @@ void _init_thread_timeout(struct _thread_base *thread_base)
static inline void _unpend_thread_timing_out(struct k_thread *thread,
struct _timeout *timeout_obj)
{
__ASSERT(thread->base.pended_on == (void*)timeout_obj->wait_q, "");
if (timeout_obj->wait_q) {
_unpend_thread_no_timeout(thread);
thread->base.timeout.wait_q = NULL;
@@ -505,6 +506,7 @@ void _add_timeout(struct k_thread *thread, struct _timeout *timeout,
_wait_q_t *wait_q, s32_t timeout_in_ticks)
{
__ASSERT(timeout_in_ticks >= 0, "");
__ASSERT(!thread || thread->base.pended_on == wait_q, "");
timeout->delta_ticks_from_prev = timeout_in_ticks;
timeout->thread = thread;
@@ -578,6 +580,7 @@ void _add_thread_timeout(struct k_thread *thread,
_wait_q_t *wait_q,
s32_t timeout_in_ticks)
{
__ASSERT(thread && thread->base.pended_on == wait_q, "");
_add_timeout(thread, &thread->base.timeout, wait_q, timeout_in_ticks);
}