kernel: Clean up _unpend_thread() API

Almost everywhere this was called, it was immediately followed by
_abort_thread_timeout(), for obvious reasons.  The only exceptions
were in timeout and k_timer expiration (unifying these two would be
another good cleanup), which are peripheral parts of the scheduler and
can plausibly use a more "internal" API.

So make the common case the default, and expose the old behavior as
_unpend_thread_no_timeout(), along with identical changes for
_unpend_first_thread().  This saves code bytes and simplifies the
scheduler surface area for future synchronization work.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Authored by Andy Ross on 2018-04-02 18:24:58 -07:00; committed by Anas Nashif
commit 22642cf309
16 changed files with 45 additions and 33 deletions
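
At a typical call site, the change reads as follows.  This is a minimal
sketch, assuming an illustrative waker pulling one waiter off a wait
queue; `wait_q` stands in for whichever object's queue is being drained,
and the identifiers mirror the real call sites in the diffs below:

	/* Before this commit, every waker paired the two calls by hand: */
	struct k_thread *th = _unpend_first_thread(&wait_q);

	if (th) {
		_abort_thread_timeout(th);	/* easy to omit by mistake */
		_ready_thread(th);
	}

	/* After it, _unpend_first_thread() aborts the timeout itself: */
	struct k_thread *th = _unpend_first_thread(&wait_q);

	if (th) {
		_ready_thread(th);
	}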


@@ -489,27 +489,27 @@ _find_first_thread_to_unpend(_wait_q_t *wait_q, struct k_thread *from)
 
 /* Unpend a thread from the wait queue it is on. Thread must be pending. */
 /* must be called with interrupts locked */
-static inline void _unpend_thread(struct k_thread *thread)
-{
-	__ASSERT(thread->base.thread_state & _THREAD_PENDING, "");
+void _unpend_thread(struct k_thread *thread);
 
-	sys_dlist_remove(&thread->base.k_q_node);
-	_mark_thread_as_not_pending(thread);
-}
+/* Same, but does not abort current timeout */
+void _unpend_thread_no_timeout(struct k_thread *thread);
 
 /* unpend the first thread from a wait queue */
 /* must be called with interrupts locked */
-static inline struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
+struct k_thread *_unpend_first_thread(_wait_q_t *wait_q);
+
+static inline struct k_thread *_unpend1_no_timeout(_wait_q_t *wait_q)
 {
 	struct k_thread *thread = _find_first_thread_to_unpend(wait_q, NULL);
 
 	if (thread) {
-		_unpend_thread(thread);
+		_unpend_thread_no_timeout(thread);
 	}
 
 	return thread;
 }
 
 #ifdef CONFIG_USERSPACE
 /**
  * Indicate whether the currently running thread has been configured to be


@@ -69,7 +69,7 @@ static inline void _unpend_thread_timing_out(struct k_thread *thread,
 					     struct _timeout *timeout_obj)
 {
 	if (timeout_obj->wait_q) {
-		_unpend_thread(thread);
+		_unpend_thread_no_timeout(thread);
 		thread->base.timeout.wait_q = NULL;
 	}
 }


@@ -260,7 +260,6 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 		if (mbox_message_match(tx_msg, rx_msg) == 0) {
 			/* take receiver out of rx queue */
 			_unpend_thread(receiving_thread);
-			_abort_thread_timeout(receiving_thread);
 
 			/* ready receiver for execution */
 			_set_thread_return_value(receiving_thread, 0);
@@ -441,7 +440,6 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
 		if (mbox_message_match(tx_msg, rx_msg) == 0) {
 			/* take sender out of mailbox's tx queue */
 			_unpend_thread(sending_thread);
-			_abort_thread_timeout(sending_thread);
 
 			irq_unlock(key);


@@ -119,7 +119,6 @@ void k_mem_slab_free(struct k_mem_slab *slab, void **mem)
 	if (pending_thread) {
 		_set_thread_return_value_with_data(pending_thread, 0, *mem);
-		_abort_thread_timeout(pending_thread);
 		_ready_thread(pending_thread);
 	} else {
 		**(char ***)mem = slab->free_list;


@@ -103,7 +103,6 @@ void k_mem_pool_free_id(struct k_mem_block_id *id)
 		struct k_thread *th = (void *)sys_dlist_peek_head(&p->wait_q);
 
 		_unpend_thread(th);
-		_abort_thread_timeout(th);
 		_ready_thread(th);
 		need_sched = 1;
 	}


@@ -92,7 +92,6 @@ int _impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
 			       q->msg_size);
 			/* wake up waiting thread */
 			_set_thread_return_value(pending_thread, 0);
-			_abort_thread_timeout(pending_thread);
 			_ready_thread(pending_thread);
 			_reschedule(key);
 			return 0;
@@ -182,7 +181,6 @@ int _impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
 			/* wake up waiting thread */
 			_set_thread_return_value(pending_thread, 0);
-			_abort_thread_timeout(pending_thread);
 			_ready_thread(pending_thread);
 			_reschedule(key);
 			return 0;
@@ -222,7 +220,6 @@ void _impl_k_msgq_purge(struct k_msgq *q)
 	/* wake up any threads that are waiting to write */
 	while ((pending_thread = _unpend_first_thread(&q->wait_q)) != NULL) {
 		_set_thread_return_value(pending_thread, -ENOMSG);
-		_abort_thread_timeout(pending_thread);
 		_ready_thread(pending_thread);
 	}


@@ -231,7 +231,6 @@ void _impl_k_mutex_unlock(struct k_mutex *mutex)
 		mutex, new_owner, new_owner ? new_owner->base.prio : -1000);
 
 	if (new_owner) {
-		_abort_thread_timeout(new_owner);
 		_ready_thread(new_owner);
 
 		irq_unlock(key);


@@ -335,7 +335,6 @@ static bool pipe_xfer_prepare(sys_dlist_t *xfer_list,
 		 * Add it to the transfer list.
 		 */
 		_unpend_thread(thread);
-		_abort_thread_timeout(thread);
 		sys_dlist_append(xfer_list, &thread->base.k_q_node);
 	}


@@ -285,7 +285,6 @@ static int signal_poll_event(struct k_poll_event *event, u32_t state)
 	}
 
 	_unpend_thread(thread);
-	_abort_thread_timeout(thread);
 	_set_thread_return_value(thread,
 				 state == K_POLL_STATE_NOT_READY ? -EINTR : 0);


@@ -61,7 +61,6 @@ void k_queue_init(struct k_queue *queue)
 #if !defined(CONFIG_POLL)
 static void prepare_thread_to_run(struct k_thread *thread, void *data)
 {
-	_abort_thread_timeout(thread);
 	_ready_thread(thread);
 	_set_thread_return_value_with_data(thread, 0, data);
 }


@@ -230,6 +230,31 @@ inserted:
 #endif
 }
 
+void _unpend_thread_no_timeout(struct k_thread *thread)
+{
+	__ASSERT(thread->base.thread_state & _THREAD_PENDING, "");
+
+	sys_dlist_remove(&thread->base.k_q_node);
+	_mark_thread_as_not_pending(thread);
+}
+
+void _unpend_thread(struct k_thread *thread)
+{
+	_unpend_thread_no_timeout(thread);
+	_abort_thread_timeout(thread);
+}
+
+struct k_thread *_unpend_first_thread(_wait_q_t *wait_q)
+{
+	struct k_thread *t = _unpend1_no_timeout(wait_q);
+
+	if (t) {
+		_abort_thread_timeout(t);
+	}
+
+	return t;
+}
+
 /* Block the current thread and swap to the next.  Releases the
  * irq_lock, does a _Swap and returns the return value set at wakeup
  * time


@@ -98,7 +98,6 @@ static void do_sem_give(struct k_sem *sem)
 	struct k_thread *thread = _unpend_first_thread(&sem->wait_q);
 
 	if (thread) {
-		(void)_abort_thread_timeout(thread);
 		_ready_thread(thread);
 		_set_thread_return_value(thread, 0);
 	} else {
@@ -121,7 +120,7 @@ void _sem_give_non_preemptible(struct k_sem *sem)
 {
 	struct k_thread *thread;
 
-	thread = _unpend_first_thread(&sem->wait_q);
+	thread = _unpend1_no_timeout(&sem->wait_q);
 	if (!thread) {
 		increment_count_up_to_limit(sem);
 		return;


@@ -80,12 +80,11 @@ void _impl_k_stack_push(struct k_stack *stack, u32_t data)
 	first_pending_thread = _unpend_first_thread(&stack->wait_q);
 
 	if (first_pending_thread) {
-		_abort_thread_timeout(first_pending_thread);
 		_ready_thread(first_pending_thread);
 
 		_set_thread_return_value_with_data(first_pending_thread,
 						   0, (void *)data);
-		_reschedule_noyield(key);
+		_reschedule(key);
 		return;
 	} else {
 		*(stack->next) = data;


@@ -501,7 +501,7 @@ void _k_thread_single_abort(struct k_thread *thread)
 		_remove_thread_from_ready_q(thread);
 	} else {
 		if (_is_thread_pending(thread)) {
-			_unpend_thread(thread);
+			_unpend_thread_no_timeout(thread);
 		}
 		if (_is_thread_timeout_active(thread)) {
 			_abort_thread_timeout(thread);


@@ -75,13 +75,14 @@ void _timer_expiration_handler(struct _timeout *t)
 	}
 
 	/*
-	 * Interrupts _DO NOT_ have to be locked in this specific instance of
-	 * calling _unpend_thread() because a) this is the only place a thread
-	 * can be taken off this pend queue, and b) the only place a thread
-	 * can be put on the pend queue is at thread level, which of course
-	 * cannot interrupt the current context.
+	 * Interrupts _DO NOT_ have to be locked in this specific
+	 * instance of thread unpending because a) this is the only
+	 * place a thread can be taken off this pend queue, and b) the
+	 * only place a thread can be put on the pend queue is at
+	 * thread level, which of course cannot interrupt the current
+	 * context.
 	 */
-	_unpend_thread(thread);
+	_unpend_thread_no_timeout(thread);
 
 	key = irq_lock();
 	_ready_thread(thread);
@@ -163,7 +164,7 @@ void _impl_k_timer_stop(struct k_timer *timer)
 	}
 
 	key = irq_lock();
-	struct k_thread *pending_thread = _unpend_first_thread(&timer->wait_q);
+	struct k_thread *pending_thread = _unpend1_no_timeout(&timer->wait_q);
 
 	if (pending_thread) {
 		_ready_thread(pending_thread);


@@ -15,7 +15,6 @@ void ready_one_thread(_wait_q_t *wq)
 	struct k_thread *th = _unpend_first_thread(wq);
 
 	if (th) {
-		_abort_thread_timeout(th);
 		_ready_thread(th);
 	}
 }
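
The expiry paths keep the lighter variant because the timeout they
would otherwise abort is the very one that is firing.  A brief sketch
of the resulting contract, using only names defined in the scheduler
hunk above (surrounding locking elided):

	/* Common case: waking a thread whose timeout may still be armed */
	_unpend_thread(thread);		/* unpends, then aborts the timeout */

	/* Timeout/k_timer expiration paths only: the timeout has already
	 * fired, so there is nothing left to abort
	 */
	_unpend_thread_no_timeout(thread);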