kernel: Refactor, unifying _pend_current_thread() + _Swap() idiom

Everywhere the current thread is pended, the code is going to have to
do a _Swap() soon afterward, yet the scheduler API exposed these as
separate steps.  Unify this pattern everywhere it appears, which saves
some code bytes and gets _Swap() out of the general scheduler API at
zero cost.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
commit e0a572beeb
Author: Andy Ross, 2018-03-26 11:58:10 -07:00
Committed by: Anas Nashif
15 changed files with 28 additions and 45 deletions
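
The shape of the change is identical at every call site. As a lead-in to the diffs below, here is a minimal before/after sketch of the idiom (sem_take_before()/sem_take_after() are illustrative stand-ins, not functions from the tree):

	/* Before: every blocking call site pends itself, then has to
	 * remember to _Swap() away, threading the irq_lock key by hand.
	 */
	int sem_take_before(struct k_sem *sem, s32_t timeout)
	{
		int key = irq_lock();

		_pend_current_thread(&sem->wait_q, timeout);
		return _Swap(key);
	}

	/* After: one call. _pend_current_thread() consumes the lock key,
	 * does the _Swap() itself, and returns the wakeup value.
	 */
	int sem_take_after(struct k_sem *sem, s32_t timeout)
	{
		int key = irq_lock();

		return _pend_current_thread(key, &sem->wait_q, timeout);
	}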

View file

@@ -26,7 +26,7 @@ extern int _reschedule_yield(int key);
 extern void k_sched_unlock(void);
 extern void _pend_thread(struct k_thread *thread,
			 _wait_q_t *wait_q, s32_t timeout);
-extern void _pend_current_thread(_wait_q_t *wait_q, s32_t timeout);
+extern int _pend_current_thread(int key, _wait_q_t *wait_q, s32_t timeout);
 extern void _move_thread_to_end_of_prio_q(struct k_thread *thread);
 extern int _is_thread_time_slicing(struct k_thread *thread);
 extern void _update_time_slice_before_swap(void);

View file

@@ -285,8 +285,8 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 			 * synchronous send: pend current thread (unqueued)
 			 * until the receiver consumes the message
 			 */
-			_pend_current_thread(NULL, K_FOREVER);
-			return _Swap(key);
+			return _pend_current_thread(key, NULL, K_FOREVER);
 		}
 	}
@@ -306,8 +306,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 #endif
 	/* synchronous send: sender waits on tx queue for receiver or timeout */
-	_pend_current_thread(&mbox->tx_msg_queue, timeout);
-	return _Swap(key);
+	return _pend_current_thread(key, &mbox->tx_msg_queue, timeout);
 }

 int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, s32_t timeout)
@@ -461,9 +460,8 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
 	}

 	/* wait until a matching sender appears or a timeout occurs */
-	_pend_current_thread(&mbox->rx_msg_queue, timeout);
 	_current->base.swap_data = rx_msg;
-	result = _Swap(key);
+	result = _pend_current_thread(key, &mbox->rx_msg_queue, timeout);

 	/* consume message data immediately, if needed */
 	if (result == 0) {
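
Note the ordering in the k_mbox_get() hunk above: because the combined call swaps before returning, per-thread handoff state such as _current->base.swap_data must now be staged before _pend_current_thread(), not between the pend and the _Swap() as the old code did. A hedged sketch of the call-site rule (shape taken from the mbox and msgq hunks):

	/* Anything the waking side reads out of the pended thread must
	 * be written first...
	 */
	_current->base.swap_data = rx_msg;

	/* ...because this call does not return until wakeup. */
	result = _pend_current_thread(key, &mbox->rx_msg_queue, timeout);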

View file

@@ -101,8 +101,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, s32_t timeout)
 		result = -ENOMEM;
 	} else {
 		/* wait for a free block or timeout */
-		_pend_current_thread(&slab->wait_q, timeout);
-		result = _Swap(key);
+		result = _pend_current_thread(key, &slab->wait_q, timeout);
 		if (result == 0) {
 			*mem = _current->base.swap_data;
 		}

View file

@@ -51,7 +51,7 @@ SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
 int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
		     size_t size, s32_t timeout)
 {
-	int ret, key;
+	int ret;
 	s64_t end = 0;

 	__ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");
@@ -74,9 +74,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
 		return ret;
 	}

-	key = irq_lock();
-	_pend_current_thread(&p->wait_q, timeout);
-	_Swap(key);
+	_pend_current_thread(irq_lock(), &p->wait_q, timeout);

 	if (timeout != K_FOREVER) {
 		timeout = end - _tick_get();
View file

@@ -112,9 +112,8 @@ int _impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
 		result = -ENOMSG;
 	} else {
 		/* wait for put message success, failure, or timeout */
-		_pend_current_thread(&q->wait_q, timeout);
 		_current->base.swap_data = data;
-		return _reschedule_yield(key);
+		return _pend_current_thread(key, &q->wait_q, timeout);
 	}

 	irq_unlock(key);
@@ -195,9 +194,8 @@ int _impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
 		result = -ENOMSG;
 	} else {
 		/* wait for get message success or timeout */
-		_pend_current_thread(&q->wait_q, timeout);
 		_current->base.swap_data = data;
-		return _Swap(key);
+		return _pend_current_thread(key, &q->wait_q, timeout);
 	}

 	irq_unlock(key);

View file

@@ -158,9 +158,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 		adjust_owner_prio(mutex, new_prio);
 	}

-	_pend_current_thread(&mutex->wait_q, timeout);
-
-	int got_mutex = _Swap(key);
+	int got_mutex = _pend_current_thread(key, &mutex->wait_q, timeout);

 	K_DEBUG("on mutex %p got_mutex value: %d\n", mutex, got_mutex);

View file

@@ -529,8 +529,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 		 */
 		key = irq_lock();
 		_sched_unlock_no_reschedule();
-		_pend_current_thread(&pipe->wait_q.writers, timeout);
-		_Swap(key);
+		_pend_current_thread(key, &pipe->wait_q.writers, timeout);
 	} else {
 		k_sched_unlock();
 	}
@@ -672,8 +671,7 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 		_current->base.swap_data = &pipe_desc;
 		key = irq_lock();
 		_sched_unlock_no_reschedule();
-		_pend_current_thread(&pipe->wait_q.readers, timeout);
-		_Swap(key);
+		_pend_current_thread(key, &pipe->wait_q.readers, timeout);
 	} else {
 		k_sched_unlock();
 	}

View file

@@ -246,9 +246,7 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	_wait_q_t wait_q = _WAIT_Q_INIT(&wait_q);

-	_pend_current_thread(&wait_q, timeout);
-
-	int swap_rc = _Swap(key);
+	int swap_rc = _pend_current_thread(key, &wait_q, timeout);

 	/*
	 * Clear all event registrations. If events happen while we're in this

View file

@@ -224,8 +224,8 @@ void *k_queue_get(struct k_queue *queue, s32_t timeout)
 	return k_queue_poll(queue, timeout);
 #else
-	_pend_current_thread(&queue->wait_q, timeout);
+	int ret = _pend_current_thread(key, &queue->wait_q, timeout);

-	return _Swap(key) ? NULL : _current->base.swap_data;
+	return ret ? NULL : _current->base.swap_data;
 #endif /* CONFIG_POLL */
 }

View file

@@ -240,12 +240,15 @@ inserted:
 #endif
 }

-/* pend the current thread */
-/* must be called with interrupts locked */
-void _pend_current_thread(_wait_q_t *wait_q, s32_t timeout)
+/* Block the current thread and swap to the next.  Releases the
+ * irq_lock, does a _Swap and returns the return value set at wakeup
+ * time
+ */
+int _pend_current_thread(int key, _wait_q_t *wait_q, s32_t timeout)
 {
 	_remove_thread_from_ready_q(_current);
 	_pend_thread(_current, wait_q, timeout);
+	return _Swap(key);
 }

 int _impl_k_thread_priority_get(k_tid_t thread)
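
For context, the waker's half of the handshake is what determines the value _pend_current_thread() returns. A hedged sketch using scheduler internals of the same era (_unpend_thread(), _abort_thread_timeout(), _set_thread_return_value_with_data() and _ready_thread() are recalled from ksched.h rather than shown in this diff, so treat the exact names and signatures as assumptions):

	/* Complete a waiter's blocked _pend_current_thread() call:
	 * hand it 'data' via swap_data and make that call return 0.
	 */
	static void wake_waiter(struct k_thread *thread, void *data)
	{
		_unpend_thread(thread);            /* off the wait queue */
		_abort_thread_timeout(thread);     /* cancel pend timeout */
		_set_thread_return_value_with_data(thread, 0, data);
		_ready_thread(thread);             /* runnable again */
	}

A pend that times out instead comes back with a nonzero value, which is exactly what the k_queue_get() and k_stack_pop() hunks test for.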

View file

@@ -160,9 +160,7 @@ int _impl_k_sem_take(struct k_sem *sem, s32_t timeout)
 		return -EBUSY;
 	}

-	_pend_current_thread(&sem->wait_q, timeout);
-
-	return _Swap(key);
+	return _pend_current_thread(key, &sem->wait_q, timeout);
 }

 #ifdef CONFIG_USERSPACE

View file

@@ -128,9 +128,8 @@ int _impl_k_stack_pop(struct k_stack *stack, u32_t *data, s32_t timeout)
 		return -EBUSY;
 	}

-	_pend_current_thread(&stack->wait_q, timeout);
-	result = _Swap(key);
+	result = _pend_current_thread(key, &stack->wait_q, timeout);

 	if (result == 0) {
 		*data = (u32_t)_current->base.swap_data;
 	}

View file

@@ -206,8 +206,7 @@ u32_t _impl_k_timer_status_sync(struct k_timer *timer)
 	if (result == 0) {
 		if (timer->timeout.delta_ticks_from_prev != _INACTIVE) {
 			/* wait for timer to expire or stop */
-			_pend_current_thread(&timer->wait_q, K_FOREVER);
-			_Swap(key);
+			_pend_current_thread(key, &timer->wait_q, K_FOREVER);

 			/* get updated timer status */
 			key = irq_lock();

View file

@@ -24,9 +24,8 @@ int pthread_barrier_wait(pthread_barrier_t *b)
 		while (!sys_dlist_is_empty(&b->wait_q)) {
 			ready_one_thread(&b->wait_q);
 		}
-	} else {
-		_pend_current_thread(&b->wait_q, K_FOREVER);
-	}
-
-	return _reschedule_noyield(key);
+		return _reschedule_noyield(key);
+	} else {
+		return _pend_current_thread(key, &b->wait_q, K_FOREVER);
+	}
 }

View file

@@ -20,9 +20,7 @@ static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut, int timeout)
 	mut->sem->count = 1;
 	ready_one_thread(&mut->sem->wait_q);

-	_pend_current_thread(&cv->wait_q, timeout);
-
-	ret = _reschedule_yield(key);
+	ret = _pend_current_thread(key, &cv->wait_q, timeout);

 	/* FIXME: this extra lock (and the potential context switch it
 	 * can cause) could be optimized out. At the point of the