kernel: Refactor, unifying _pend_current_thread() + _Swap() idiom
Everywhere the current thread is pended, the code is going to have to do a _Swap() soon afterward, yet the scheduler API exposed these as separate steps. Unify this pattern everywhere it appears, which saves some code bytes and gets _Swap() out of the general scheduler API at zero cost. Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
parent
8606fabf74
commit
e0a572beeb
15 changed files with 28 additions and 45 deletions
|
@ -26,7 +26,7 @@ extern int _reschedule_yield(int key);
|
||||||
extern void k_sched_unlock(void);
|
extern void k_sched_unlock(void);
|
||||||
extern void _pend_thread(struct k_thread *thread,
|
extern void _pend_thread(struct k_thread *thread,
|
||||||
_wait_q_t *wait_q, s32_t timeout);
|
_wait_q_t *wait_q, s32_t timeout);
|
||||||
extern void _pend_current_thread(_wait_q_t *wait_q, s32_t timeout);
|
extern int _pend_current_thread(int key, _wait_q_t *wait_q, s32_t timeout);
|
||||||
extern void _move_thread_to_end_of_prio_q(struct k_thread *thread);
|
extern void _move_thread_to_end_of_prio_q(struct k_thread *thread);
|
||||||
extern int _is_thread_time_slicing(struct k_thread *thread);
|
extern int _is_thread_time_slicing(struct k_thread *thread);
|
||||||
extern void _update_time_slice_before_swap(void);
|
extern void _update_time_slice_before_swap(void);
|
||||||
|
|
|
@ -285,8 +285,8 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
|
||||||
* synchronous send: pend current thread (unqueued)
|
* synchronous send: pend current thread (unqueued)
|
||||||
* until the receiver consumes the message
|
* until the receiver consumes the message
|
||||||
*/
|
*/
|
||||||
_pend_current_thread(NULL, K_FOREVER);
|
return _pend_current_thread(key, NULL, K_FOREVER);
|
||||||
return _Swap(key);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -306,8 +306,7 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* synchronous send: sender waits on tx queue for receiver or timeout */
|
/* synchronous send: sender waits on tx queue for receiver or timeout */
|
||||||
_pend_current_thread(&mbox->tx_msg_queue, timeout);
|
return _pend_current_thread(key, &mbox->tx_msg_queue, timeout);
|
||||||
return _Swap(key);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, s32_t timeout)
|
int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, s32_t timeout)
|
||||||
|
@ -461,9 +460,8 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
|
||||||
}
|
}
|
||||||
|
|
||||||
/* wait until a matching sender appears or a timeout occurs */
|
/* wait until a matching sender appears or a timeout occurs */
|
||||||
_pend_current_thread(&mbox->rx_msg_queue, timeout);
|
|
||||||
_current->base.swap_data = rx_msg;
|
_current->base.swap_data = rx_msg;
|
||||||
result = _Swap(key);
|
result = _pend_current_thread(key, &mbox->rx_msg_queue, timeout);
|
||||||
|
|
||||||
/* consume message data immediately, if needed */
|
/* consume message data immediately, if needed */
|
||||||
if (result == 0) {
|
if (result == 0) {
|
||||||
|
|
|
@ -101,8 +101,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, s32_t timeout)
|
||||||
result = -ENOMEM;
|
result = -ENOMEM;
|
||||||
} else {
|
} else {
|
||||||
/* wait for a free block or timeout */
|
/* wait for a free block or timeout */
|
||||||
_pend_current_thread(&slab->wait_q, timeout);
|
result = _pend_current_thread(key, &slab->wait_q, timeout);
|
||||||
result = _Swap(key);
|
|
||||||
if (result == 0) {
|
if (result == 0) {
|
||||||
*mem = _current->base.swap_data;
|
*mem = _current->base.swap_data;
|
||||||
}
|
}
|
||||||
|
|
|
@ -51,7 +51,7 @@ SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
|
||||||
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
|
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
|
||||||
size_t size, s32_t timeout)
|
size_t size, s32_t timeout)
|
||||||
{
|
{
|
||||||
int ret, key;
|
int ret;
|
||||||
s64_t end = 0;
|
s64_t end = 0;
|
||||||
|
|
||||||
__ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");
|
__ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");
|
||||||
|
@ -74,9 +74,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
key = irq_lock();
|
_pend_current_thread(irq_lock(), &p->wait_q, timeout);
|
||||||
_pend_current_thread(&p->wait_q, timeout);
|
|
||||||
_Swap(key);
|
|
||||||
|
|
||||||
if (timeout != K_FOREVER) {
|
if (timeout != K_FOREVER) {
|
||||||
timeout = end - _tick_get();
|
timeout = end - _tick_get();
|
||||||
|
|
|
@ -112,9 +112,8 @@ int _impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
|
||||||
result = -ENOMSG;
|
result = -ENOMSG;
|
||||||
} else {
|
} else {
|
||||||
/* wait for put message success, failure, or timeout */
|
/* wait for put message success, failure, or timeout */
|
||||||
_pend_current_thread(&q->wait_q, timeout);
|
|
||||||
_current->base.swap_data = data;
|
_current->base.swap_data = data;
|
||||||
return _reschedule_yield(key);
|
return _pend_current_thread(key, &q->wait_q, timeout);
|
||||||
}
|
}
|
||||||
|
|
||||||
irq_unlock(key);
|
irq_unlock(key);
|
||||||
|
@ -195,9 +194,8 @@ int _impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
|
||||||
result = -ENOMSG;
|
result = -ENOMSG;
|
||||||
} else {
|
} else {
|
||||||
/* wait for get message success or timeout */
|
/* wait for get message success or timeout */
|
||||||
_pend_current_thread(&q->wait_q, timeout);
|
|
||||||
_current->base.swap_data = data;
|
_current->base.swap_data = data;
|
||||||
return _Swap(key);
|
return _pend_current_thread(key, &q->wait_q, timeout);
|
||||||
}
|
}
|
||||||
|
|
||||||
irq_unlock(key);
|
irq_unlock(key);
|
||||||
|
|
|
@ -158,9 +158,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
|
||||||
adjust_owner_prio(mutex, new_prio);
|
adjust_owner_prio(mutex, new_prio);
|
||||||
}
|
}
|
||||||
|
|
||||||
_pend_current_thread(&mutex->wait_q, timeout);
|
int got_mutex = _pend_current_thread(key, &mutex->wait_q, timeout);
|
||||||
|
|
||||||
int got_mutex = _Swap(key);
|
|
||||||
|
|
||||||
K_DEBUG("on mutex %p got_mutex value: %d\n", mutex, got_mutex);
|
K_DEBUG("on mutex %p got_mutex value: %d\n", mutex, got_mutex);
|
||||||
|
|
||||||
|
|
|
@ -529,8 +529,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
|
||||||
*/
|
*/
|
||||||
key = irq_lock();
|
key = irq_lock();
|
||||||
_sched_unlock_no_reschedule();
|
_sched_unlock_no_reschedule();
|
||||||
_pend_current_thread(&pipe->wait_q.writers, timeout);
|
_pend_current_thread(key, &pipe->wait_q.writers, timeout);
|
||||||
_Swap(key);
|
|
||||||
} else {
|
} else {
|
||||||
k_sched_unlock();
|
k_sched_unlock();
|
||||||
}
|
}
|
||||||
|
@ -672,8 +671,7 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
|
||||||
_current->base.swap_data = &pipe_desc;
|
_current->base.swap_data = &pipe_desc;
|
||||||
key = irq_lock();
|
key = irq_lock();
|
||||||
_sched_unlock_no_reschedule();
|
_sched_unlock_no_reschedule();
|
||||||
_pend_current_thread(&pipe->wait_q.readers, timeout);
|
_pend_current_thread(key, &pipe->wait_q.readers, timeout);
|
||||||
_Swap(key);
|
|
||||||
} else {
|
} else {
|
||||||
k_sched_unlock();
|
k_sched_unlock();
|
||||||
}
|
}
|
||||||
|
|
|
@ -246,9 +246,7 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
|
||||||
|
|
||||||
_wait_q_t wait_q = _WAIT_Q_INIT(&wait_q);
|
_wait_q_t wait_q = _WAIT_Q_INIT(&wait_q);
|
||||||
|
|
||||||
_pend_current_thread(&wait_q, timeout);
|
int swap_rc = _pend_current_thread(key, &wait_q, timeout);
|
||||||
|
|
||||||
int swap_rc = _Swap(key);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Clear all event registrations. If events happen while we're in this
|
* Clear all event registrations. If events happen while we're in this
|
||||||
|
|
|
@ -224,8 +224,8 @@ void *k_queue_get(struct k_queue *queue, s32_t timeout)
|
||||||
return k_queue_poll(queue, timeout);
|
return k_queue_poll(queue, timeout);
|
||||||
|
|
||||||
#else
|
#else
|
||||||
_pend_current_thread(&queue->wait_q, timeout);
|
int ret = _pend_current_thread(key, &queue->wait_q, timeout);
|
||||||
|
|
||||||
return _Swap(key) ? NULL : _current->base.swap_data;
|
return ret ? NULL : _current->base.swap_data;
|
||||||
#endif /* CONFIG_POLL */
|
#endif /* CONFIG_POLL */
|
||||||
}
|
}
|
||||||
|
|
|
@ -240,12 +240,15 @@ inserted:
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
/* pend the current thread */
|
/* Block the current thread and swap to the next. Releases the
|
||||||
/* must be called with interrupts locked */
|
* irq_lock, does a _Swap and returns the return value set at wakeup
|
||||||
void _pend_current_thread(_wait_q_t *wait_q, s32_t timeout)
|
* time
|
||||||
|
*/
|
||||||
|
int _pend_current_thread(int key, _wait_q_t *wait_q, s32_t timeout)
|
||||||
{
|
{
|
||||||
_remove_thread_from_ready_q(_current);
|
_remove_thread_from_ready_q(_current);
|
||||||
_pend_thread(_current, wait_q, timeout);
|
_pend_thread(_current, wait_q, timeout);
|
||||||
|
return _Swap(key);
|
||||||
}
|
}
|
||||||
|
|
||||||
int _impl_k_thread_priority_get(k_tid_t thread)
|
int _impl_k_thread_priority_get(k_tid_t thread)
|
||||||
|
|
|
@ -160,9 +160,7 @@ int _impl_k_sem_take(struct k_sem *sem, s32_t timeout)
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
|
|
||||||
_pend_current_thread(&sem->wait_q, timeout);
|
return _pend_current_thread(key, &sem->wait_q, timeout);
|
||||||
|
|
||||||
return _Swap(key);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_USERSPACE
|
#ifdef CONFIG_USERSPACE
|
||||||
|
|
|
@ -128,9 +128,8 @@ int _impl_k_stack_pop(struct k_stack *stack, u32_t *data, s32_t timeout)
|
||||||
return -EBUSY;
|
return -EBUSY;
|
||||||
}
|
}
|
||||||
|
|
||||||
_pend_current_thread(&stack->wait_q, timeout);
|
result = _pend_current_thread(key, &stack->wait_q, timeout);
|
||||||
|
|
||||||
result = _Swap(key);
|
|
||||||
if (result == 0) {
|
if (result == 0) {
|
||||||
*data = (u32_t)_current->base.swap_data;
|
*data = (u32_t)_current->base.swap_data;
|
||||||
}
|
}
|
||||||
|
|
|
@ -206,8 +206,7 @@ u32_t _impl_k_timer_status_sync(struct k_timer *timer)
|
||||||
if (result == 0) {
|
if (result == 0) {
|
||||||
if (timer->timeout.delta_ticks_from_prev != _INACTIVE) {
|
if (timer->timeout.delta_ticks_from_prev != _INACTIVE) {
|
||||||
/* wait for timer to expire or stop */
|
/* wait for timer to expire or stop */
|
||||||
_pend_current_thread(&timer->wait_q, K_FOREVER);
|
_pend_current_thread(key, &timer->wait_q, K_FOREVER);
|
||||||
_Swap(key);
|
|
||||||
|
|
||||||
/* get updated timer status */
|
/* get updated timer status */
|
||||||
key = irq_lock();
|
key = irq_lock();
|
||||||
|
|
|
@ -24,9 +24,8 @@ int pthread_barrier_wait(pthread_barrier_t *b)
|
||||||
while (!sys_dlist_is_empty(&b->wait_q)) {
|
while (!sys_dlist_is_empty(&b->wait_q)) {
|
||||||
ready_one_thread(&b->wait_q);
|
ready_one_thread(&b->wait_q);
|
||||||
}
|
}
|
||||||
|
return _reschedule_noyield(key);
|
||||||
} else {
|
} else {
|
||||||
_pend_current_thread(&b->wait_q, K_FOREVER);
|
return _pend_current_thread(key, &b->wait_q, K_FOREVER);
|
||||||
}
|
}
|
||||||
|
|
||||||
return _reschedule_noyield(key);
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -20,9 +20,7 @@ static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut, int timeout)
|
||||||
|
|
||||||
mut->sem->count = 1;
|
mut->sem->count = 1;
|
||||||
ready_one_thread(&mut->sem->wait_q);
|
ready_one_thread(&mut->sem->wait_q);
|
||||||
_pend_current_thread(&cv->wait_q, timeout);
|
ret = _pend_current_thread(key, &cv->wait_q, timeout);
|
||||||
|
|
||||||
ret = _reschedule_yield(key);
|
|
||||||
|
|
||||||
/* FIXME: this extra lock (and the potential context switch it
|
/* FIXME: this extra lock (and the potential context switch it
|
||||||
* can cause) could be optimized out. At the point of the
|
* can cause) could be optimized out. At the point of the
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue