diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index caae85444ae..4586ff95238 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -26,7 +26,7 @@
 extern int _reschedule_yield(int key);
 extern void k_sched_unlock(void);
 extern void _pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout);
-extern void _pend_current_thread(_wait_q_t *wait_q, s32_t timeout);
+extern int _pend_current_thread(int key, _wait_q_t *wait_q, s32_t timeout);
 extern void _move_thread_to_end_of_prio_q(struct k_thread *thread);
 extern int _is_thread_time_slicing(struct k_thread *thread);
 extern void _update_time_slice_before_swap(void);
diff --git a/kernel/mailbox.c b/kernel/mailbox.c
index 42bd8a029bc..fd02c734846 100644
--- a/kernel/mailbox.c
+++ b/kernel/mailbox.c
@@ -285,8 +285,8 @@ static int mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
 			 * synchronous send: pend current thread (unqueued)
 			 * until the receiver consumes the message
 			 */
-			_pend_current_thread(NULL, K_FOREVER);
-			return _Swap(key);
+			return _pend_current_thread(key, NULL, K_FOREVER);
+
 		}
 	}
 
@@ -306,8 +306,7 @@
 #endif
 
 	/* synchronous send: sender waits on tx queue for receiver or timeout */
-	_pend_current_thread(&mbox->tx_msg_queue, timeout);
-	return _Swap(key);
+	return _pend_current_thread(key, &mbox->tx_msg_queue, timeout);
 }
 
 int k_mbox_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, s32_t timeout)
@@ -461,9 +460,8 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
 	}
 
 	/* wait until a matching sender appears or a timeout occurs */
-	_pend_current_thread(&mbox->rx_msg_queue, timeout);
 	_current->base.swap_data = rx_msg;
-	result = _Swap(key);
+	result = _pend_current_thread(key, &mbox->rx_msg_queue, timeout);
 
 	/* consume message data immediately, if needed */
 	if (result == 0) {
diff --git a/kernel/mem_slab.c b/kernel/mem_slab.c
index 2dd8b5344d5..6c0c0476125 100644
--- a/kernel/mem_slab.c
+++ b/kernel/mem_slab.c
@@ -101,8 +101,7 @@ int k_mem_slab_alloc(struct k_mem_slab *slab, void **mem, s32_t timeout)
 		result = -ENOMEM;
 	} else {
 		/* wait for a free block or timeout */
-		_pend_current_thread(&slab->wait_q, timeout);
-		result = _Swap(key);
+		result = _pend_current_thread(key, &slab->wait_q, timeout);
 		if (result == 0) {
 			*mem = _current->base.swap_data;
 		}
diff --git a/kernel/mempool.c b/kernel/mempool.c
index 83b2437cb4f..ae7dca98e09 100644
--- a/kernel/mempool.c
+++ b/kernel/mempool.c
@@ -51,7 +51,7 @@ SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
 int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
 		     size_t size, s32_t timeout)
 {
-	int ret, key;
+	int ret;
 	s64_t end = 0;
 
 	__ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");
@@ -74,9 +74,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
 		return ret;
 	}
 
-	key = irq_lock();
-	_pend_current_thread(&p->wait_q, timeout);
-	_Swap(key);
+	_pend_current_thread(irq_lock(), &p->wait_q, timeout);
 
 	if (timeout != K_FOREVER) {
 		timeout = end - _tick_get();
diff --git a/kernel/msg_q.c b/kernel/msg_q.c
index cd9810e5939..4b9ffe58ec5 100644
--- a/kernel/msg_q.c
+++ b/kernel/msg_q.c
@@ -112,9 +112,8 @@ int _impl_k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
 		result = -ENOMSG;
 	} else {
 		/* wait for put message success, failure, or timeout */
-		_pend_current_thread(&q->wait_q, timeout);
 		_current->base.swap_data = data;
-		return _reschedule_yield(key);
+		return _pend_current_thread(key, &q->wait_q, timeout);
 	}
 
 	irq_unlock(key);
@@ -195,9 +194,8 @@ int _impl_k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
 		result = -ENOMSG;
 	} else {
 		/* wait for get message success or timeout */
-		_pend_current_thread(&q->wait_q, timeout);
 		_current->base.swap_data = data;
-		return _Swap(key);
+		return _pend_current_thread(key, &q->wait_q, timeout);
 	}
 
 	irq_unlock(key);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 1602217d6b5..cddfad21597 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -158,9 +158,7 @@ int _impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 		adjust_owner_prio(mutex, new_prio);
 	}
 
-	_pend_current_thread(&mutex->wait_q, timeout);
-
-	int got_mutex = _Swap(key);
+	int got_mutex = _pend_current_thread(key, &mutex->wait_q, timeout);
 
 	K_DEBUG("on mutex %p got_mutex value: %d\n", mutex, got_mutex);
 
diff --git a/kernel/pipes.c b/kernel/pipes.c
index d7204030140..aeddc6045ad 100644
--- a/kernel/pipes.c
+++ b/kernel/pipes.c
@@ -529,8 +529,7 @@ int _k_pipe_put_internal(struct k_pipe *pipe, struct k_pipe_async *async_desc,
 		 */
 		key = irq_lock();
 		_sched_unlock_no_reschedule();
-		_pend_current_thread(&pipe->wait_q.writers, timeout);
-		_Swap(key);
+		_pend_current_thread(key, &pipe->wait_q.writers, timeout);
 	} else {
 		k_sched_unlock();
 	}
@@ -672,8 +671,7 @@ int _impl_k_pipe_get(struct k_pipe *pipe, void *data, size_t bytes_to_read,
 		_current->base.swap_data = &pipe_desc;
 		key = irq_lock();
 		_sched_unlock_no_reschedule();
-		_pend_current_thread(&pipe->wait_q.readers, timeout);
-		_Swap(key);
+		_pend_current_thread(key, &pipe->wait_q.readers, timeout);
 	} else {
 		k_sched_unlock();
 	}
diff --git a/kernel/poll.c b/kernel/poll.c
index 7959476877d..858ec99ed3d 100644
--- a/kernel/poll.c
+++ b/kernel/poll.c
@@ -246,9 +246,7 @@ int k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	_wait_q_t wait_q = _WAIT_Q_INIT(&wait_q);
 
-	_pend_current_thread(&wait_q, timeout);
-
-	int swap_rc = _Swap(key);
+	int swap_rc = _pend_current_thread(key, &wait_q, timeout);
 
 	/*
 	 * Clear all event registrations. If events happen while we're in this
diff --git a/kernel/queue.c b/kernel/queue.c
index e960a83c8e8..36ff8908a53 100644
--- a/kernel/queue.c
+++ b/kernel/queue.c
@@ -224,8 +224,8 @@ void *k_queue_get(struct k_queue *queue, s32_t timeout)
 	return k_queue_poll(queue, timeout);
 
 #else
-	_pend_current_thread(&queue->wait_q, timeout);
+	int ret = _pend_current_thread(key, &queue->wait_q, timeout);
 
-	return _Swap(key) ? NULL : _current->base.swap_data;
+	return ret ? NULL : _current->base.swap_data;
 #endif /* CONFIG_POLL */
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 1564e6afd2b..65f1376a8e5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -240,12 +240,15 @@ inserted:
 #endif
 }
 
-/* pend the current thread */
-/* must be called with interrupts locked */
-void _pend_current_thread(_wait_q_t *wait_q, s32_t timeout)
+/* Block the current thread and swap to the next.  Releases the
+ * irq_lock, does a _Swap and returns the return value set at wakeup
+ * time
+ */
+int _pend_current_thread(int key, _wait_q_t *wait_q, s32_t timeout)
 {
 	_remove_thread_from_ready_q(_current);
 	_pend_thread(_current, wait_q, timeout);
+	return _Swap(key);
 }
 
 int _impl_k_thread_priority_get(k_tid_t thread)
diff --git a/kernel/sem.c b/kernel/sem.c
index cddd348c9e7..fccb8f1898e 100644
--- a/kernel/sem.c
+++ b/kernel/sem.c
@@ -160,9 +160,7 @@ int _impl_k_sem_take(struct k_sem *sem, s32_t timeout)
 		return -EBUSY;
 	}
 
-	_pend_current_thread(&sem->wait_q, timeout);
-
-	return _Swap(key);
+	return _pend_current_thread(key, &sem->wait_q, timeout);
 }
 
 #ifdef CONFIG_USERSPACE
diff --git a/kernel/stack.c b/kernel/stack.c
index 2288a8f91a4..2713e539322 100644
--- a/kernel/stack.c
+++ b/kernel/stack.c
@@ -128,9 +128,8 @@ int _impl_k_stack_pop(struct k_stack *stack, u32_t *data, s32_t timeout)
 		return -EBUSY;
 	}
 
-	_pend_current_thread(&stack->wait_q, timeout);
+	result = _pend_current_thread(key, &stack->wait_q, timeout);
 
-	result = _Swap(key);
 	if (result == 0) {
 		*data = (u32_t)_current->base.swap_data;
 	}
diff --git a/kernel/timer.c b/kernel/timer.c
index cab55a3d96e..662321422ff 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -206,8 +206,7 @@ u32_t _impl_k_timer_status_sync(struct k_timer *timer)
 	if (result == 0) {
 		if (timer->timeout.delta_ticks_from_prev != _INACTIVE) {
 			/* wait for timer to expire or stop */
-			_pend_current_thread(&timer->wait_q, K_FOREVER);
-			_Swap(key);
+			_pend_current_thread(key, &timer->wait_q, K_FOREVER);
 
 			/* get updated timer status */
 			key = irq_lock();
diff --git a/lib/posix/pthread_barrier.c b/lib/posix/pthread_barrier.c
index 5e213cd0723..38554d0319e 100644
--- a/lib/posix/pthread_barrier.c
+++ b/lib/posix/pthread_barrier.c
@@ -24,9 +24,8 @@ int pthread_barrier_wait(pthread_barrier_t *b)
 		while (!sys_dlist_is_empty(&b->wait_q)) {
 			ready_one_thread(&b->wait_q);
 		}
+		return _reschedule_noyield(key);
 	} else {
-		_pend_current_thread(&b->wait_q, K_FOREVER);
+		return _pend_current_thread(key, &b->wait_q, K_FOREVER);
 	}
-
-	return _reschedule_noyield(key);
 }
diff --git a/lib/posix/pthread_cond.c b/lib/posix/pthread_cond.c
index e41744c0bcb..24e6987bb34 100644
--- a/lib/posix/pthread_cond.c
+++ b/lib/posix/pthread_cond.c
@@ -20,9 +20,7 @@ static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut, int timeout)
 	mut->sem->count = 1;
 	ready_one_thread(&mut->sem->wait_q);
 
-	_pend_current_thread(&cv->wait_q, timeout);
-
-	ret = _reschedule_yield(key);
+	ret = _pend_current_thread(key, &cv->wait_q, timeout);
 
 	/* FIXME: this extra lock (and the potential context switch it
 	 * can cause) could be optimized out. At the point of the
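
For reference (illustrative, not part of the patch): after this refactor every blocking primitive follows one pattern -- take the irq lock, handle the fast and K_NO_WAIT paths, then make a single _pend_current_thread(key, wait_q, timeout) call that pends _current, performs the _Swap() (releasing the lock), and returns the value set at wakeup. This is also why mempool.c can pass irq_lock() directly as the first argument: the key's only remaining consumer is the _Swap() inside _pend_current_thread(). Below is a minimal sketch of that pattern, modeled on the sem.c and stack.c hunks above; struct my_obj, its fields, and my_obj_wait() are hypothetical stand-ins, while the kernel calls (irq_lock(), _pend_current_thread(), K_NO_WAIT) are the ones this diff already uses.

#include <errno.h>
#include <kernel.h>
#include <ksched.h>	/* kernel-internal header (kernel/include/ksched.h) */

/* Hypothetical object, for illustration only; its wait_q would be
 * initialized elsewhere with _WAIT_Q_INIT().
 */
struct my_obj {
	_wait_q_t wait_q;	/* threads blocked on this object */
	int ready;		/* nonzero when the fast path can succeed */
};

static int my_obj_wait(struct my_obj *obj, s32_t timeout)
{
	int key = irq_lock();

	if (obj->ready) {
		/* fast path: nothing to wait for */
		obj->ready = 0;
		irq_unlock(key);
		return 0;
	}

	if (timeout == K_NO_WAIT) {
		/* caller declined to block */
		irq_unlock(key);
		return -EBUSY;
	}

	/* One call replaces the old _pend_current_thread() + _Swap(key)
	 * pair: it pends _current on obj->wait_q, swaps away (releasing
	 * the irq lock), and returns the value set at wakeup time
	 * (e.g. 0 from the thread that readies us, -EAGAIN on timeout).
	 */
	return _pend_current_thread(key, &obj->wait_q, timeout);
}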