unified: streamline "timeout abort" internal interface
Rename _do_timeout_abort to _abort_timeout and _timeout_abort to _abort_thread_timeout to better reflect their functionality. Have the latter call the former; remove _do_timeout_abort and _nano_timer_timeout_abort.

Change-Id: I0fea9474b19a2eb47a37489eb06c0d1d56886c9c
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
parent 179fd3a80c
commit 7caef4581d
16 changed files with 29 additions and 34 deletions
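The resulting split is one low-level routine that works on a bare struct _timeout (so k_timer and k_delayed_work can use it directly) plus a thread-level wrapper that merely forwards the thread's embedded timeout, as the timeout_q.h hunks below show. The following is a minimal, self-contained sketch of that relationship only: the struct definitions are stubs and the queue removal is reduced to a flag, whereas the real implementation unlinks the timeout from _nanokernel.timeout_q.

#include <stdio.h>

/*
 * Stub types standing in for the real kernel definitions; the actual
 * struct _timeout sits on _nanokernel.timeout_q and is unlinked with
 * sys_dlist_remove().
 */
struct _timeout {
        int delta_ticks_from_prev;      /* -1 means "not queued / expired" */
};

struct k_thread {
        struct _timeout timeout;        /* every thread embeds one timeout */
};

/*
 * Low-level helper: aborts any timeout (thread, k_timer, k_delayed_work).
 * Returns 0 on success and -1 if the timeout has already expired.
 */
static inline int _abort_timeout(struct _timeout *t)
{
        if (t->delta_ticks_from_prev == -1) {
                return -1;
        }
        /* the real code unlinks t from the kernel timeout queue here */
        t->delta_ticks_from_prev = -1;
        return 0;
}

/* Thread-level wrapper: forwards the thread's embedded timeout. */
static inline int _abort_thread_timeout(struct k_thread *thread)
{
        return _abort_timeout(&thread->timeout);
}

int main(void)
{
        struct k_thread thread = { .timeout = { .delta_ticks_from_prev = 5 } };

        printf("first abort:  %d\n", _abort_thread_timeout(&thread)); /* 0 */
        printf("second abort: %d\n", _abort_thread_timeout(&thread)); /* -1 */
        return 0;
}

Call sites that hold a struct k_thread use the wrapper; the timer and workqueue code, which own a bare struct _timeout, call _abort_timeout() directly, which is exactly the substitution the hunks below perform.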

@@ -40,7 +40,7 @@ void k_fifo_init(struct k_fifo *fifo)
 
 static void prepare_thread_to_run(struct k_thread *thread, void *data)
 {
-        _timeout_abort(thread);
+        _abort_thread_timeout(thread);
         _ready_thread(thread);
         _set_thread_return_value_with_data(thread, 0, data);
 }

@@ -29,7 +29,6 @@
 extern "C" {
 #endif
 
-static inline int _do_timeout_abort(struct _timeout *t);
 static inline void _do_timeout_add(struct tcs *tcs,
                                    struct _timeout *t,
                                    _wait_q_t *wait_q,
@@ -104,12 +103,6 @@ static inline void _timeout_object_dequeue(struct tcs *tcs, struct _timeout *t)
         }
 }
 
-/* abort a timeout for a specified fiber */
-static inline int _timeout_abort(struct tcs *tcs)
-{
-        return _do_timeout_abort(&tcs->timeout);
-}
-
 /* put a fiber on the timeout queue and record its wait queue */
 static inline void _timeout_add(struct tcs *tcs, _wait_q_t *wait_q,
                                 int32_t timeout)
@@ -175,7 +168,7 @@ static inline void _timeout_handle_timeouts(void)
  *
  * @return 0 in success and -1 if the timer has expired
  */
-static inline int _do_timeout_abort(struct _timeout *t)
+static inline int _abort_timeout(struct _timeout *t)
 {
         sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
 
@@ -195,9 +188,9 @@ static inline int _do_timeout_abort(struct _timeout *t)
         return 0;
 }
 
-static inline int _nano_timer_timeout_abort(struct _timeout *t)
+static inline int _abort_thread_timeout(struct k_thread *thread)
 {
-        return _do_timeout_abort(t);
+        return _abort_timeout(&thread->timeout);
 }
 
 /*

@@ -48,12 +48,12 @@ static inline void _timeout_remove_tcs_from_wait_q(struct tcs *tcs)
 #elif defined(CONFIG_NANO_TIMERS)
 #include <timeout_q.h>
         #define _timeout_tcs_init(tcs) do { } while ((0))
-        #define _timeout_abort(tcs) do { } while ((0))
+        #define _abort_thread_timeout(tcs) do { } while ((0))
 
         #define _TIMEOUT_ADD(thread, pq, ticks) do { } while (0)
 #else
         #define _timeout_tcs_init(tcs) do { } while ((0))
-        #define _timeout_abort(tcs) do { } while ((0))
+        #define _abort_thread_timeout(tcs) do { } while ((0))
         #define _timeout_get_next_expiry() (K_FOREVER)
 
         #define _TIMEOUT_ADD(thread, pq, ticks) do { } while (0)

@@ -45,7 +45,7 @@ void k_lifo_put(struct k_lifo *lifo, void *data)
         first_pending_thread = _unpend_first_thread(&lifo->wait_q);
 
         if (first_pending_thread) {
-                _timeout_abort(first_pending_thread);
+                _abort_thread_timeout(first_pending_thread);
                 _ready_thread(first_pending_thread);
 
                 _set_thread_return_value_with_data(first_pending_thread,

@@ -260,7 +260,7 @@ static int _mbox_message_put(struct k_mbox *mbox, struct k_mbox_msg *tx_msg,
         if (_mbox_message_match(tx_msg, rx_msg) == 0) {
                 /* take receiver out of rx queue */
                 _unpend_thread(receiving_thread);
-                _timeout_abort(receiving_thread);
+                _abort_thread_timeout(receiving_thread);
 
                 /* ready receiver for execution */
                 _set_thread_return_value(receiving_thread, 0);
@@ -532,7 +532,7 @@ int k_mbox_get(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer,
         if (_mbox_message_match(tx_msg, rx_msg) == 0) {
                 /* take sender out of mailbox's tx queue */
                 _unpend_thread(sending_thread);
-                _timeout_abort(sending_thread);
+                _abort_thread_timeout(sending_thread);
 
                 irq_unlock(key);
 

@@ -154,7 +154,7 @@ void k_mem_map_free(struct k_mem_map *map, void **mem)
 
         if (pending_thread) {
                 _set_thread_return_value_with_data(pending_thread, 0, *mem);
-                _timeout_abort(pending_thread);
+                _abort_thread_timeout(pending_thread);
                 _ready_thread(pending_thread);
                 if (_must_switch_threads()) {
                         _Swap(key);

@@ -455,7 +455,7 @@ static void block_waiters_check(struct k_mem_pool *pool)
                  * outside the function by k_sched_unlock()
                  */
                 _unpend_thread(waiter);
-                _timeout_abort(waiter);
+                _abort_thread_timeout(waiter);
                 _ready_thread(waiter);
         }
         waiter = next_waiter;

@@ -79,7 +79,7 @@ int k_msgq_put(struct k_msgq *q, void *data, int32_t timeout)
                 memcpy(pending_thread->swap_data, data, q->msg_size);
                 /* wake up waiting thread */
                 _set_thread_return_value(pending_thread, 0);
-                _timeout_abort(pending_thread);
+                _abort_thread_timeout(pending_thread);
                 _ready_thread(pending_thread);
                 if (_must_switch_threads()) {
                         _Swap(key);
@@ -150,7 +150,7 @@ int k_msgq_get(struct k_msgq *q, void *data, int32_t timeout)
 
                 /* wake up waiting thread */
                 _set_thread_return_value(pending_thread, 0);
-                _timeout_abort(pending_thread);
+                _abort_thread_timeout(pending_thread);
                 _ready_thread(pending_thread);
                 if (_must_switch_threads()) {
                         _Swap(key);
@@ -191,7 +191,7 @@ void k_msgq_purge(struct k_msgq *q)
         /* wake up any threads that are waiting to write */
         while ((pending_thread = _unpend_first_thread(&q->wait_q)) != NULL) {
                 _set_thread_return_value(pending_thread, -ENOMSG);
-                _timeout_abort(pending_thread);
+                _abort_thread_timeout(pending_thread);
                 _ready_thread(pending_thread);
         }
 

@@ -218,7 +218,7 @@ void k_mutex_unlock(struct k_mutex *mutex)
                 mutex, new_owner, new_owner ? new_owner->prio : -1000);
 
         if (new_owner) {
-                _timeout_abort(new_owner);
+                _abort_thread_timeout(new_owner);
                 _ready_thread(new_owner);
 
                 irq_unlock(key);

@@ -318,7 +318,7 @@ static bool _pipe_xfer_prepare(sys_dlist_t *xfer_list,
                  * Add it to the transfer list.
                  */
                 _unpend_thread(thread);
-                _timeout_abort(thread);
+                _abort_thread_timeout(thread);
                 sys_dlist_append(xfer_list, &thread->k_q_node);
         }
 

@@ -309,7 +309,7 @@ void k_wakeup(k_tid_t thread)
                 return;
         }
 
-        if (_timeout_abort(thread) < 0) {
+        if (_abort_thread_timeout(thread) < 0) {
                 irq_unlock(key);
                 return;
         }

@@ -171,9 +171,11 @@ static int handle_sem_group(struct k_sem *sem, struct k_thread *thread)
         if (desc->sem != sem) {
                 sem_thread = CONTAINER_OF(desc, struct _sem_thread,
                                           desc);
+                struct k_thread *dummy_thread =
+                        (struct k_thread *)&sem_thread->dummy;
 
-                _timeout_abort((struct k_thread *)&sem_thread->dummy);
-                _unpend_thread((struct k_thread *)&sem_thread->dummy);
+                _abort_thread_timeout(dummy_thread);
+                _unpend_thread(dummy_thread);
 
                 sys_dlist_remove(node);
         }
@@ -196,7 +198,7 @@ static int handle_sem_group(struct k_sem *sem, struct k_thread *thread)
 
         if (!_is_thread_ready(desc->thread)) {
                 _reset_thread_states(desc->thread, K_PENDING | K_TIMING);
-                _timeout_abort(desc->thread);
+                _abort_thread_timeout(desc->thread);
                 if (_is_thread_ready(desc->thread)) {
                         _add_thread_to_ready_q(desc->thread);
                 }
@@ -230,7 +232,7 @@ static bool sem_give_common(struct k_sem *sem)
         return false;
 }
 
-        _timeout_abort(thread);
+        _abort_thread_timeout(thread);
 
         if (!handle_sem_group(sem, thread)) {
                 /* Handle the non-group case */

@@ -54,7 +54,7 @@ void k_stack_push(struct k_stack *stack, uint32_t data)
         first_pending_thread = _unpend_first_thread(&stack->wait_q);
 
         if (first_pending_thread) {
-                _timeout_abort(first_pending_thread);
+                _abort_thread_timeout(first_pending_thread);
                 _ready_thread(first_pending_thread);
 
                 _set_thread_return_value_with_data(first_pending_thread,

@@ -306,7 +306,7 @@ int k_thread_cancel(k_tid_t tid)
                 return -EINVAL;
         }
 
-        _timeout_abort(thread);
+        _abort_thread_timeout(thread);
         _thread_exit(thread);
 
         irq_unlock(key);
@@ -415,7 +415,7 @@ void _k_thread_single_abort(struct tcs *thread)
                 _unpend_thread(thread);
         }
         if (_is_thread_timing(thread)) {
-                _timeout_abort(thread);
+                _abort_thread_timeout(thread);
                 _mark_thread_as_not_timing(thread);
         }
 }

@@ -181,7 +181,7 @@ void k_timer_start(struct k_timer *timer, int32_t duration, int32_t period,
         unsigned int key = irq_lock();
 
         if (timer->timeout.delta_ticks_from_prev != -1) {
-                _do_timeout_abort(&timer->timeout);
+                _abort_timeout(&timer->timeout);
         }
 
         timer->period = _ms_to_ticks(period);
@@ -229,7 +229,7 @@ void k_timer_stop(struct k_timer *timer)
 
         int key = irq_lock();
 
-        _do_timeout_abort(&timer->timeout);
+        _abort_timeout(&timer->timeout);
 
         irq_unlock(key);
 

@@ -135,7 +135,7 @@ int k_delayed_work_cancel(struct k_delayed_work *work)
         }
 
         /* Abort timeout, if it has expired this will do nothing */
-        _do_timeout_abort(&work->timeout);
+        _abort_timeout(&work->timeout);
 
         /* Detach from workqueue */
         work->work_q = NULL;