unified: streamline "timeout add" internal interfaces

Rename _do_timeout_add to _add_timeout and _timeout_add to _add_thread_timeout to better reflect their functionality. Have the latter call the former; remove the _TIMEOUT_ADD macro and _nano_timer_timeout_add.

Change-Id: Ica86bea10d99d72bf78379598a942d277e7002d0
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
parent 0bd7e0d148
commit 688973e5ab
6 changed files with 34 additions and 52 deletions
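For orientation, here is a minimal sketch of how the two renamed helpers relate after this change. The signatures are taken from the diff below; struct k_thread, struct _timeout and _wait_q_t are the unified kernel's own types and are only assumed here, not defined.

/* Sketch only: depends on the unified kernel headers for struct k_thread,
 * struct _timeout, _wait_q_t and int32_t.
 */

/* Generic helper (formerly _do_timeout_add): put any timeout object on the
 * kernel timeout queue, recording the waiting thread and wait queue if any.
 */
static inline void _add_timeout(struct k_thread *thread,
				struct _timeout *timeout_obj,
				_wait_q_t *wait_q, int32_t timeout);

/* Thread-specific wrapper (replaces _timeout_add and the _TIMEOUT_ADD macro):
 * queue the timeout embedded in the thread object itself.
 */
static inline void _add_thread_timeout(struct k_thread *thread,
				       _wait_q_t *wait_q, int32_t timeout)
{
	_add_timeout(thread, &thread->timeout, wait_q, timeout);
}

Timer and work-queue code that has no waiting thread keeps calling _add_timeout directly with a NULL thread, as the hunks below show.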
@@ -29,11 +29,6 @@
 extern "C" {
 #endif
 
-static inline void _do_timeout_add(struct tcs *tcs,
-				   struct _timeout *t,
-				   _wait_q_t *wait_q,
-				   int32_t timeout);
-
 #if defined(CONFIG_NANO_TIMEOUTS)
 /* initialize the nano timeouts part of TCS when enabled in the kernel */
 
@@ -103,13 +98,6 @@ static inline void _timeout_object_dequeue(struct tcs *tcs, struct _timeout *t)
 	}
 }
 
-/* put a fiber on the timeout queue and record its wait queue */
-static inline void _timeout_add(struct tcs *tcs, _wait_q_t *wait_q,
-				int32_t timeout)
-{
-	_do_timeout_add(tcs, &tcs->timeout, wait_q, timeout);
-}
-
 #else
 #define _timeout_object_dequeue(tcs, t) do { } while (0)
 #endif /* CONFIG_NANO_TIMEOUTS */
@@ -219,22 +207,19 @@ static int _timeout_insert_point_test(sys_dnode_t *test, void *timeout)
 	return 1;
 }
 
-/**
+/*
+ * Add timeout to timeout queue. Record waiting thread and wait queue if any.
  *
- * @brief Put timeout on the timeout queue, record waiting fiber and wait queue
- *
- * @param tcs Fiber waiting on a timeout
- * @param t Timeout structure to be added to the nanokernel queue
- * @wait_q nanokernel object wait queue
- * @timeout Timeout in ticks
- *
- * @return N/A
+ * Cannot handle timeout == 0 and timeout == K_FOREVER.
  */
-static inline void _do_timeout_add(struct tcs *tcs, struct _timeout *t,
-				   _wait_q_t *wait_q, int32_t timeout)
+static inline void _add_timeout(struct k_thread *thread,
+				struct _timeout *timeout_obj,
+				_wait_q_t *wait_q, int32_t timeout)
 {
+	__ASSERT(timeout > 0, "");
+
 	K_DEBUG("thread %p on wait_q %p, for timeout: %d\n",
-		tcs, wait_q, timeout);
+		thread, wait_q, timeout);
 
 	sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
 
@@ -244,14 +229,14 @@ static inline void _do_timeout_add(struct tcs *tcs, struct _timeout *t,
 		_nanokernel.timeout_q.tail);
 
 	K_DEBUG("timeout %p before: next: %p, prev: %p\n",
-		t, t->node.next, t->node.prev);
+		timeout_obj, timeout_obj->node.next, timeout_obj->node.prev);
 
-	t->tcs = tcs;
-	t->delta_ticks_from_prev = timeout;
-	t->wait_q = (sys_dlist_t *)wait_q;
-	sys_dlist_insert_at(timeout_q, (void *)t,
+	timeout_obj->tcs = thread;
+	timeout_obj->delta_ticks_from_prev = timeout;
+	timeout_obj->wait_q = (sys_dlist_t *)wait_q;
+	sys_dlist_insert_at(timeout_q, (void *)timeout_obj,
 			    _timeout_insert_point_test,
-			    &t->delta_ticks_from_prev);
+			    &timeout_obj->delta_ticks_from_prev);
 
 	K_DEBUG("timeout_q %p after: head: %p, tail: %p\n",
 		&_nanokernel.timeout_q,
@@ -259,14 +244,18 @@ static inline void _do_timeout_add(struct tcs *tcs, struct _timeout *t,
 		_nanokernel.timeout_q.tail);
 
 	K_DEBUG("timeout %p after: next: %p, prev: %p\n",
-		t, t->node.next, t->node.prev);
+		timeout_obj, timeout_obj->node.next, timeout_obj->node.prev);
 }
 
-static inline void _nano_timer_timeout_add(struct _timeout *t,
-					   _wait_q_t *wait_q,
-					   int32_t timeout)
+/*
+ * Put thread on timeout queue. Record wait queue if any.
+ *
+ * Cannot handle timeout == 0 and timeout == K_FOREVER.
+ */
+static inline void _add_thread_timeout(struct k_thread *thread,
+				       _wait_q_t *wait_q, int32_t timeout)
 {
-	_do_timeout_add(NULL, t, wait_q, timeout);
+	_add_timeout(thread, &thread->timeout, wait_q, timeout);
 }
 
 /* find the closest deadline in the timeout queue */
@@ -38,25 +38,18 @@ static inline void _timeout_remove_tcs_from_wait_q(struct tcs *tcs)
 }
 #include <timeout_q.h>
 
-#define _TIMEOUT_ADD(thread, pq, ticks) \
-	do { \
-		if ((ticks) != TICKS_UNLIMITED) { \
-			_timeout_add(thread, pq, ticks); \
-		} \
-	} while (0)
-
 #elif defined(CONFIG_NANO_TIMERS)
 #include <timeout_q.h>
 #define _timeout_tcs_init(tcs) do { } while ((0))
 #define _abort_thread_timeout(tcs) do { } while ((0))
 
-#define _TIMEOUT_ADD(thread, pq, ticks) do { } while (0)
+#define _add_thread_timeout(thread, pq, ticks) do { } while (0)
 #else
 #define _timeout_tcs_init(tcs) do { } while ((0))
 #define _abort_thread_timeout(tcs) do { } while ((0))
 #define _timeout_get_next_expiry() (K_FOREVER)
 
-#define _TIMEOUT_ADD(thread, pq, ticks) do { } while (0)
+#define _add_thread_timeout(thread, pq, ticks) do { } while (0)
 #endif
 
 #ifdef __cplusplus
@@ -151,7 +151,7 @@ void _pend_thread(struct tcs *thread, _wait_q_t *wait_q, int32_t timeout)
 
 	if (timeout != K_FOREVER) {
 		_mark_thread_as_timing(thread);
-		_TIMEOUT_ADD(thread, wait_q, _ms_to_ticks(timeout));
+		_add_thread_timeout(thread, wait_q, _ms_to_ticks(timeout));
 	}
 }
 
@@ -280,6 +280,7 @@ void k_yield(void)
 void k_sleep(int32_t duration)
 {
 	__ASSERT(!_is_in_isr(), "");
+	__ASSERT(duration != K_FOREVER, "");
 
 	K_DEBUG("thread %p for %d ns\n", _current, duration);
 
@@ -293,7 +294,7 @@ void k_sleep(int32_t duration)
 
 	_mark_thread_as_timing(_current);
 	_remove_thread_from_ready_q(_current);
-	_timeout_add(_current, NULL, _ms_to_ticks(duration));
+	_add_thread_timeout(_current, NULL, _ms_to_ticks(duration));
 
 	_Swap(key);
 }
@@ -271,7 +271,7 @@ static void schedule_new_thread(struct k_thread *thread, int32_t delay)
 		start_thread(thread);
 	} else {
 		_mark_thread_as_timing(thread);
-		_timeout_add(thread, NULL, _ms_to_ticks(delay));
+		_add_thread_timeout(thread, NULL, _ms_to_ticks(delay));
 	}
 #else
 	ARG_UNUSED(delay);
@@ -35,7 +35,7 @@ void timer_expiration_handler(struct _timeout *t)
 
 	/* if the time is periodic, start it again */
 	if (timer->period > 0) {
-		_do_timeout_add(NULL, &timer->timeout, &timer->wait_q,
+		_add_timeout(NULL, &timer->timeout, &timer->wait_q,
 				timer->period);
 	}
 
@@ -191,7 +191,7 @@ void k_timer_start(struct k_timer *timer, int32_t duration, int32_t period,
 	timer->stop_handler = stop_handler;
 	timer->stop_handler_arg = stop_handler_arg;
 
-	_do_timeout_add(NULL, &timer->timeout, &timer->wait_q,
+	_add_timeout(NULL, &timer->timeout, &timer->wait_q,
 			_ms_to_ticks(duration));
 	irq_unlock(key);
 }
@@ -108,8 +108,7 @@ int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
 		k_work_submit_to_queue(work_q, &work->work);
 	} else {
 		/* Add timeout */
-		_do_timeout_add(NULL, &work->timeout, NULL,
-				_ms_to_ticks(timeout));
+		_add_timeout(NULL, &work->timeout, NULL, _ms_to_ticks(timeout));
 	}
 
 	err = 0;