kernel: Combine nano_timers and nano_timeouts
To avoid code duplication, nano_timers now use the nano_timeout mechanism.

Change-Id: I916bffd2b86e29ee7b7ff7bbb009cd4c844e2a44
Signed-off-by: Dmitriy Korovkin <dmitriy.korovkin@windriver.com>
This commit is contained in:
parent e23e4c76e2
commit 9b2452047d
13 changed files with 333 additions and 201 deletions
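The resulting public timer API (nano_timer_init(), nano_timer_start(), nano_timer_test(), nano_timer_ticks_remain() and the ISR/fiber/task variants touched below) can be exercised as in the following sketch. The sketch is illustrative only and is not part of this commit; the fiber body and variable names are hypothetical.

/* Illustrative use of the nano timer API after this change (hypothetical code). */
#include <nanokernel.h>

static struct nano_timer example_timer;
static uint32_t example_payload;	/* returned by nano_*_timer_test() on expiry */

static void example_fiber(int arg1, int arg2)
{
	nano_timer_init(&example_timer, &example_payload);
	nano_fiber_timer_start(&example_timer, 20);	/* expire in 20 ticks */

	/* non-blocking poll: NULL while the timer is still running */
	void *data = nano_fiber_timer_test(&example_timer, TICKS_NONE);

	/* how many ticks are left before expiry (0 once expired) */
	int32_t left = nano_timer_ticks_remain(&example_timer);

	/* block until expiry; returns the pointer passed to nano_timer_init() */
	data = nano_fiber_timer_test(&example_timer, TICKS_UNLIMITED);

	(void)data;
	(void)left;
	(void)arg1;
	(void)arg2;
}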
@@ -221,7 +221,7 @@ struct s_NANO {
 	 */
 
 	struct firq_regs firq_regs;
-#ifdef CONFIG_NANO_TIMEOUTS
+#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
 	sys_dlist_t timeout_q;
 	int32_t task_timeout;
 #endif

@@ -159,7 +159,7 @@ struct s_NANO {
 	int32_t idle;		/* Number of ticks for kernel idling */
 #endif /* CONFIG_ADVANCED_POWER_MANAGEMENT */
 
-#ifdef CONFIG_NANO_TIMEOUTS
+#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
 	sys_dlist_t timeout_q;
 	int32_t task_timeout;
 #endif

@@ -727,7 +727,7 @@ typedef struct s_NANO {
 
 	struct tcs *current_fp;	/* thread (fiber or task) that owns the FP regs */
 #endif /* CONFIG_FP_SHARING */
-#ifdef CONFIG_NANO_TIMEOUTS
+#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
 	sys_dlist_t timeout_q;
 	int32_t task_timeout;
 #endif
@@ -169,3 +169,6 @@ APIs for a nanokernel timer provided by :file:`nanokernel.h`
 :cpp:func:`nano_task_timer_stop()`, :cpp:func:`nano_fiber_timer_stop()`,
 :cpp:func:`nano_isr_timer_stop()`, :cpp:func:`nano_timer_stop()`
    Force timer expiration, if not already expired.
+
+:cpp:func:`nano_timer_ticks_remain()`
+   Return timer ticks before timer expiration.
@@ -43,6 +43,8 @@
 extern "C" {
 #endif
 
+struct tcs;
+
 /*
  * @cond internal
  * nanokernel private APIs that are exposed via the public API

@@ -59,6 +61,7 @@ struct _nano_queue {
 
 struct _nano_timeout {
 	sys_dlist_t node;
+	struct tcs *tcs;
 	struct _nano_queue *wait_q;
 	int32_t delta_ticks_from_prev;
 };

@@ -66,8 +69,6 @@ struct _nano_timeout {
  * @endcond
  */
 
-struct tcs;
-
 /* architecture-independent nanokernel public APIs */
 
 typedef struct tcs *nano_thread_id_t;

@@ -1190,10 +1191,13 @@ extern void *sys_thread_custom_data_get(void);
  */
 
 struct nano_timer {
-	struct nano_timer *link;
-	uint32_t ticks;
-	struct nano_lifo lifo;
-	void *userData;
+	struct _nano_timeout timeout_data;
+	void *user_data;
+	/*
+	 * User data pointer in backup for cases when nanokernel_timer_test()
+	 * has to return NULL
+	 */
+	void *user_data_backup;
 #ifdef CONFIG_DEBUG_TRACING_KERNEL_OBJECTS
 	struct nano_timer *__next;
 #endif
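A minimal sketch of the user_data/user_data_backup contract introduced above, under the assumption (taken from the rest of this diff) that nano_timer_start() copies user_data_backup into user_data and that stopping or consuming an expired timer clears user_data again; the helper name is hypothetical and not part of the commit.

#include <stdbool.h>
#include <nanokernel.h>

/* Hypothetical helper, not in this commit: true once the timer has been
 * started and until its expiry is consumed or the timer is stopped,
 * i.e. the window in which nano_timer_test() can return a non-NULL pointer.
 */
static inline bool nano_timer_is_armed(const struct nano_timer *timer)
{
	return timer->user_data != NULL;
}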
@@ -1249,18 +1253,13 @@ extern void nano_timer_start(struct nano_timer *timer, int ticks);
  * not expired.
  * For TICKS_NONE, return immediately.
  * For TICKS_UNLIMITED, wait as long as necessary.
- * Otherwise, wait up to the specified number of ticks before timing
- * out.
  *
  * @return N/A
  *
  * @warning If called from an ISR, then @a timeout_in_ticks must be TICKS_NONE.
  * @sa TICKS_NONE, TICKS_UNLIMITED
  */
-static inline void *nano_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
-{
-	return nano_lifo_get(&timer->lifo, timeout_in_ticks);
-}
+extern void *nano_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks);
 
 /**
  * @brief Stop a nanokernel timer.

@@ -1304,10 +1303,7 @@ extern void nano_isr_timer_start(struct nano_timer *timer, int ticks);
  * @return Pointer to timer initialization data.
  * @retval NULL If timer not expired.
  */
-static inline void *nano_isr_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
-{
-	return nano_isr_lifo_get(&timer->lifo, timeout_in_ticks);
-}
+extern void *nano_isr_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks);
 
 /**
  * @brief Stop a nanokernel timer from an ISR.

@@ -1347,18 +1343,13 @@ extern void nano_fiber_timer_start(struct nano_timer *timer, int ticks);
  * not expired.
  * For TICKS_NONE, return immediately.
  * For TICKS_UNLIMITED, wait as long as necessary.
- * Otherwise, wait up to the specified number of ticks before timing
- * out.
  *
  * @return Pointer to timer initialization data
  * @retval NULL If timer has not expired.
  *
  * @sa TICKS_NONE, TICKS_UNLIMITED
  */
-static inline void *nano_fiber_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
-{
-	return nano_fiber_lifo_get(&timer->lifo, timeout_in_ticks);
-}
+extern void *nano_fiber_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks);
 
 /**
  * @brief Stop a nanokernel timer.

@@ -1398,18 +1389,13 @@ extern void nano_task_timer_start(struct nano_timer *timer, int ticks);
  * not expired.
  * For TICKS_NONE, return immediately.
  * For TICKS_UNLIMITED, wait as long as necessary.
- * Otherwise, wait up to the specified number of ticks before timing
- * out.
  *
  * @return Pointer to timer initialization data.
  * @retval NULL If timer has not expired.
  *
  * @sa TICKS_NONE, TICKS_UNLIMITED
  */
-static inline void *nano_task_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
-{
-	return nano_task_lifo_get(&timer->lifo, timeout_in_ticks);
-}
+extern void *nano_task_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks);
 
 /**
  * @brief Stop a nanokernel timer from a task.

@@ -1422,6 +1408,16 @@ static inline void *nano_task_timer_test(struct nano_timer *timer, int32_t timeo
  */
 extern void nano_task_timer_stop(struct nano_timer *timer);
 
+/**
+ * @brief Get nanokernel timer remaining ticks.
+ *
+ * This function returns the remaining ticks of the previously
+ * started nanokernel timer object.
+ *
+ * @return remaining ticks or 0 if the timer has expired
+ */
+extern int32_t nano_timer_ticks_remain(struct nano_timer *timer);
+
 /* Methods for tasks and fibers for handling time and ticks */
 
 /**
@@ -91,7 +91,6 @@ extern int sys_clock_hw_cycles_per_tick;
 #define SYS_CLOCK_HW_CYCLES_TO_NS(X) (uint32_t)(SYS_CLOCK_HW_CYCLES_TO_NS64(X))
 
 extern int64_t _sys_clock_tick_count;
-extern struct nano_timer *_nano_timer_list;
 
 /*
  * Number of ticks for x seconds. NOTE: With MSEC(), since it does an integer
@@ -51,31 +51,15 @@ void nano_cpu_set_idle(int32_t ticks)
 
 int32_t _sys_idle_ticks_threshold = CONFIG_TICKLESS_IDLE_THRESH;
 
-#if defined(CONFIG_NANO_TIMEOUTS)
-static inline int32_t get_next_timeout_expiry(void)
+#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
+static inline int32_t get_next_tick_expiry(void)
 {
 	return _nano_get_earliest_timeouts_deadline();
 }
 #else
-#define get_next_timeout_expiry(void) TICKS_UNLIMITED
+#define get_next_tick_expiry(void) TICKS_UNLIMITED
 #endif
 
-/**
- *
- * @brief - obtain number of ticks until next timer expires
- *
- * Must be called with interrupts locked to prevent the timer queues from
- * changing.
- *
- * @return Number of ticks until next timer expires.
- *
- */
-
-static inline int32_t get_next_timer_expiry(void)
-{
-	return _nano_timer_list ? _nano_timer_list->ticks : TICKS_UNLIMITED;
-}
-
 static inline int was_in_tickless_idle(void)
 {
 	return (_nanokernel.idle == TICKS_UNLIMITED) ||

@@ -88,14 +72,6 @@ static inline int must_enter_tickless_idle(void)
 	return was_in_tickless_idle();
 }
 
-static inline int32_t get_next_tick_expiry(void)
-{
-	int32_t timers = get_next_timer_expiry();
-	int32_t timeouts = get_next_timeout_expiry();
-
-	return (int32_t)min((uint32_t)timers, (uint32_t)timeouts);
-}
-
 void _power_save_idle(void)
 {
 	_nanokernel.idle = get_next_tick_expiry();
@@ -29,6 +29,13 @@
 extern "C" {
 #endif
 
+static inline int _do_nano_timeout_abort(struct _nano_timeout *t);
+static inline void _do_nano_timeout_add(struct tcs *tcs,
+					struct _nano_timeout *t,
+					struct _nano_queue *wait_q,
+					int32_t timeout);
+
+#if defined(CONFIG_NANO_TIMEOUTS)
 /* initialize the nano timeouts part of TCS when enabled in the kernel */
 
 static inline void _nano_timeout_tcs_init(struct tcs *tcs)

@@ -46,6 +53,12 @@ static inline void _nano_timeout_tcs_init(struct tcs *tcs)
 	 */
 	tcs->nano_timeout.wait_q = NULL;
 
+	/*
+	 * Must be initialized here, so the _nano_timeout_handle_one_timeout()
+	 * routine can check if there is a fiber waiting on this timeout
+	 */
+	tcs->nano_timeout.tcs = NULL;
+
 	/*
 	 * These are initialized when enqueing on the timeout queue:
 	 *

@@ -54,6 +67,44 @@ static inline void _nano_timeout_tcs_init(struct tcs *tcs)
 	 */
 }
 
+/**
+ * @brief Remove the thread from nanokernel object wait queue
+ *
+ * If a thread waits on a nanokernel object with timeout,
+ * remove the thread from the wait queue
+ *
+ * @param tcs Waiting thread
+ * @param t nano timer
+ *
+ * @return N/A
+ */
+static inline void _nano_timeout_object_dequeue(
+	struct tcs *tcs, struct _nano_timeout *t)
+{
+	if (t->wait_q) {
+		_nano_timeout_remove_tcs_from_wait_q(tcs, t->wait_q);
+		fiberRtnValueSet(tcs, 0);
+	}
+}
+
+/* abort a timeout for a specified fiber */
+static inline int _nano_timeout_abort(struct tcs *tcs)
+{
+	return _do_nano_timeout_abort(&tcs->nano_timeout);
+}
+
+/* put a fiber on the timeout queue and record its wait queue */
+static inline void _nano_timeout_add(struct tcs *tcs,
+					struct _nano_queue *wait_q,
+					int32_t timeout)
+{
+	_do_nano_timeout_add(tcs, &tcs->nano_timeout, wait_q, timeout);
+}
+
+#else
+#define _nano_timeout_object_dequeue(tcs, t) do { } while (0)
+#endif /* CONFIG_NANO_TIMEOUTS */
+
 /*
  * Handle one expired timeout.
  * This removes the fiber from the timeout queue head, and also removes it
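Illustrative comparison, not part of the diff: with the wrappers above, a fiber timeout and a nano timer land on the same delta-sorted timeout_q and differ only in the bookkeeping passed to _do_nano_timeout_add(). The sketch assumes the kernel-internal declarations from timeout_q.h and nano_private.h.

/* Hypothetical internal snippet showing how both paths share the queue. */
static void example_arm_both(struct tcs *waiting_fiber,
			     struct _nano_queue *object_wait_q,
			     struct nano_timer *timer)
{
	/* fiber blocking on a nanokernel object with a 10-tick timeout:
	 * records the TCS and the object's wait queue
	 */
	_nano_timeout_add(waiting_fiber, object_wait_q, 10);

	/* nano timer armed for 10 ticks: no waiting TCS yet, no wait queue */
	_nano_timer_timeout_add(&timer->timeout_data, NULL, 10);
}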
@@ -65,13 +116,12 @@ static inline struct _nano_timeout *_nano_timeout_handle_one_timeout(
 	sys_dlist_t *timeout_q)
 {
 	struct _nano_timeout *t = (void *)sys_dlist_get(timeout_q);
-	struct tcs *tcs = CONTAINER_OF(t, struct tcs, nano_timeout);
+	struct tcs *tcs = t->tcs;
 
-	if (tcs->nano_timeout.wait_q) {
-		_nano_timeout_remove_tcs_from_wait_q(tcs);
-		fiberRtnValueSet(tcs, (unsigned int)0);
-	}
-	_nano_fiber_ready(tcs);
+	if (tcs != NULL) {
+		_nano_timeout_object_dequeue(tcs, t);
+		_nano_fiber_ready(tcs);
+	}
 	t->delta_ticks_from_prev = -1;
 
 	return (struct _nano_timeout *)sys_dlist_peek_head(timeout_q);

@@ -89,11 +139,17 @@ static inline void _nano_timeout_handle_timeouts(void)
 	}
 }
 
-/* abort a timeout for a specific fiber */
-static inline int _nano_timeout_abort(struct tcs *tcs)
+/**
+ *
+ * @brief abort a timeout
+ *
+ * @param t Timeout to abort
+ *
+ * @return 0 in success and -1 if the timer has expired
+ */
+static inline int _do_nano_timeout_abort(struct _nano_timeout *t)
 {
 	sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
-	struct _nano_timeout *t = &tcs->nano_timeout;
 
 	if (-1 == t->delta_ticks_from_prev) {
 		return -1;

@@ -111,6 +167,11 @@ static inline int _nano_timeout_abort(struct tcs *tcs)
 	return 0;
 }
 
+static inline int _nano_timer_timeout_abort(struct _nano_timeout *t)
+{
+	return _do_nano_timeout_abort(t);
+}
+
 /*
  * callback for sys_dlist_insert_at():
  *
@@ -137,14 +198,25 @@ static int _nano_timeout_insert_point_test(sys_dnode_t *test, void *timeout)
 	return 1;
 }
 
-/* put a fiber on the timeout queue and record its wait queue */
-static inline void _nano_timeout_add(struct tcs *tcs,
+/**
+ *
+ * @brief Put timeout on the timeout queue, record waiting fiber and wait queue
+ *
+ * @param tcs Fiber waiting on a timeout
+ * @param t Timeout structure to be added to the nanokernel queue
+ * @wait_q nanokernel object wait queue
+ * @timeout Timeout in ticks
+ *
+ * @return N/A
+ */
+static inline void _do_nano_timeout_add(struct tcs *tcs,
+					struct _nano_timeout *t,
 					struct _nano_queue *wait_q,
 					int32_t timeout)
 {
 	sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
-	struct _nano_timeout *t = &tcs->nano_timeout;
 
+	t->tcs = tcs;
 	t->delta_ticks_from_prev = timeout;
 	t->wait_q = wait_q;
 	sys_dlist_insert_at(timeout_q, (void *)t,

@@ -152,6 +224,13 @@ static inline void _nano_timeout_add(struct tcs *tcs,
 			&t->delta_ticks_from_prev);
 }
 
+static inline void _nano_timer_timeout_add(struct _nano_timeout *t,
+					struct _nano_queue *wait_q,
+					int32_t timeout)
+{
+	_do_nano_timeout_add(NULL, t, wait_q, timeout);
+}
+
 /* find the closest deadline in the timeout queue */
 static inline uint32_t _nano_get_earliest_timeouts_deadline(void)
 {
@@ -74,11 +74,10 @@ static inline void _nano_wait_q_put(struct _nano_queue *wait_q)
 	wait_q->tail = _nanokernel.current;
 }
 
-#ifdef CONFIG_NANO_TIMEOUTS
-static inline void _nano_timeout_remove_tcs_from_wait_q(struct tcs *tcs)
+#if defined(CONFIG_NANO_TIMEOUTS)
+static inline void _nano_timeout_remove_tcs_from_wait_q(
+	struct tcs *tcs, struct _nano_queue *wait_q)
 {
-	struct _nano_queue *wait_q = tcs->nano_timeout.wait_q;
-
 	if (wait_q->head == tcs) {
 		if (wait_q->tail == wait_q->head) {
 			_nano_wait_q_reset(wait_q);

@@ -111,6 +110,14 @@ static inline void _nano_timeout_remove_tcs_from_wait_q(struct tcs *tcs)
 	} while (0)
 #define _NANO_TIMEOUT_SET_TASK_TIMEOUT(ticks) \
 	_nanokernel.task_timeout = (ticks)
+#elif defined(CONFIG_NANO_TIMERS)
+	#include <timeout_q.h>
+	#define _nano_timeout_tcs_init(tcs) do { } while ((0))
+	#define _nano_timeout_abort(tcs) do { } while ((0))
+
+	#define _NANO_TIMEOUT_TICK_GET()  0
+	#define _NANO_TIMEOUT_ADD(pq, ticks) do { } while (0)
+	#define _NANO_TIMEOUT_SET_TASK_TIMEOUT(ticks) do { } while ((0))
 #else
 	#define _nano_timeout_tcs_init(tcs) do { } while ((0))
 	#define _nano_timeout_abort(tcs) do { } while ((0))
@@ -82,7 +82,7 @@ char __noinit __stack main_task_stack[CONFIG_MAIN_STACK_SIZE];
 
 char __noinit _interrupt_stack[CONFIG_ISR_STACK_SIZE];
 
-#ifdef CONFIG_NANO_TIMEOUTS
+#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
 #include <misc/dlist.h>
 #define initialize_nano_timeouts() do { \
 	sys_dlist_init(&_nanokernel.timeout_q); \
@@ -150,7 +150,7 @@ uint32_t sys_tick_delta_32(int64_t *reftime)
 
 /* handle the expired timeouts in the nano timeout queue */
 
-#ifdef CONFIG_NANO_TIMEOUTS
+#if defined(CONFIG_NANO_TIMEOUTS) || defined(CONFIG_NANO_TIMERS)
 #include <wait_q.h>
 
 static inline void handle_expired_nano_timeouts(int32_t ticks)

@@ -168,27 +168,6 @@ static inline void handle_expired_nano_timeouts(int32_t ticks)
 #define handle_expired_nano_timeouts(ticks) do { } while ((0))
 #endif
 
-/* handle the expired nano timers in the nano timers queue */
-#ifdef CONFIG_NANO_TIMERS
-#include <sys_clock.h>
-static inline void handle_expired_nano_timers(int ticks)
-{
-	if (_nano_timer_list) {
-		_nano_timer_list->ticks -= ticks;
-
-		while (_nano_timer_list && (!_nano_timer_list->ticks)) {
-			struct nano_timer *expired = _nano_timer_list;
-			struct nano_lifo *lifo = &expired->lifo;
-
-			_nano_timer_list = expired->link;
-			nano_isr_lifo_put(lifo, expired->userData);
-		}
-	}
-}
-#else
-#define handle_expired_nano_timers(ticks) do { } while ((0))
-#endif
-
 /**
  *
  * @brief Announce a tick to the nanokernel

@@ -206,29 +185,14 @@ void _nano_sys_clock_tick_announce(int32_t ticks)
 	key = irq_lock();
 	_sys_clock_tick_count += ticks;
 	handle_expired_nano_timeouts(ticks);
-	handle_expired_nano_timers(ticks);
 	irq_unlock(key);
 }
 
-/* get closest nano timers deadline expiry, (uint32_t)TICKS_UNLIMITED if none */
-#ifdef CONFIG_NANO_TIMERS
-static inline uint32_t _nano_get_earliest_timers_deadline(void)
-{
-	return _nano_timer_list ? _nano_timer_list->ticks : TICKS_UNLIMITED;
-}
-#else
-static inline uint32_t _nano_get_earliest_timers_deadline(void)
-{
-	return TICKS_UNLIMITED;
-}
-#endif
-
 /*
  * Get closest nano timeouts/timers deadline expiry, (uint32_t)TICKS_UNLIMITED
  * if none.
  */
 uint32_t _nano_get_earliest_deadline(void)
 {
-	return min(_nano_get_earliest_timeouts_deadline(),
-		_nano_get_earliest_timers_deadline());
+	return _nano_get_earliest_timeouts_deadline();
 }
@@ -1,7 +1,7 @@
 /* nano_timer.c - timer for nanokernel-only systems */
 
 /*
- * Copyright (c) 1997-2014 Wind River Systems, Inc.
+ * Copyright (c) 1997-2016 Wind River Systems, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.

@@ -18,14 +18,24 @@
 
 #include <nano_private.h>
 #include <misc/debug/object_tracing_common.h>
+#include <wait_q.h>
 
-struct nano_timer *_nano_timer_list;
-
 void nano_timer_init(struct nano_timer *timer, void *data)
 {
-	nano_lifo_init(&timer->lifo);
-	timer->userData = data;
+	/* initialize timer in expired state */
+	timer->timeout_data.delta_ticks_from_prev = -1;
+
+	/* initialize to no object to wait on */
+	timer->timeout_data.wait_q = NULL;
+
+	/* initialize to no fiber waiting for the timer expire */
+	timer->timeout_data.tcs = NULL;
+
+	/* nano_timer_test() returns NULL on timer that was not started */
+	timer->user_data = NULL;
+
+	timer->user_data_backup = data;
+
 	SYS_TRACING_OBJ_INIT(nano_timer, timer);
 }
 
@@ -49,76 +59,16 @@ FUNC_ALIAS(_timer_start, nano_timer_start, void);
  */
 void _timer_start(struct nano_timer *timer, int ticks)
 {
-	unsigned int imask;
-	struct nano_timer *cur;
-	struct nano_timer *prev = NULL;
-
-	timer->ticks = ticks;
-
-	imask = irq_lock();
-
-	cur = _nano_timer_list;
-
-	while (cur && (timer->ticks > cur->ticks)) {
-		timer->ticks -= cur->ticks;
-		prev = cur;
-		cur = cur->link;
-	}
-
-	timer->link = cur;
-	if (cur != NULL)
-		cur->ticks -= timer->ticks;
-
-	if (prev != NULL)
-		prev->link = timer;
-	else
-		_nano_timer_list = timer;
-
-	irq_unlock(imask);
-}
-
-/**
- * @brief Stop a nanokernel timer (generic implementation)
- *
- * This function stops a previously started nanokernel timer object.
- * @param timer Timer to stop
- * @return N/A
- */
-static void _timer_stop(struct nano_timer *timer)
-{
-	unsigned int imask;
-	struct nano_timer *cur;
-	struct nano_timer *prev = NULL;
-
-	imask = irq_lock();
-
-	cur = _nano_timer_list;
-
-	/* find prev */
-	while (cur && cur != timer) {
-		prev = cur;
-		cur = cur->link;
-	}
-
-	/* if found it, remove it */
-	if (cur) {
-		/* if it was first */
-		if (prev == NULL) {
-			_nano_timer_list = timer->link;
-			/* if not last */
-			if (_nano_timer_list)
-				_nano_timer_list->ticks += timer->ticks;
-		} else {
-			prev->link = timer->link;
-			/* if not last */
-			if (prev->link)
-				prev->link->ticks += timer->ticks;
-		}
-	}
-
-	/* now the timer can't expire since it is removed from the list */
-
-	irq_unlock(imask);
+	int key = irq_lock();
+
+	/*
+	 * Once timer is started nano_timer_test() returns
+	 * the pointer to user data
+	 */
+	timer->user_data = timer->user_data_backup;
+	_nano_timer_timeout_add(&timer->timeout_data,
+				NULL, ticks);
+	irq_unlock(key);
 }
@@ -126,24 +76,47 @@ FUNC_ALIAS(_timer_stop_non_preemptible, nano_isr_timer_stop, void);
 FUNC_ALIAS(_timer_stop_non_preemptible, nano_fiber_timer_stop, void);
 void _timer_stop_non_preemptible(struct nano_timer *timer)
 {
-	extern void _lifo_put_non_preemptible(struct nano_lifo *lifo, void *data);
-
-	_timer_stop(timer);
-
-	/* if there was a waiter, kick it */
-	if (timer->lifo.wait_q.head) {
-		_lifo_put_non_preemptible(&timer->lifo, (void *)0);
+	struct _nano_timeout *t = &timer->timeout_data;
+	struct tcs *tcs = t->tcs;
+	int key = irq_lock();
+
+	/*
+	 * Verify first if fiber is not waiting on an object,
+	 * timer is not expired and there is a fiber waiting
+	 * on it
+	 */
+	if (!t->wait_q && (_nano_timer_timeout_abort(t) == 0) &&
+	    tcs != NULL) {
+		_nano_fiber_ready(tcs);
 	}
+
+	/*
+	 * After timer gets aborted nano_timer_test() should
+	 * return NULL until timer gets restarted
+	 */
+	timer->user_data = NULL;
+	irq_unlock(key);
 }
 
 void nano_task_timer_stop(struct nano_timer *timer)
 {
-	_timer_stop(timer);
+	struct _nano_timeout *t = &timer->timeout_data;
+	struct tcs *tcs = t->tcs;
+	int key = irq_lock();
+
+	timer->user_data = NULL;
 
-	/* if there was a waiter, kick it */
-	if (timer->lifo.wait_q.head) {
-		nano_task_lifo_put(&timer->lifo, (void *)0);
+	/*
+	 * Verify first if fiber is not waiting on an object,
+	 * timer is not expired and there is a fiber waiting
+	 * on it
+	 */
+	if (!t->wait_q && (_nano_timer_timeout_abort(t) == 0) &&
+	    tcs != NULL) {
+		_nano_fiber_ready(tcs);
+		_Swap(key);
+	} else {
+		irq_unlock(key);
 	}
 }
@@ -157,3 +130,131 @@ void nano_timer_stop(struct nano_timer *timer)
 
 	func[sys_execution_context_type_get()](timer);
 }
+
+/**
+ *
+ * @brief Test nano timer for cases when the calling thread does not wait
+ *
+ * @param timer Timer to check
+ * @param timeout_in_ticks Determines the action to take when the timer has
+ *        not expired.
+ *        For TICKS_NONE, return immediately.
+ *        For TICKS_UNLIMITED, wait as long as necessary.
+ * @param user_data_ptr Pointer to user data if the timer is expired
+ *        it's set to timer->user_data. Otherwise it's set to NULL
+ *
+ * @return 1 if the thread waits for timer to expire and 0 otherwise
+ */
+
+static int _nano_timer_expire_wait(struct nano_timer *timer,
+				   int32_t timeout_in_ticks,
+				   void **user_data_ptr)
+{
+	struct _nano_timeout *t = &timer->timeout_data;
+
+	/* check if the timer has expired */
+	if (t->delta_ticks_from_prev == -1) {
+		*user_data_ptr = timer->user_data;
+		timer->user_data = NULL;
+	/* if the thread should not wait, return immediately */
+	} else if (timeout_in_ticks == TICKS_NONE) {
+		*user_data_ptr = NULL;
+	} else {
+		return 1;
+	}
+	return 0;
+}
+
+void *nano_isr_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
+{
+	int key = irq_lock();
+	void *user_data;
+
+	if (_nano_timer_expire_wait(timer, timeout_in_ticks, &user_data)) {
+		/* since ISR can not wait, return NULL */
+		user_data = NULL;
+	}
+	irq_unlock(key);
+	return user_data;
+}
+
+void *nano_fiber_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
+{
+	int key = irq_lock();
+	struct _nano_timeout *t = &timer->timeout_data;
+	void *user_data;
+
+	if (_nano_timer_expire_wait(timer, timeout_in_ticks, &user_data)) {
+		t->tcs = _nanokernel.current;
+		_Swap(key);
+		key = irq_lock();
+		user_data = timer->user_data;
+		timer->user_data = NULL;
+	}
+	irq_unlock(key);
+	return user_data;
+}
+
+void *nano_task_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
+{
+	int key = irq_lock();
+	struct _nano_timeout *t = &timer->timeout_data;
+	void *user_data;
+
+	if (_nano_timer_expire_wait(timer, timeout_in_ticks, &user_data)) {
+		/* task goes to busy waiting loop */
+		while (t->delta_ticks_from_prev != -1) {
+			_nanokernel.task_timeout =
+				nano_timer_ticks_remain(timer);
+			nano_cpu_atomic_idle(key);
+			key = irq_lock();
+		}
+		user_data = timer->user_data;
+		timer->user_data = NULL;
+	}
+	irq_unlock(key);
+	return user_data;
+}
+
+void *nano_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
+{
+	static void *(*func[3])(struct nano_timer *, int32_t) = {
+		nano_isr_timer_test,
+		nano_fiber_timer_test,
+		nano_task_timer_test,
+	};
+
+	return func[sys_execution_context_type_get()](timer, timeout_in_ticks);
+}
+
+int32_t nano_timer_ticks_remain(struct nano_timer *timer)
+{
+	int key = irq_lock();
+	int32_t remaining_ticks;
+	struct _nano_timeout *t = &timer->timeout_data;
+	sys_dlist_t *timeout_q = &_nanokernel.timeout_q;
+	struct _nano_timeout *iterator;
+
+	if (t->delta_ticks_from_prev == -1) {
+		remaining_ticks = 0;
+	} else {
+		/*
+		 * As nanokernel timeouts are stored in a linked list with
+		 * delta_ticks_from_prev, to get the actual number of ticks
+		 * remaining for the timer, walk through the timeouts list
+		 * and accumulate all the delta_ticks_from_prev values up to
+		 * the timer.
+		 */
+		iterator =
+			(struct _nano_timeout *)sys_dlist_peek_head(timeout_q);
+		remaining_ticks = iterator->delta_ticks_from_prev;
+		while (iterator != t) {
+			iterator = (struct _nano_timeout *)sys_dlist_peek_next(
+				timeout_q, &iterator->node);
+			remaining_ticks += iterator->delta_ticks_from_prev;
+		}
+	}
+
+	irq_unlock(key);
+	return remaining_ticks;
+}
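A worked example of the delta-tick walk performed by nano_timer_ticks_remain() above (standalone illustration, not kernel code): timers due in 3, 7 and 12 ticks are kept on timeout_q as deltas from their predecessor, and an absolute value is recovered by summing the deltas from the head of the list.

#include <stdio.h>

int main(void)
{
	/* timers due in 3, 7 and 12 ticks, stored as deltas 3, 7-3, 12-7 */
	int deltas[] = { 3, 4, 5 };
	int remaining = 0;

	/* accumulate up to and including the last timer's node */
	for (int i = 0; i < 3; i++) {
		remaining += deltas[i];
	}
	printf("ticks remaining for the last timer: %d\n", remaining); /* 12 */
	return 0;
}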
@@ -51,10 +51,17 @@
 #define DEBUG DEBUG_NONE
 #include "contiki/ip/uip-debug.h"
 
+/*
+ * Code indicates that the timer has expired.
+ * nano_timer_test(), called by timer_expired() returns it
+ * on timer expiration instead of NULL if timer is still running
+ */
+#define TIMER_EXPIRED_CODE ((void *)0xfede0123)
+
 static inline void do_init(struct timer *t)
 {
 	if (t && !t->init_done) {
-		nano_timer_init(&t->nano_timer, NULL);
+		nano_timer_init(&t->nano_timer, TIMER_EXPIRED_CODE);
 		t->init_done = true;
 	}
 }
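The sentinel matters because nano_timer_test() signals expiry by returning the pointer given to nano_timer_init(); had this glue kept passing NULL, expiry would be indistinguishable from a still-running timer. A sketch of the resulting check, assuming this file's struct timer (illustrative only, not part of the commit):

#include <stdbool.h>

/* Hypothetical helper: the sentinel is returned only after expiry. */
static bool timer_has_fired(struct timer *t)
{
	return nano_timer_test(&t->nano_timer, TICKS_NONE) == TIMER_EXPIRED_CODE;
}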
@@ -174,7 +181,7 @@ timer_expired(struct timer *t)
 {
 	do_init(t);
 
-	return t->nano_timer.ticks == 0;
+	return (nano_timer_test(&t->nano_timer, TICKS_NONE) != NULL);
 }
 /*---------------------------------------------------------------------------*/
 bool timer_is_triggered(struct timer *t)

@@ -200,7 +207,7 @@ void timer_set_triggered(struct timer *t)
 clock_time_t
 timer_remaining(struct timer *t)
 {
-	return t->nano_timer.ticks;
+	return nano_timer_ticks_remain(&t->nano_timer);
 }
 /*---------------------------------------------------------------------------*/
 bool timer_stop(struct timer *t)