kernel: add defines for delta_ticks_from_prev special values
Use _INACTIVE instead of hardcoding -1.

_EXPIRED is defined as -2 and will be used for an improvement so that
interrupts are not locked for a non-deterministic amount of time while
handling expired timeouts.

_abort_timeout/_abort_thread_timeout return _INACTIVE instead of -1 if
the timeout has already been disabled.

Change-Id: If99226ff316a62c27b2a2e4e874388c3c44a8aeb
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
parent 88b3691415
commit d211a52fc0
5 changed files with 22 additions and 16 deletions
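To illustrate the calling convention the message describes, here is a minimal, self-contained sketch; the struct and the _abort_timeout() body below are simplified stand-ins for the kernel's actual definitions, not the real implementation. The point is that callers compare the return value against the named _INACTIVE constant instead of a hardcoded -1.

#include <stdio.h>

/* special values for delta_ticks_from_prev (as added by this commit) */
#define _EXPIRED  (-2)  /* timed out, no longer on _timeout_q */
#define _INACTIVE (-1)  /* not in use */

/* simplified stand-in for the kernel's struct _timeout */
struct _timeout {
	int delta_ticks_from_prev;
};

/* toy version of _abort_timeout(): reports an already-disabled timeout
 * by returning _INACTIVE, otherwise disables it and returns 0 */
static int _abort_timeout(struct _timeout *t)
{
	if (t->delta_ticks_from_prev == _INACTIVE) {
		return _INACTIVE;
	}
	t->delta_ticks_from_prev = _INACTIVE;
	return 0;
}

int main(void)
{
	struct _timeout t = { .delta_ticks_from_prev = 5 };

	/* caller checks against the named value, not a bare -1 */
	if (_abort_timeout(&t) == _INACTIVE) {
		printf("timeout was already disabled\n");
	} else {
		printf("timeout aborted\n");
	}
	return 0;
}

This is the same pattern the diff applies to k_wakeup() and k_timer_stop() below.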
@@ -243,13 +243,19 @@ extern void k_thread_abort(k_tid_t thread);
  * @cond INTERNAL_HIDDEN
  */
 
+/* timeout has timed out and is not on _timeout_q anymore */
+#define _EXPIRED (-2)
+
+/* timeout is not in use */
+#define _INACTIVE (-1)
+
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 #define _THREAD_TIMEOUT_INIT(obj) \
 	(obj).nano_timeout = { \
 	.node = { {0}, {0} }, \
 	.thread = NULL, \
 	.wait_q = NULL, \
-	.delta_ticks_from_prev = -1, \
+	.delta_ticks_from_prev = _INACTIVE, \
 	},
 #else
 #define _THREAD_TIMEOUT_INIT(obj)
@@ -695,7 +701,7 @@ struct k_timer {
 
 #define K_TIMER_INITIALIZER(obj, expiry, stop) \
 	{ \
-	.timeout.delta_ticks_from_prev = -1, \
+	.timeout.delta_ticks_from_prev = _INACTIVE, \
 	.timeout.wait_q = NULL, \
 	.timeout.thread = NULL, \
 	.timeout.func = _timer_expiration_handler, \
@@ -268,7 +268,7 @@ static inline void _mark_thread_as_not_suspended(struct k_thread *thread)
 static inline int _is_thread_timeout_active(struct k_thread *thread)
 {
 #ifdef CONFIG_SYS_CLOCK_EXISTS
-	return thread->base.timeout.delta_ticks_from_prev != -1;
+	return thread->base.timeout.delta_ticks_from_prev != _INACTIVE;
 #else
 	return 0;
 #endif
@@ -38,7 +38,7 @@ static inline void _init_timeout(struct _timeout *t, _timeout_func_t func)
 	 * not dealing with timeouts does not have to handle this, such as when
 	 * waiting forever on a semaphore.
 	 */
-	t->delta_ticks_from_prev = -1;
+	t->delta_ticks_from_prev = _INACTIVE;
 
 	/*
 	 * Must be initialized here so that the _fiber_wakeup family of APIs can
@@ -98,7 +98,7 @@ static inline struct _timeout *_handle_one_timeout(
 	struct _timeout *t = (void *)sys_dlist_get(timeout_q);
 	struct k_thread *thread = t->thread;
 
-	t->delta_ticks_from_prev = -1;
+	t->delta_ticks_from_prev = _INACTIVE;
 
 	K_DEBUG("timeout %p\n", t);
 	if (thread != NULL) {
@@ -127,14 +127,13 @@ static inline void _handle_timeouts(void)
 	}
 }
 
-/* returns 0 in success and -1 if the timer has expired */
-
+/* returns _INACTIVE if the timer has already expired */
 static inline int _abort_timeout(struct _timeout *t)
 {
 	sys_dlist_t *timeout_q = &_timeout_q;
 
-	if (-1 == t->delta_ticks_from_prev) {
-		return -1;
+	if (t->delta_ticks_from_prev == _INACTIVE) {
+		return _INACTIVE;
 	}
 
 	if (!sys_dlist_is_tail(timeout_q, &t->node)) {
@@ -144,11 +143,12 @@ static inline int _abort_timeout(struct _timeout *t)
 		next->delta_ticks_from_prev += t->delta_ticks_from_prev;
 	}
 	sys_dlist_remove(&t->node);
-	t->delta_ticks_from_prev = -1;
+	t->delta_ticks_from_prev = _INACTIVE;
 
 	return 0;
 }
 
+/* returns _INACTIVE if the timer has already expired */
 static inline int _abort_thread_timeout(struct k_thread *thread)
 {
 	return _abort_timeout(&thread->base.timeout);
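For context on the hunk above: _timeout_q is a delta list, so each entry's delta_ticks_from_prev is relative to its predecessor, and aborting an entry must fold its remaining delta into its successor before unlinking it (the `next->delta_ticks_from_prev += ...` line). Below is a simplified, self-contained sketch of that bookkeeping; the plain singly linked list is only a stand-in for the kernel's sys_dlist, not the real data structure.

#include <stdio.h>
#include <stddef.h>

#define _INACTIVE (-1)

struct toy_timeout {
	struct toy_timeout *next;
	int delta_ticks_from_prev;
};

/* remove t from the delta list headed by *head, preserving the
 * absolute expiry times of everything that comes after it */
static void toy_abort(struct toy_timeout **head, struct toy_timeout *t)
{
	struct toy_timeout **pp = head;

	while (*pp && *pp != t) {
		pp = &(*pp)->next;
	}
	if (*pp == NULL) {
		return;
	}
	if (t->next) {
		/* successor now measures its delay from t's predecessor */
		t->next->delta_ticks_from_prev += t->delta_ticks_from_prev;
	}
	*pp = t->next;
	t->delta_ticks_from_prev = _INACTIVE;
}

int main(void)
{
	/* three timeouts expiring at absolute ticks 3, 5 and 9 */
	struct toy_timeout c = { NULL, 4 };
	struct toy_timeout b = { &c,   2 };
	struct toy_timeout a = { &b,   3 };
	struct toy_timeout *head = &a;

	toy_abort(&head, &b);

	/* c's delta becomes 6, so it still expires 9 ticks from now */
	printf("c.delta = %d\n", c.delta_ticks_from_prev);
	return 0;
}

Removing the middle entry leaves the last entry's absolute expiry unchanged (3 + 6 = 9 ticks), which is exactly what the `+=` in _abort_timeout() guarantees.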
@@ -313,7 +313,7 @@ void k_wakeup(k_tid_t thread)
 		return;
 	}
 
-	if (_abort_thread_timeout(thread) < 0) {
+	if (_abort_thread_timeout(thread) == _INACTIVE) {
 		irq_unlock(key);
 		return;
 	}
@@ -111,7 +111,7 @@ void k_timer_start(struct k_timer *timer, int32_t duration, int32_t period)
 
 	unsigned int key = irq_lock();
 
-	if (timer->timeout.delta_ticks_from_prev != -1) {
+	if (timer->timeout.delta_ticks_from_prev != _INACTIVE) {
 		_abort_timeout(&timer->timeout);
 	}
 
@@ -128,11 +128,11 @@ void k_timer_stop(struct k_timer *timer)
 	__ASSERT(!_is_in_isr(), "");
 
 	int key = irq_lock();
-	int stopped = _abort_timeout(&timer->timeout);
+	int inactive = (_abort_timeout(&timer->timeout) == _INACTIVE);
 
 	irq_unlock(key);
 
-	if (stopped == -1) {
+	if (inactive) {
 		return;
 	}
 
@@ -175,7 +175,7 @@ uint32_t k_timer_status_sync(struct k_timer *timer)
 	uint32_t result = timer->status;
 
 	if (result == 0) {
-		if (timer->timeout.delta_ticks_from_prev != -1) {
+		if (timer->timeout.delta_ticks_from_prev != _INACTIVE) {
 			/* wait for timer to expire or stop */
 			_pend_current_thread(&timer->wait_q, K_FOREVER);
 			_Swap(key);
@@ -201,7 +201,7 @@ int32_t _timeout_remaining_get(struct _timeout *timeout)
 	unsigned int key = irq_lock();
 	int32_t remaining_ticks;
 
-	if (timeout->delta_ticks_from_prev == -1) {
+	if (timeout->delta_ticks_from_prev == _INACTIVE) {
 		remaining_ticks = 0;
 	} else {
 		/*