kernel/timeout: Add absolute timeout APIs
Add support for "absolute" timeouts, which are expressed relative to system uptime instead of deltas from current time. These allow for more race-resistant code to be written by allowing application code to do a single timeout computation, once, and then reuse the timeout value even if the thread wakes up and needs to suspend again later. Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
parent
cfeb07eded
commit
4c7b77a716
6 changed files with 106 additions and 8 deletions
|
@ -1606,6 +1606,32 @@ const char *k_thread_state_str(k_tid_t thread_id);
|
|||
*/
|
||||
#define K_FOREVER Z_FOREVER
|
||||
|
||||
/**
|
||||
* @brief Generates an absolute/uptime timeout value in ticks
|
||||
*
|
||||
* This macro generates a timeout delay that represents an expiration
|
||||
* at the absolute uptime value specified, in ticks. That is, the
|
||||
* timeout will expire immediately after the system uptime reaches the
|
||||
* specified tick count.
|
||||
*
|
||||
* @param t Tick uptime value
|
||||
* @return Timeout delay value
|
||||
*/
|
||||
#define K_TIMEOUT_ABS_TICKS(t) Z_TIMEOUT_TICKS(Z_TICK_ABS(MAX(t, 0)))
|
||||
|
||||
/**
|
||||
* @brief Generates an absolute/uptime timeout value in ms
|
||||
*
|
||||
* This macro generates a timeout delay that represents an expiration
|
||||
* at the absolute uptime value specified, in milliseconds. That is,
|
||||
* the timeout will expire immediately after the system uptime reaches
|
||||
* the specified millisecond count.
|
||||
*
|
||||
* @param t Millisecond uptime value
|
||||
* @return Timeout delay value
|
||||
*/
|
||||
#define K_TIMEOUT_ABS_MS(t) K_TIMEOUT_ABS_TICKS(k_ms_to_ticks_ceil64(t))
|
||||
|
||||
/**
|
||||
* @}
|
||||
*/
|
||||
|
|
|
@ -77,6 +77,16 @@ typedef struct {
|
|||
#define Z_TIMEOUT_NS(t) Z_TIMEOUT_TICKS(k_ns_to_ticks_ceil32(MAX(t, 0)))
|
||||
#define Z_TIMEOUT_CYC(t) Z_TIMEOUT_TICKS(k_cyc_to_ticks_ceil32(MAX(t, 0)))
|
||||
|
||||
/* Converts between absolute timeout expiration values (packed into
|
||||
* the negative space below K_TICKS_FOREVER) and (non-negative) delta
|
||||
* timeout values. If the result of Z_TICK_ABS(t) is >= 0, then the
|
||||
* value was an absolute timeout with the returned expiration time.
|
||||
* Note that this macro is bidirectional: Z_TICK_ABS(Z_TICK_ABS(t)) ==
|
||||
* t for all inputs, and that the representation of K_TICKS_FOREVER is
|
||||
* the same value in both spaces! Clever, huh?
|
||||
*/
|
||||
#define Z_TICK_ABS(t) (K_TICKS_FOREVER - 1 - (t))
|
||||
|
||||
#else
|
||||
|
||||
/* Legacy timeout API */
|
||||
|
|
|
@ -587,7 +587,9 @@ config TIMEOUT_64BIT
|
|||
When this option is true, the k_ticks_t values passed to
|
||||
kernel APIs will be a 64 bit quantity, allowing the use of
|
||||
larger values (and higher precision tick rates) without fear
|
||||
of overflowing the 32 bit word.
|
||||
of overflowing the 32 bit word. This feature also gates the
|
||||
availability of absolute timeout values (which require the
|
||||
extra precision).
|
||||
|
||||
config XIP
|
||||
bool "Execute in place"
|
||||
|
|
|
@ -90,6 +90,10 @@ void z_add_timeout(struct _timeout *to, _timeout_func_t fn,
|
|||
k_ticks_t ticks = timeout;
|
||||
#else
|
||||
k_ticks_t ticks = timeout.ticks + 1;
|
||||
|
||||
if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(ticks) >= 0) {
|
||||
ticks = Z_TICK_ABS(ticks) - (curr_tick + elapsed());
|
||||
}
|
||||
#endif
|
||||
|
||||
__ASSERT(!sys_dnode_is_linked(&to->node), "");
|
||||
|
@ -257,7 +261,9 @@ static inline s64_t z_vrfy_k_uptime_get(void)
|
|||
#endif
|
||||
|
||||
/* Returns the uptime expiration (relative to an unlocked "now"!) of a
|
||||
* timeout object.
|
||||
* timeout object. When used correctly, this should be called once,
|
||||
* synchronously with the user passing a new timeout value. It should
|
||||
* not be used iteratively to adjust a timeout.
|
||||
*/
|
||||
u64_t z_timeout_end_calc(k_timeout_t timeout)
|
||||
{
|
||||
|
@ -273,6 +279,10 @@ u64_t z_timeout_end_calc(k_timeout_t timeout)
|
|||
dt = k_ms_to_ticks_ceil32(timeout);
|
||||
#else
|
||||
dt = timeout.ticks;
|
||||
|
||||
if (IS_ENABLED(CONFIG_TIMEOUT_64BIT) && Z_TICK_ABS(dt) >= 0) {
|
||||
return Z_TICK_ABS(dt);
|
||||
}
|
||||
#endif
|
||||
return z_tick_get() + MAX(1, dt);
|
||||
}
|
||||
|
|
|
@ -127,7 +127,9 @@ void z_impl_k_timer_start(struct k_timer *timer, k_timeout_t duration,
|
|||
* timer_api test relies on this behavior.
|
||||
*/
|
||||
period.ticks = MAX(period.ticks - 1, 0);
|
||||
duration.ticks = MAX(duration.ticks - 1, 0);
|
||||
if (Z_TICK_ABS(duration.ticks) < 0) {
|
||||
duration.ticks = MAX(duration.ticks - 1, 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
(void)z_abort_timeout(&timer->timeout);
|
||||
|
|
|
@ -518,14 +518,21 @@ void test_timer_user_data(void)
|
|||
* k_timer_remaining_get()
|
||||
*/
|
||||
|
||||
void test_timer_remaining_get(void)
|
||||
void test_timer_remaining(void)
|
||||
{
|
||||
u32_t remaining;
|
||||
u32_t dur_ticks = k_ms_to_ticks_ceil32(DURATION);
|
||||
u32_t rem_ms, rem_ticks, exp_ticks;
|
||||
u64_t now;
|
||||
|
||||
k_usleep(1); /* align to tick */
|
||||
|
||||
init_timer_data();
|
||||
k_timer_start(&remain_timer, K_MSEC(DURATION), K_NO_WAIT);
|
||||
busy_wait_ms(DURATION / 2);
|
||||
remaining = k_timer_remaining_get(&remain_timer);
|
||||
now = k_uptime_ticks();
|
||||
rem_ms = k_timer_remaining_get(&remain_timer);
|
||||
rem_ticks = k_timer_remaining_ticks(&remain_timer);
|
||||
exp_ticks = k_timer_expires_ticks(&remain_timer);
|
||||
k_timer_stop(&remain_timer);
|
||||
|
||||
/*
|
||||
|
@ -534,7 +541,47 @@ void test_timer_remaining_get(void)
|
|||
* the value obtained through k_timer_remaining_get() could be larger
|
||||
* than actual remaining time with maximum error equal to one tick.
|
||||
*/
|
||||
zassert_true(remaining <= (DURATION / 2) + k_ticks_to_ms_floor64(1), NULL);
|
||||
zassert_true(rem_ms <= (DURATION / 2) + k_ticks_to_ms_floor64(1),
|
||||
NULL);
|
||||
|
||||
zassert_true(rem_ticks <= dur_ticks / 2, NULL);
|
||||
zassert_true((exp_ticks - now) <= dur_ticks / 2, NULL);
|
||||
}
|
||||
|
||||
void test_timeout_abs(void)
|
||||
{
|
||||
#ifdef CONFIG_TIMEOUT_64BIT
|
||||
const int expiration = 10000000; /* 10M ticks */
|
||||
k_timeout_t t = K_TIMEOUT_ABS_TICKS(10000000), t2;
|
||||
|
||||
/* Check the other generator macros to make sure they produce
|
||||
* the same (whiteboxed) converted values
|
||||
*/
|
||||
t2 = K_TIMEOUT_ABS_MS(k_ticks_to_ms_ceil64(expiration));
|
||||
zassert_true(t2.ticks == t.ticks, NULL);
|
||||
|
||||
t2 = K_TIMEOUT_ABS_US(k_ticks_to_us_ceil64(expiration));
|
||||
zassert_true(t2.ticks == t.ticks, NULL);
|
||||
|
||||
t2 = K_TIMEOUT_ABS_NS(k_ticks_to_ns_ceil64(expiration));
|
||||
zassert_true(t2.ticks == t.ticks, NULL);
|
||||
|
||||
t2 = K_TIMEOUT_ABS_CYC(k_ticks_to_cyc_ceil64(expiration));
|
||||
zassert_true(t2.ticks == t.ticks, NULL);
|
||||
|
||||
/* Now set the timeout and make sure the expiration time is
|
||||
* correct vs. current time. Tick units and tick alignment
|
||||
* makes this math exact: remember to add one to match the
|
||||
* convention (i.e. a timer of "1 tick" will expire at "now
|
||||
* plus 2 ticks", because "now plus one" will always be
|
||||
* somewhat less than a tick).
|
||||
*/
|
||||
k_usleep(1); /* align to tick */
|
||||
k_timer_start(&remain_timer, t, K_FOREVER);
|
||||
zassert_true(k_timer_remaining_ticks(&remain_timer)
|
||||
+ k_uptime_ticks() + 1 == expiration, NULL);
|
||||
k_timer_stop(&remain_timer);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn,
|
||||
|
@ -570,6 +617,7 @@ void test_main(void)
|
|||
ztest_user_unit_test(test_timer_status_sync),
|
||||
ztest_user_unit_test(test_timer_k_define),
|
||||
ztest_user_unit_test(test_timer_user_data),
|
||||
ztest_user_unit_test(test_timer_remaining_get));
|
||||
ztest_user_unit_test(test_timer_remaining),
|
||||
ztest_user_unit_test(test_timeout_abs));
|
||||
ztest_run_test_suite(timer_api);
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue