drivers: timer: extend nrf_rtc_timer

This commit introduces the following changes:

* nrf_rtc_timer is extended with a capability to handle RTC overflow,
allowing it to operate on absolute RTC ticks, rather than relative
ticks.
* overflow handling is ZLI-proof and relies on the sys clock handler being
executed twice per RTC counter overflow period.
* callbacks are given an absolute RTC tick value as a parameter instead
of the CC register's value. The absolute RTC tick value is the RTC counter
value set during CC channel configuration, extended to 64 bits.
* in case the timer's target time is in the past or is the current tick,
the timer fires as soon as possible, although still from the RTC's ISR
context.
* in case an active timer is set again with the same target time, it is
not rescheduled; only its event data is updated. Otherwise, the timer is
scheduled as usual.
* a scheduled timer can be aborted.
* system clock functions now use 64-bit values internally.
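
A rough usage sketch of the resulting channel API (illustrative only, not part
of the commit; the 100-tick delay and the handler body are arbitrary, and error
handling is minimal):

#include <kernel.h>
#include <drivers/timer/nrf_rtc_timer.h>

/* Hypothetical handler: expire_time is the absolute 64-bit RTC tick value the
 * channel was armed for, not the raw CC register value.
 */
static void my_handler(int32_t chan, uint64_t expire_time, void *user_data)
{
	/* (z_nrf_rtc_timer_read() - expire_time) tells how late we run. */
	ARG_UNUSED(chan);
	ARG_UNUSED(expire_time);
	ARG_UNUSED(user_data);
}

static void example(void)
{
	int32_t chan = z_nrf_rtc_timer_chan_alloc();

	if (chan < 0) {
		return;
	}

	/* Fire 100 RTC ticks from now; z_nrf_rtc_timer_set() returns -EINVAL
	 * if the target is too far in the future.
	 */
	uint64_t target = z_nrf_rtc_timer_read() + 100;

	if (z_nrf_rtc_timer_set(chan, target, my_handler, NULL) == 0) {
		/* A timer that has not fired yet can be aborted. */
		z_nrf_rtc_timer_abort(chan);
	}

	z_nrf_rtc_timer_chan_free(chan);
}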

Signed-off-by: Andrzej Kuroś <andrzej.kuros@nordicsemi.no>
Signed-off-by: Jedrzej Ciupis <jedrzej.ciupis@nordicsemi.no>
Signed-off-by: Krzysztof Chruscinski <krzysztof.chruscinski@nordicsemi.no>
Signed-off-by: Paweł Kwiek <pawel.kwiek@nordicsemi.no>
Authored by Jedrzej Ciupis on 2021-09-30 15:07:40 +02:00; committed by Carles Cufí
commit fcda8699cb
4 changed files with 474 additions and 136 deletions


@@ -13,8 +13,6 @@
#include <drivers/timer/nrf_rtc_timer.h>
#include <sys_clock.h>
#include <hal/nrf_rtc.h>
#include <spinlock.h>
#define EXT_CHAN_COUNT CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT
#define CHAN_COUNT (EXT_CHAN_COUNT + 1)
@@ -26,7 +24,8 @@
BUILD_ASSERT(CHAN_COUNT <= RTC_CH_COUNT, "Not enough compare channels");
#define COUNTER_SPAN BIT(24)
#define COUNTER_BIT_WIDTH 24U
#define COUNTER_SPAN BIT(COUNTER_BIT_WIDTH)
#define COUNTER_MAX (COUNTER_SPAN - 1U)
#define COUNTER_HALF_SPAN (COUNTER_SPAN / 2U)
#define CYC_PER_TICK (sys_clock_hw_cycles_per_sec() \
@@ -34,18 +33,25 @@ BUILD_ASSERT(CHAN_COUNT <= RTC_CH_COUNT, "Not enough compare channels");
#define MAX_TICKS ((COUNTER_HALF_SPAN - CYC_PER_TICK) / CYC_PER_TICK)
#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
static struct k_spinlock lock;
#define OVERFLOW_RISK_RANGE_END (COUNTER_SPAN / 16)
#define ANCHOR_RANGE_START (COUNTER_SPAN / 8)
#define ANCHOR_RANGE_END (7 * COUNTER_SPAN / 8)
#define TARGET_TIME_INVALID (UINT64_MAX)
static uint32_t last_count;
static volatile uint32_t overflow_cnt;
static volatile uint64_t anchor;
static uint64_t last_count;
struct z_nrf_rtc_timer_chan_data {
z_nrf_rtc_timer_compare_handler_t callback;
void *user_context;
volatile uint64_t target_time;
};
static struct z_nrf_rtc_timer_chan_data cc_data[CHAN_COUNT];
static atomic_t int_mask;
static atomic_t alloc_mask;
static atomic_t force_isr_mask;
static uint32_t counter_sub(uint32_t a, uint32_t b)
{
@@ -82,9 +88,33 @@ static uint32_t counter(void)
return nrf_rtc_counter_get(RTC);
}
uint32_t z_nrf_rtc_timer_read(void)
static uint32_t absolute_time_to_cc(uint64_t absolute_time)
{
return nrf_rtc_counter_get(RTC);
/* 24 least significant bits represent target CC value */
return absolute_time & COUNTER_MAX;
}
static uint32_t full_int_lock(void)
{
uint32_t mcu_critical_state;
if (IS_ENABLED(CONFIG_ZERO_LATENCY_IRQS)) {
mcu_critical_state = __get_PRIMASK();
__disable_irq();
} else {
mcu_critical_state = irq_lock();
}
return mcu_critical_state;
}
static void full_int_unlock(uint32_t mcu_critical_state)
{
if (IS_ENABLED(CONFIG_ZERO_LATENCY_IRQS)) {
__set_PRIMASK(mcu_critical_state);
} else {
irq_unlock(mcu_critical_state);
}
}
uint32_t z_nrf_rtc_timer_compare_evt_address_get(int32_t chan)
@@ -93,25 +123,42 @@ uint32_t z_nrf_rtc_timer_compare_evt_address_get(int32_t chan)
return nrf_rtc_event_address_get(RTC, nrf_rtc_compare_event_get(chan));
}
bool z_nrf_rtc_timer_compare_int_lock(int32_t chan)
static bool compare_int_lock(int32_t chan)
{
__ASSERT_NO_MSG(chan && chan < CHAN_COUNT);
atomic_val_t prev = atomic_and(&int_mask, ~BIT(chan));
nrf_rtc_int_disable(RTC, RTC_CHANNEL_INT_MASK(chan));
__DMB();
__ISB();
return prev & BIT(chan);
}
bool z_nrf_rtc_timer_compare_int_lock(int32_t chan)
{
__ASSERT_NO_MSG(chan && chan < CHAN_COUNT);
return compare_int_lock(chan);
}
static void compare_int_unlock(int32_t chan, bool key)
{
if (key) {
atomic_or(&int_mask, BIT(chan));
nrf_rtc_int_enable(RTC, RTC_CHANNEL_INT_MASK(chan));
if (atomic_get(&force_isr_mask) & BIT(chan)) {
NVIC_SetPendingIRQ(RTC_IRQn);
}
}
}
void z_nrf_rtc_timer_compare_int_unlock(int32_t chan, bool key)
{
__ASSERT_NO_MSG(chan && chan < CHAN_COUNT);
if (key) {
atomic_or(&int_mask, BIT(chan));
nrf_rtc_int_enable(RTC, RTC_CHANNEL_INT_MASK(chan));
}
compare_int_unlock(chan, key);
}
uint32_t z_nrf_rtc_timer_compare_read(int32_t chan)
@@ -121,41 +168,48 @@ uint32_t z_nrf_rtc_timer_compare_read(int32_t chan)
return nrf_rtc_cc_get(RTC, chan);
}
int z_nrf_rtc_timer_get_ticks(k_timeout_t t)
uint64_t z_nrf_rtc_timer_get_ticks(k_timeout_t t)
{
uint32_t curr_count;
uint64_t curr_time;
int64_t curr_tick;
int64_t result;
int64_t abs_ticks;
do {
curr_count = counter();
curr_time = z_nrf_rtc_timer_read();
curr_tick = sys_clock_tick_get();
} while (curr_count != counter());
} while (curr_time != z_nrf_rtc_timer_read());
abs_ticks = Z_TICK_ABS(t.ticks);
if (abs_ticks < 0) {
/* relative timeout */
return (t.ticks > COUNTER_HALF_SPAN) ?
-EINVAL : ((curr_count + t.ticks) & COUNTER_MAX);
return (t.ticks > COUNTER_SPAN) ?
-EINVAL : (curr_time + t.ticks);
}
/* absolute timeout */
result = abs_ticks - curr_tick;
if ((result > COUNTER_HALF_SPAN) ||
(result < -(int64_t)COUNTER_HALF_SPAN)) {
if (result > COUNTER_SPAN) {
return -EINVAL;
}
return (curr_count + result) & COUNTER_MAX;
return curr_time + result;
}
/* Function safely sets absolute alarm. It assumes that provided value is
* less than COUNTER_HALF_SPAN from now. It detects late setting and also
* handle +1 cycle case.
/** @brief Function safely sets absolute alarm.
*
* It assumes that the provided value is less than COUNTER_HALF_SPAN from now.
* It detects late setting and also handles the +1 cycle case.
*
* @param[in] chan A channel for which a new CC value is to be set.
*
* @param[in] abs_val An absolute value of CC register to be set.
*
* @returns CC value that was actually set. It is equal to @p abs_val or
* shifted ahead if @p abs_val was too near in the future (+1 case).
*/
static void set_absolute_alarm(int32_t chan, uint32_t abs_val)
static uint32_t set_absolute_alarm(int32_t chan, uint32_t abs_val)
{
uint32_t now;
uint32_t now2;
@@ -179,7 +233,6 @@ static void set_absolute_alarm(int32_t chan, uint32_t abs_val)
k_busy_wait(19);
}
/* If requested cc_val is in the past or next tick, set to 2
* ticks from now. RTC may not generate event if CC is set for
* 1 tick from now.
@@ -202,39 +255,144 @@ static void set_absolute_alarm(int32_t chan, uint32_t abs_val)
*/
} while ((now2 != now) &&
(counter_sub(cc_val, now2 + 2) > COUNTER_HALF_SPAN));
return cc_val;
}
static void compare_set(int32_t chan, uint32_t cc_value,
static int compare_set_nolocks(int32_t chan, uint64_t target_time,
z_nrf_rtc_timer_compare_handler_t handler,
void *user_data)
{
int ret = 0;
uint32_t cc_value = absolute_time_to_cc(target_time);
uint64_t curr_time = z_nrf_rtc_timer_read();
if (curr_time < target_time) {
if (target_time - curr_time > COUNTER_SPAN) {
/* Target time is too distant. */
return -EINVAL;
}
if (target_time != cc_data[chan].target_time) {
/* Target time is valid and is different than currently set.
* Set CC value.
*/
uint32_t cc_set = set_absolute_alarm(chan, cc_value);
target_time += counter_sub(cc_set, cc_value);
}
} else {
/* Force ISR handling when exiting from critical section. */
atomic_or(&force_isr_mask, BIT(chan));
}
cc_data[chan].target_time = target_time;
cc_data[chan].callback = handler;
cc_data[chan].user_context = user_data;
set_absolute_alarm(chan, cc_value);
return ret;
}
void z_nrf_rtc_timer_compare_set(int32_t chan, uint32_t cc_value,
z_nrf_rtc_timer_compare_handler_t handler,
void *user_data)
static int compare_set(int32_t chan, uint64_t target_time,
z_nrf_rtc_timer_compare_handler_t handler,
void *user_data)
{
bool key;
key = compare_int_lock(chan);
int ret = compare_set_nolocks(chan, target_time, handler, user_data);
compare_int_unlock(chan, key);
return ret;
}
int z_nrf_rtc_timer_set(int32_t chan, uint64_t target_time,
z_nrf_rtc_timer_compare_handler_t handler,
void *user_data)
{
__ASSERT_NO_MSG(chan && chan < CHAN_COUNT);
bool key = z_nrf_rtc_timer_compare_int_lock(chan);
return compare_set(chan, target_time, handler, user_data);
}
compare_set(chan, cc_value, handler, user_data);
void z_nrf_rtc_timer_abort(int32_t chan)
{
__ASSERT_NO_MSG(chan && chan < CHAN_COUNT);
z_nrf_rtc_timer_compare_int_unlock(chan, key);
bool key = compare_int_lock(chan);
cc_data[chan].target_time = TARGET_TIME_INVALID;
event_clear(chan);
event_disable(chan);
(void)atomic_and(&force_isr_mask, ~BIT(chan));
compare_int_unlock(chan, key);
}
uint64_t z_nrf_rtc_timer_read(void)
{
uint64_t val = ((uint64_t)overflow_cnt) << COUNTER_BIT_WIDTH;
__DMB();
uint32_t cntr = counter();
val += cntr;
if (cntr < OVERFLOW_RISK_RANGE_END) {
/* `overflow_cnt` can hold an incorrect value because of a still unhandled overflow,
* or because this code preempted the overflow interrupt before the final write
* of `overflow_cnt`. The update of `anchor` occurs far away in time from this moment,
* so `anchor` is considered valid and stable. Because of this timing there is no risk
* of an incorrect `anchor` value caused by the non-atomic read of the 64-bit `anchor`.
*/
if (val < anchor) {
/* Unhandled overflow detected, add a correction. */
val += COUNTER_SPAN;
}
} else {
/* `overflow_cnt` is considered valid and stable in this range, no need to
* check validity using `anchor`
*/
}
return val;
}
static inline bool in_anchor_range(uint32_t cc_value)
{
return (cc_value >= ANCHOR_RANGE_START) && (cc_value < ANCHOR_RANGE_END);
}
static inline bool anchor_update(uint32_t cc_value)
{
/* Update anchor when far from overflow */
if (in_anchor_range(cc_value)) {
/* In this range `overflow_cnt` is considered valid and stable.
* Write of 64-bit `anchor` is non atomic. However it happens
* far in time from the moment the `anchor` is read in
* `z_nrf_rtc_timer_read`.
*/
anchor = (((uint64_t)overflow_cnt) << COUNTER_BIT_WIDTH) + cc_value;
return true;
}
return false;
}
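/* Hedged illustration (not part of the driver): the overflow correction done in
 * z_nrf_rtc_timer_read() expressed as a pure function, followed by a worked
 * example with made-up values.
 */
static uint64_t corrected_abs_time(uint32_t ovf_cnt, uint32_t cntr, uint64_t anchor_val)
{
	uint64_t val = ((uint64_t)ovf_cnt << COUNTER_BIT_WIDTH) + cntr;

	/* Counter already wrapped but the overflow interrupt has not bumped
	 * ovf_cnt yet: val lands a full span below the anchor, so add it back.
	 */
	if ((cntr < OVERFLOW_RISK_RANGE_END) && (val < anchor_val)) {
		val += COUNTER_SPAN;
	}

	return val;
}
/* Example: ovf_cnt = 2 (stale), cntr = 0x000010, anchor_val = 2 * 2^24 + 0x800000.
 * val starts at 2 * 2^24 + 0x10, which is below anchor_val, so COUNTER_SPAN is
 * added and the function returns 3 * 2^24 + 0x10, i.e. the correct absolute time.
 */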
static void sys_clock_timeout_handler(int32_t chan,
uint32_t cc_value,
uint64_t expire_time,
void *user_data)
{
uint32_t dticks = counter_sub(cc_value, last_count) / CYC_PER_TICK;
uint32_t cc_value = absolute_time_to_cc(expire_time);
uint64_t dticks = (expire_time - last_count) / CYC_PER_TICK;
last_count += dticks * CYC_PER_TICK;
bool anchor_updated = anchor_update(cc_value);
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
/* protection is not needed because we are in the RTC interrupt
* so it won't get preempted by the interrupt.
@@ -244,7 +402,80 @@ static void sys_clock_timeout_handler(int32_t chan,
}
sys_clock_announce(IS_ENABLED(CONFIG_TICKLESS_KERNEL) ?
dticks : (dticks > 0));
(int32_t)dticks : (dticks > 0));
if (cc_value == get_comparator(chan)) {
/* A new CC value was not set. Set something that can update the anchor.
* If the anchor was updated we can enable the same CC value to trigger
* an interrupt after a full cycle. Otherwise set the event in the anchor
* update range. Since the anchor was not updated we know the value is
* very far from the mid point, so setting is done without any protection.
*/
if (!anchor_updated) {
set_comparator(chan, COUNTER_HALF_SPAN);
}
event_enable(chan);
}
}
static bool channel_processing_check_and_clear(int32_t chan)
{
bool result = false;
uint32_t mcu_critical_state = full_int_lock();
if (nrf_rtc_int_enable_check(RTC, RTC_CHANNEL_INT_MASK(chan))) {
/* The processing of channel can be caused by CC match
* or be forced.
*/
result = atomic_and(&force_isr_mask, ~BIT(chan)) ||
nrf_rtc_event_check(RTC, RTC_CHANNEL_EVENT_ADDR(chan));
if (result) {
event_clear(chan);
}
}
full_int_unlock(mcu_critical_state);
return result;
}
static void process_channel(int32_t chan)
{
if (channel_processing_check_and_clear(chan)) {
void *user_context;
uint32_t mcu_critical_state;
uint64_t curr_time;
uint64_t expire_time;
z_nrf_rtc_timer_compare_handler_t handler = NULL;
curr_time = z_nrf_rtc_timer_read();
/* This critical section is used to provide atomic access to
* cc_data structure and prevent higher priority contexts
* (including ZLIs) from overwriting it.
*/
mcu_critical_state = full_int_lock();
/* If target_time is in the past or is equal to current time
* value, execute the handler.
*/
expire_time = cc_data[chan].target_time;
if (curr_time >= expire_time) {
handler = cc_data[chan].callback;
user_context = cc_data[chan].user_context;
cc_data[chan].callback = NULL;
cc_data[chan].target_time = TARGET_TIME_INVALID;
event_disable(chan);
}
full_int_unlock(mcu_critical_state);
if (handler) {
handler(chan, expire_time, user_context);
}
}
}
/* Note: this function has public linkage, and MUST have this
@@ -259,34 +490,14 @@ void rtc_nrf_isr(const void *arg)
{
ARG_UNUSED(arg);
if (nrf_rtc_int_enable_check(RTC, NRF_RTC_INT_OVERFLOW_MASK) &&
nrf_rtc_event_check(RTC, NRF_RTC_EVENT_OVERFLOW)) {
nrf_rtc_event_clear(RTC, NRF_RTC_EVENT_OVERFLOW);
overflow_cnt++;
}
for (int32_t chan = 0; chan < CHAN_COUNT; chan++) {
if (nrf_rtc_int_enable_check(RTC, RTC_CHANNEL_INT_MASK(chan)) &&
nrf_rtc_event_check(RTC, RTC_CHANNEL_EVENT_ADDR(chan))) {
uint32_t cc_val;
uint32_t now;
z_nrf_rtc_timer_compare_handler_t handler;
event_clear(chan);
event_disable(chan);
cc_val = get_comparator(chan);
now = counter();
/* Higher priority interrupt may already changed cc_val
* which now points to the future. In that case return
* current counter value. It is less precise than
* returning exact CC value but this one is already lost.
*/
if (counter_sub(now, cc_val) > COUNTER_HALF_SPAN) {
cc_val = now;
}
handler = cc_data[chan].callback;
cc_data[chan].callback = NULL;
if (handler) {
handler(chan, cc_val,
cc_data[chan].user_context);
}
}
process_channel(chan);
}
}
@@ -312,6 +523,7 @@ void z_nrf_rtc_timer_chan_free(int32_t chan)
atomic_or(&alloc_mask, BIT(chan));
}
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
ARG_UNUSED(idle);
@@ -324,7 +536,7 @@ void sys_clock_set_timeout(int32_t ticks, bool idle)
ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
ticks = CLAMP(ticks - 1, 0, (int32_t)MAX_TICKS);
uint32_t unannounced = counter_sub(counter(), last_count);
uint32_t unannounced = z_nrf_rtc_timer_read() - last_count;
/* If we haven't announced for more than half the 24-bit wrap
* duration, then force an announce to avoid loss of a wrap
@@ -349,8 +561,9 @@ void sys_clock_set_timeout(int32_t ticks, bool idle)
cyc = MAX_CYCLES;
}
cyc += last_count;
compare_set(0, cyc, sys_clock_timeout_handler, NULL);
uint64_t target_time = cyc + last_count;
compare_set(0, target_time, sys_clock_timeout_handler, NULL);
}
uint32_t sys_clock_elapsed(void)
@@ -359,16 +572,12 @@ uint32_t sys_clock_elapsed(void)
return 0;
}
return counter_sub(counter(), last_count) / CYC_PER_TICK;
return (z_nrf_rtc_timer_read() - last_count) / CYC_PER_TICK;
}
uint32_t sys_clock_cycle_get_32(void)
{
k_spinlock_key_t key = k_spin_lock(&lock);
uint32_t ret = counter_sub(counter(), last_count) + last_count;
k_spin_unlock(&lock, key);
return ret;
return (uint32_t)z_nrf_rtc_timer_read();
}
static int sys_clock_driver_init(const struct device *dev)
@@ -384,9 +593,12 @@ static int sys_clock_driver_init(const struct device *dev)
/* TODO: replace with counter driver to access RTC */
nrf_rtc_prescaler_set(RTC, 0);
for (int32_t chan = 0; chan < CHAN_COUNT; chan++) {
cc_data[chan].target_time = TARGET_TIME_INVALID;
nrf_rtc_int_enable(RTC, RTC_CHANNEL_INT_MASK(chan));
}
nrf_rtc_int_enable(RTC, NRF_RTC_INT_OVERFLOW_MASK);
NVIC_ClearPendingIRQ(RTC_IRQn);
IRQ_CONNECT(RTC_IRQn, DT_IRQ(DT_NODELABEL(RTC_LABEL), priority),
@@ -401,10 +613,11 @@ static int sys_clock_driver_init(const struct device *dev)
alloc_mask = BIT_MASK(EXT_CHAN_COUNT) << 1;
}
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
compare_set(0, counter() + CYC_PER_TICK,
sys_clock_timeout_handler, NULL);
}
uint32_t initial_timeout = IS_ENABLED(CONFIG_TICKLESS_KERNEL) ?
(COUNTER_HALF_SPAN - 1) :
(counter() + CYC_PER_TICK);
compare_set(0, initial_timeout, sys_clock_timeout_handler, NULL);
z_nrf_clock_control_lf_on(mode);


@@ -11,8 +11,25 @@
extern "C" {
#endif
/** @brief Maximum allowed time span that is considered to be in the future.
*/
#define NRF_RTC_TIMER_MAX_SCHEDULE_SPAN BIT(23)
/** @brief RTC timer compare event handler.
*
* Called from RTC ISR context when processing a compare event.
*
* @param id Compare channel ID.
*
* @param expire_time An actual absolute expiration time set for a compare
* channel. It can differ from the requested target time
* and the difference can be used to determine whether the
* time set was delayed.
*
* @param user_data Pointer to a user context data.
*/
typedef void (*z_nrf_rtc_timer_compare_handler_t)(int32_t id,
uint32_t cc_value,
uint64_t expire_time,
void *user_data);
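/* Hedged illustration of the handler contract above (not part of this header):
 * a handler that detects the channel being armed later than requested (the
 * "+1 case"), assuming the caller passed the originally requested target
 * through user_data.
 */
static void on_compare(int32_t id, uint64_t expire_time, void *user_data)
{
	uint64_t requested = *(uint64_t *)user_data;

	if (expire_time > requested) {
		/* The compare channel was armed (expire_time - requested)
		 * ticks later than asked for.
		 */
	}

	(void)id;
}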
/** @brief Allocate RTC compare channel.
@@ -30,11 +47,11 @@ int32_t z_nrf_rtc_timer_chan_alloc(void);
*/
void z_nrf_rtc_timer_chan_free(int32_t chan);
/** @brief Read current RTC counter value.
/** @brief Read current absolute time.
*
* @return Current RTC counter value.
* @return Current absolute time.
*/
uint32_t z_nrf_rtc_timer_read(void);
uint64_t z_nrf_rtc_timer_read(void);
/** @brief Get COMPARE event register address.
*
@@ -76,38 +93,52 @@ uint32_t z_nrf_rtc_timer_compare_read(int32_t chan);
/** @brief Try to set compare channel to given value.
*
* Provided value is absolute and cannot be further in future than half span of
* the RTC counter. Function continuously retries to set compare register until
* value that is written is far enough in the future and will generate an event.
* Because of that, compare register value may be different than the one
* requested. During this operation interrupt from that compare channel is
* disabled. Other interrupts are not locked during this operation.
*
* There is no option to abort the request once it is set. However, it can be
* overwritten.
* The provided value is absolute and cannot be further in the future than
* @c NRF_RTC_TIMER_MAX_SCHEDULE_SPAN. If the given value is in the past then an
* RTC interrupt is triggered immediately. Otherwise the function continuously
* retries to set the compare register until the value that is written is far
* enough in the future to generate an event. Because of that, the compare
* register value may differ from the one requested. During this operation the
* interrupt from that compare channel is disabled. Other interrupts are not
* locked during this operation.
*
* @param chan Channel ID between 1 and CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT.
*
* @param cc_value Absolute value. Values which are further distanced from
* current counter value than half RTC span are considered in the past.
* @param target_time Absolute target time in ticks.
*
* @param handler User function called in the context of the RTC interrupt.
*
* @param user_data Data passed to the handler.
*
* @retval 0 if the compare channel was set successfully.
* @retval -EINVAL if provided target time was further than
* @c NRF_RTC_TIMER_MAX_SCHEDULE_SPAN ticks in the future.
*/
void z_nrf_rtc_timer_compare_set(int32_t chan, uint32_t cc_value,
z_nrf_rtc_timer_compare_handler_t handler,
void *user_data);
int z_nrf_rtc_timer_set(int32_t chan, uint64_t target_time,
z_nrf_rtc_timer_compare_handler_t handler,
void *user_data);
/** @brief Abort a timer requested with @ref z_nrf_rtc_timer_set.
*
* If an abort operation is performed too late it is still possible for the event
* to fire. The user can detect such a spurious event by comparing the absolute
* time provided in the callback with the result of @ref z_nrf_rtc_timer_read.
* During this operation the interrupt from that compare channel is disabled.
* Other interrupts are not locked during this operation.
*
* @param chan Channel ID between 1 and CONFIG_NRF_RTC_TIMER_USER_CHAN_COUNT.
*/
void z_nrf_rtc_timer_abort(int32_t chan);
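/* Hedged sketch of tolerating a late abort (not part of this header): events
 * whose expire_time is not newer than the moment the user gave up on the timer
 * are treated as spurious. abort_mark is a hypothetical variable owned by the
 * caller.
 */
static uint64_t abort_mark;

static void guarded_handler(int32_t id, uint64_t expire_time, void *user_data)
{
	if (expire_time <= abort_mark) {
		return; /* Timer was aborted too late; ignore the event. */
	}
	/* ... normal handling ... */
}

static void cancel(int32_t chan)
{
	abort_mark = z_nrf_rtc_timer_read();
	z_nrf_rtc_timer_abort(chan);
}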
/** @brief Convert system clock time to RTC ticks.
*
* @p t can be absolute or relative. @p t cannot be further from now than half
* of the RTC range (e.g. 256 seconds if RTC is running at 32768 Hz).
* @p t can be absolute or relative. @p t cannot be further into the future
* from now than the RTC range (e.g. 512 seconds if RTC is running at 32768 Hz).
*
* @retval Positive value represents @p t in RTC tick value.
* @retval -EINVAL if @p t is out of range.
*/
int z_nrf_rtc_timer_get_ticks(k_timeout_t t);
uint64_t z_nrf_rtc_timer_get_ticks(k_timeout_t t);
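/* Hedged usage sketch (not part of this header; assumes <kernel.h> for K_MSEC
 * and a previously allocated channel): the 64-bit return value carries -EINVAL
 * for out-of-range timeouts, hence the signed check.
 */
static inline int schedule_in_10ms(int32_t chan,
				   z_nrf_rtc_timer_compare_handler_t handler,
				   void *user_data)
{
	uint64_t target = z_nrf_rtc_timer_get_ticks(K_MSEC(10));

	if ((int64_t)target < 0) {
		return (int)(int64_t)target;
	}

	return z_nrf_rtc_timer_set(chan, target, handler, user_data);
}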
#ifdef __cplusplus
}


@@ -5,11 +5,12 @@
*/
#include <ztest.h>
#include <drivers/timer/nrf_rtc_timer.h>
#include <hal/nrf_rtc.h>
#include <hal/nrf_timer.h>
#include <irq.h>
struct test_data {
uint32_t cc_val;
uint64_t target_time;
uint32_t window;
uint32_t delay;
int err;
@@ -53,18 +54,33 @@ static void stop_zli_timer0(void)
nrf_timer_task_trigger(NRF_TIMER0, NRF_TIMER_TASK_STOP);
}
static void timeout_handler(int32_t id, uint32_t cc_value, void *user_data)
static void inject_overflow(void)
{
/* Bump overflow counter by 100. */
uint32_t overflow_count = 100;
while (overflow_count--) {
nrf_rtc_task_trigger(NRF_RTC1, NRF_RTC_TASK_TRIGGER_OVERFLOW);
/* Wait for the RTC counter to overflow from 0xFFFFF0 and for the
* overflow event to be handled.
*/
k_busy_wait(1000);
}
}
static void timeout_handler(int32_t id, uint64_t expire_time, void *user_data)
{
struct test_data *data = user_data;
uint32_t now = z_nrf_rtc_timer_read();
uint32_t diff = (now - cc_value) & 0x00FFFFFF;
uint64_t now = z_nrf_rtc_timer_read();
uint64_t diff = (now - expire_time);
zassert_true(diff <= data->delay,
"Handler called in wrong time (%d), set cc: %d, got cc: %d",
now, data->cc_val, cc_value);
"Handler called in wrong time (%llu), set time: %llu, "
"got time: %llu",
now, data->target_time, expire_time);
if ((cc_value >= data->cc_val) &&
(cc_value <= (data->cc_val + data->window))) {
if ((expire_time >= data->target_time) &&
(expire_time <= (data->target_time + data->window))) {
data->err = 0;
}
timeout_handler_cnt++;
@@ -72,15 +88,15 @@ static void timeout_handler(int32_t id, uint32_t cc_value, void *user_data)
static void test_timeout(int32_t chan, k_timeout_t t, bool ext_window)
{
int32_t cc_val = z_nrf_rtc_timer_get_ticks(t);
int64_t ticks = z_nrf_rtc_timer_get_ticks(t);
struct test_data test_data = {
.cc_val = cc_val,
.target_time = ticks,
.window = ext_window ? 100 : (Z_TICK_ABS(t.ticks) ? 0 : 32),
.delay = ext_window ? 100 : 2,
.err = -EINVAL
};
z_nrf_rtc_timer_compare_set(chan, cc_val, timeout_handler, &test_data);
z_nrf_rtc_timer_set(chan, (uint64_t)ticks, timeout_handler, &test_data);
/* wait additional arbitrary time. */
k_busy_wait(1000);
@@ -130,10 +146,10 @@ static void test_z_nrf_rtc_timer_compare_evt_address_get(void)
static void test_int_disable_enabled(void)
{
uint32_t now = z_nrf_rtc_timer_read();
uint32_t t = 1000;
uint64_t now = z_nrf_rtc_timer_read();
uint64_t t = 1000;
struct test_data data = {
.cc_val = now + t,
.target_time = now + t,
.window = 1000,
.delay = 2000,
.err = -EINVAL
@@ -144,7 +160,7 @@ static void test_int_disable_enabled(void)
chan = z_nrf_rtc_timer_chan_alloc();
zassert_true(chan >= 0, "Failed to allocate RTC channel.");
z_nrf_rtc_timer_compare_set(chan, data.cc_val, timeout_handler, &data);
z_nrf_rtc_timer_set(chan, data.target_time, timeout_handler, &data);
zassert_equal(data.err, -EINVAL, "Unexpected err: %d", data.err);
key = z_nrf_rtc_timer_compare_int_lock(chan);
@@ -163,7 +179,7 @@ static void test_int_disable_enabled(void)
static void test_get_ticks(void)
{
k_timeout_t t = K_MSEC(1);
uint32_t exp_ticks = z_nrf_rtc_timer_read() + t.ticks;
uint64_t exp_ticks = z_nrf_rtc_timer_read() + t.ticks;
int ticks;
/* Relative 1ms from now timeout converted to RTC */
@@ -186,20 +202,21 @@ static void test_get_ticks(void)
"Unexpected result %d (expected: %d)", ticks, exp_ticks);
/* too far in the future */
t = Z_TIMEOUT_TICKS(sys_clock_tick_get() + 0x00800001);
t = Z_TIMEOUT_TICKS(sys_clock_tick_get() + 0x01000001);
ticks = z_nrf_rtc_timer_get_ticks(t);
zassert_equal(ticks, -EINVAL, "Unexpected ticks: %d", ticks);
}
static void sched_handler(int32_t id, uint32_t cc_val, void *user_data)
static void sched_handler(int32_t id, uint64_t expire_time, void *user_data)
{
int64_t now = sys_clock_tick_get();
int rtc_ticks_now =
z_nrf_rtc_timer_get_ticks(Z_TIMEOUT_TICKS(Z_TICK_ABS(now)));
uint64_t *evt_uptime_us = user_data;
*evt_uptime_us = k_ticks_to_us_floor64(now - (rtc_ticks_now - cc_val));
*evt_uptime_us =
k_ticks_to_us_floor64(now - (rtc_ticks_now - expire_time));
}
static void test_absolute_scheduling(void)
@@ -208,7 +225,7 @@ static void test_absolute_scheduling(void)
int64_t now_us = k_ticks_to_us_floor64(sys_clock_tick_get());
uint64_t target_us = now_us + 5678;
uint64_t evt_uptime_us;
int rtc_ticks;
uint64_t rtc_ticks;
int32_t chan;
chan = z_nrf_rtc_timer_chan_alloc();
@@ -216,10 +233,9 @@ static void test_absolute_scheduling(void)
/* schedule event in 5678us from now */
t = Z_TIMEOUT_TICKS(Z_TICK_ABS(K_USEC(target_us).ticks));
rtc_ticks = z_nrf_rtc_timer_get_ticks(t);
rtc_ticks = (uint64_t)z_nrf_rtc_timer_get_ticks(t);
z_nrf_rtc_timer_compare_set(chan, rtc_ticks,
sched_handler, &evt_uptime_us);
z_nrf_rtc_timer_set(chan, rtc_ticks, sched_handler, &evt_uptime_us);
k_busy_wait(5678);
@@ -230,10 +246,9 @@ static void test_absolute_scheduling(void)
/* schedule event now. */
now_us = k_ticks_to_us_floor64(sys_clock_tick_get());
t = Z_TIMEOUT_TICKS(Z_TICK_ABS(K_USEC(now_us).ticks));
rtc_ticks = z_nrf_rtc_timer_get_ticks(t);
rtc_ticks = (uint64_t)z_nrf_rtc_timer_get_ticks(t);
z_nrf_rtc_timer_compare_set(chan, rtc_ticks,
sched_handler, &evt_uptime_us);
z_nrf_rtc_timer_set(chan, rtc_ticks, sched_handler, &evt_uptime_us);
k_busy_wait(200);
@@ -289,7 +304,7 @@ static void test_stress(void)
z_nrf_rtc_timer_chan_free(chan);
}
static void test_reseting_cc(void)
static void test_resetting_cc(void)
{
uint32_t start = k_uptime_get_32();
uint32_t test_time = 1000;
@@ -302,19 +317,18 @@ static void test_reseting_cc(void)
timeout_handler_cnt = 0;
do {
uint32_t now = z_nrf_rtc_timer_read();
uint64_t now = z_nrf_rtc_timer_read();
struct test_data test_data = {
.cc_val = now + 5,
.target_time = now + 5,
.window = 0,
.delay = 0,
.err = -EINVAL
};
/* Set compare but expect that it will never expire because
/* Set timer but expect that it will never expire because
* it will be re-set later on.
*/
z_nrf_rtc_timer_compare_set(chan, now + 2,
timeout_handler, &test_data);
z_nrf_rtc_timer_set(chan, now + 2, timeout_handler, &test_data);
/* Arbitrary variable delay to reset CC before expiring first
* request but very close.
@@ -322,8 +336,7 @@ static void test_reseting_cc(void)
k_busy_wait(i);
i = (i + 1) % 20;
z_nrf_rtc_timer_compare_set(chan, now + 5,
timeout_handler, &test_data);
z_nrf_rtc_timer_set(chan, now + 5, timeout_handler, &test_data);
k_busy_wait((5 + 1)*31);
cnt++;
} while ((k_uptime_get_32() - start) < test_time);
@@ -334,6 +347,86 @@ static void test_reseting_cc(void)
z_nrf_rtc_timer_chan_free(chan);
}
static void overflow_sched_handler(int32_t id, uint64_t expire_time,
void *user_data)
{
uint64_t now = z_nrf_rtc_timer_read();
uint64_t *evt_uptime = user_data;
*evt_uptime = now - expire_time;
}
/* This test must be executed last because it interferes with the overflow
* counter, desynchronizing nRF RTC timer ticks from kernel ticks.
*/
static void test_overflow(void)
{
PRINT("RTC ticks before overflow injection: %u\r\n",
(uint32_t)z_nrf_rtc_timer_read());
inject_overflow();
PRINT("RTC ticks after overflow injection: %u\r\n",
(uint32_t)z_nrf_rtc_timer_read());
uint64_t now;
uint64_t target_time;
uint64_t evt_uptime;
int32_t chan;
chan = z_nrf_rtc_timer_chan_alloc();
zassert_true(chan >= 0, "Failed to allocate RTC channel.");
/* Schedule event in 5 ticks from now. */
evt_uptime = UINT64_MAX;
now = z_nrf_rtc_timer_read();
target_time = now + 5;
z_nrf_rtc_timer_set(chan, target_time, overflow_sched_handler,
&evt_uptime);
k_busy_wait(k_ticks_to_us_floor64(5 + 1));
PRINT("RTC event scheduled at %llu ticks for %llu ticks,"
"event occurred at %llu ticks (uptime)\n",
now, target_time, evt_uptime);
zassert_not_equal(UINT64_MAX, evt_uptime,
"Expired timer shall overwrite evt_uptime");
/* Schedule event now. */
evt_uptime = UINT64_MAX;
now = z_nrf_rtc_timer_read();
target_time = now;
z_nrf_rtc_timer_set(chan, target_time, overflow_sched_handler,
&evt_uptime);
k_busy_wait(200);
zassert_not_equal(UINT64_MAX, evt_uptime,
"Expired timer shall overwrite evt_uptime");
PRINT("RTC event scheduled at %llu ticks for %llu ticks,"
"event occurred at %llu ticks (uptime)\n",
now, target_time, evt_uptime);
/* Schedule event far in the past. */
evt_uptime = UINT64_MAX;
now = z_nrf_rtc_timer_read();
target_time = now - 2 * NRF_RTC_TIMER_MAX_SCHEDULE_SPAN;
z_nrf_rtc_timer_set(chan, target_time, overflow_sched_handler,
&evt_uptime);
k_busy_wait(200);
zassert_not_equal(UINT64_MAX, evt_uptime,
"Expired timer shall overwrite evt_uptime");
PRINT("RTC event scheduled at %llu ticks for %llu ticks,"
"event occurred at %llu ticks (uptime)\n",
now, target_time, evt_uptime);
z_nrf_rtc_timer_chan_free(chan);
}
void test_main(void)
{
init_zli_timer0();
@@ -346,7 +439,8 @@ void test_main(void)
ztest_unit_test(test_absolute_scheduling),
ztest_unit_test(test_alloc_free),
ztest_unit_test(test_stress),
ztest_unit_test(test_reseting_cc)
ztest_unit_test(test_resetting_cc),
ztest_unit_test(test_overflow)
);
ztest_run_test_suite(test_nrf_rtc_timer);
}


@@ -184,7 +184,7 @@ manifest:
groups:
- tools
- name: nrf_hw_models
revision: 42645e87ade210c1cac201ff4b2bffb23cd6e331
revision: b8cea37dbdc8fc58cc14b4e19fa850877a9da520
path: modules/bsim_hw_models/nrf_hw_models
- name: open-amp
revision: cfd050ff38a9d028dc211690b2ec35971128e45e