nanokernel: Rename nano_tick_xxx APIs
Renames the following nanokernel tick APIs:

    nano_tick_get()      -> sys_tick_get()
    nano_tick_get_32()   -> sys_tick_get_32()
    nano_tick_delta()    -> sys_tick_delta()
    nano_tick_delta_32() -> sys_tick_delta_32()

Change-Id: Ie969545335d76df94b4e2d200fef86a93596f5e8
Signed-off-by: Peter Mitsis <peter.mitsis@windriver.com>
parent 7afd7035a2
commit c40e84c57a

23 changed files with 106 additions and 106 deletions
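
For orientation, a sketch of how a call site reads after this rename (illustrative only, not taken verbatim from any file in the diff; assumes nanokernel.h is included and reftime is primed as described in the sys_tick_delta() documentation further down):

        int64_t reftime;

        (void) sys_tick_delta(&reftime);                /* was: nano_tick_delta()    */

        int64_t now64    = sys_tick_get();              /* was: nano_tick_get()      */
        uint32_t now32   = sys_tick_get_32();           /* was: nano_tick_get_32()   */
        int64_t since    = sys_tick_delta(&reftime);    /* was: nano_tick_delta()    */
        uint32_t since32 = sys_tick_delta_32(&reftime); /* was: nano_tick_delta_32() */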
@@ -124,10 +124,10 @@ The following kernel clock APIs are provided by :file:`microkernel.h`:
 The following kernel clock APIs are provided by :file:`microkernel.h`
 and by :file:`nanokernel.h`:
 
-:cpp:func:`nano_tick_get()`, :cpp:func:`nano_tick_get_32()`
+:cpp:func:`sys_tick_get()`, :cpp:func:`sys_tick_get_32()`
    Read the system clock.
 
-:cpp:func:`nano_tick_delta()`, :cpp:func:`nano_tick_delta_32()`
+:cpp:func:`sys_tick_delta()`, :cpp:func:`sys_tick_delta_32()`
    Compute the elapsed time since an earlier system clock reading.
 
 :cpp:func:`nano_cycle_get_32()`
@@ -501,9 +501,9 @@ static int i2c_dw_poll_transfer(struct device *dev,
         }
 
         /* Wait for bus idle */
-        start_time = nano_tick_get_32();
+        start_time = sys_tick_get_32();
         while (regs->ic_status.bits.activity) {
-                if ((nano_tick_get_32() - start_time) > POLLING_TIMEOUT) {
+                if ((sys_tick_get_32() - start_time) > POLLING_TIMEOUT) {
                         return DEV_FAIL;
                 }
         }
@@ -524,9 +524,9 @@ static int i2c_dw_poll_transfer(struct device *dev,
         /* Transmit */
         while (dw->tx_len > 0) {
                 /* Wait for space in TX FIFO */
-                start_time = nano_tick_get_32();
+                start_time = sys_tick_get_32();
                 while (!regs->ic_status.bits.tfnf) {
-                        if ((nano_tick_get_32() - start_time) > POLLING_TIMEOUT) {
+                        if ((sys_tick_get_32() - start_time) > POLLING_TIMEOUT) {
                                 ret = DEV_FAIL;
                                 goto finish;
                         }
@@ -536,9 +536,9 @@ static int i2c_dw_poll_transfer(struct device *dev,
         }
 
         /* Wait for TX FIFO empty to be sure everything is sent. */
-        start_time = nano_tick_get_32();
+        start_time = sys_tick_get_32();
         while (!regs->ic_status.bits.tfe) {
-                if ((nano_tick_get_32() - start_time) > POLLING_TIMEOUT) {
+                if ((sys_tick_get_32() - start_time) > POLLING_TIMEOUT) {
                         ret = DEV_FAIL;
                         goto finish;
                 }
@@ -558,9 +558,9 @@ do_receive:
 
         while (dw->rx_len > 0) {
                 /* Wait for data in RX FIFO*/
-                start_time = nano_tick_get_32();
+                start_time = sys_tick_get_32();
                 while (!regs->ic_status.bits.rfne) {
-                        if ((nano_tick_get_32() - start_time) > POLLING_TIMEOUT) {
+                        if ((sys_tick_get_32() - start_time) > POLLING_TIMEOUT) {
                                 ret = DEV_FAIL;
                                 goto finish;
                         }
@@ -571,9 +571,9 @@ do_receive:
 
 stop_det:
         /* Wait for transfer to complete */
-        start_time = nano_tick_get_32();
+        start_time = sys_tick_get_32();
         while (!regs->ic_raw_intr_stat.bits.stop_det) {
-                if ((nano_tick_get_32() - start_time) > POLLING_TIMEOUT) {
+                if ((sys_tick_get_32() - start_time) > POLLING_TIMEOUT) {
                         ret = DEV_FAIL;
                         goto finish;
                 }
@@ -581,9 +581,9 @@ stop_det:
         value = regs->ic_clr_stop_det;
 
         /* Wait for bus idle */
-        start_time = nano_tick_get_32();
+        start_time = sys_tick_get_32();
         while (regs->ic_status.bits.activity) {
-                if ((nano_tick_get_32() - start_time) > POLLING_TIMEOUT) {
+                if ((sys_tick_get_32() - start_time) > POLLING_TIMEOUT) {
                         ret = DEV_FAIL;
                         goto finish;
                 }
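
The driver hunks above all use the same tick-based timeout idiom around a busy-poll. A condensed sketch, with hardware_busy() standing in (hypothetically) for the ic_status register checks:

        uint32_t start_time = sys_tick_get_32();

        /* Poll until the hardware is idle or POLLING_TIMEOUT ticks pass. */
        while (hardware_busy()) {
                if ((sys_tick_get_32() - start_time) > POLLING_TIMEOUT) {
                        return DEV_FAIL;        /* gave up after the timeout */
                }
        }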
@@ -70,7 +70,7 @@ extern void sys_scheduler_time_slice_set(int32_t t, kpriority_t p);
  * This routine reads the processor's high precision timer. It reads the
  * counter register on the timer device. This counter register increments
  * at a relatively high rate (e.g. 20 MHz), and thus is considered a
- * "high resolution" timer. This is in contrast to nano_tick_get_32() and
+ * "high resolution" timer. This is in contrast to sys_tick_get_32() and
  * task_tick_get_32() which return the value of the kernel ticks variable.
  *
  * @return current high precision clock value
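
A sketch contrasting the two clock sources described above (illustrative only; do_some_work() is a hypothetical workload, and the conversion uses the sys_clock_hw_cycles_per_tick factor that also appears later in this diff):

        uint32_t cycles_start = nano_cycle_get_32();    /* high-resolution counter */
        uint32_t ticks_start = sys_tick_get_32();       /* coarse kernel tick count */

        do_some_work();

        uint32_t cycles = nano_cycle_get_32() - cycles_start;
        uint32_t ticks = sys_tick_get_32() - ticks_start;

        /* cycles / sys_clock_hw_cycles_per_tick should roughly equal ticks */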
@@ -1600,7 +1600,7 @@ extern void nano_task_timer_stop(struct nano_timer *timer);
  * @return the current system tick count
  *
  */
-extern int64_t nano_tick_get(void);
+extern int64_t sys_tick_get(void);
 
 /**
  *
@@ -1609,7 +1609,7 @@ extern int64_t nano_tick_get(void);
  * @return the current system tick count
  *
  */
-extern uint32_t nano_tick_get_32(void);
+extern uint32_t sys_tick_get_32(void);
 
 /**
  * @brief Return a high resolution time stamp
@@ -1626,7 +1626,7 @@ extern uint32_t nano_cycle_get_32(void);
  *
  * @return tick count since reference time; undefined for first invocation
  */
-extern int64_t nano_tick_delta(int64_t *reftime);
+extern int64_t sys_tick_delta(int64_t *reftime);
 
 /**
  *
@@ -1636,7 +1636,7 @@ extern int64_t nano_tick_delta(int64_t *reftime);
 *
 * @return 32-bit tick count since reference time; undefined for first invocation
 */
-extern uint32_t nano_tick_delta_32(int64_t *reftime);
+extern uint32_t sys_tick_delta_32(int64_t *reftime);
 
 
 /*
@@ -62,7 +62,7 @@ void sys_k_event_logger_put_timed(uint16_t event_id)
 {
         uint32_t data[1];
 
-        data[0] = nano_tick_get_32();
+        data[0] = sys_tick_get_32();
 
         sys_event_logger_put(&sys_k_event_logger, event_id, data,
                              ARRAY_SIZE(data));
@@ -86,7 +86,7 @@ void _sys_k_event_logger_context_switch(void)
         }
 
         if (_collector_fiber != _nanokernel.current) {
-                data[0] = nano_tick_get_32();
+                data[0] = sys_tick_get_32();
                 data[1] = (uint32_t)_nanokernel.current;
 
                 /*
@@ -124,7 +124,7 @@ void _sys_k_event_logger_interrupt(void)
 {
         uint32_t data[2];
 
-        data[0] = nano_tick_get_32();
+        data[0] = sys_tick_get_32();
         data[1] = _sys_current_irq_key_get();
 
         sys_k_event_logger_put(KERNEL_EVENT_LOGGER_INTERRUPT_EVENT_ID, data,
@@ -143,7 +143,7 @@ void _sys_k_event_logger_exit_sleep(void)
 {
         uint32_t data[3];
 
-        data[0] = nano_tick_get_32();
+        data[0] = sys_tick_get_32();
         data[1] = (nano_cycle_get_32() - _sys_k_event_logger_sleep_start_time)
                   / sys_clock_hw_cycles_per_tick;
         /* register the cause of exiting sleep mode */
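
The event-logger hunks above all place the current tick count in the first word of the event payload. A minimal sketch of that pattern (MY_EVENT_ID and the second payload word are hypothetical; the logger call is the one shown in the diff):

        uint32_t data[2];

        data[0] = sys_tick_get_32();    /* timestamp in kernel ticks */
        data[1] = 42;                   /* hypothetical event payload */

        sys_event_logger_put(&sys_k_event_logger, MY_EVENT_ID, data,
                             ARRAY_SIZE(data));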
@@ -318,7 +318,7 @@ void *nano_task_fifo_get_wait_timeout(struct nano_fifo *fifo,
         }
 
         key = irq_lock();
-        cur_ticks = nano_tick_get();
+        cur_ticks = sys_tick_get();
         limit = cur_ticks + timeout_in_ticks;
 
         while (cur_ticks < limit) {
@@ -340,7 +340,7 @@ void *nano_task_fifo_get_wait_timeout(struct nano_fifo *fifo,
                 nano_cpu_atomic_idle(key);
 
                 key = irq_lock();
-                cur_ticks = nano_tick_get();
+                cur_ticks = sys_tick_get();
         }
 
         irq_unlock(key);
@@ -274,7 +274,7 @@ void *nano_task_lifo_get_wait_timeout(struct nano_lifo *lifo,
         }
 
         key = irq_lock();
-        cur_ticks = nano_tick_get();
+        cur_ticks = sys_tick_get();
         limit = cur_ticks + timeout_in_ticks;
 
         while (cur_ticks < limit) {
@@ -296,7 +296,7 @@ void *nano_task_lifo_get_wait_timeout(struct nano_lifo *lifo,
                 nano_cpu_atomic_idle(key);
 
                 key = irq_lock();
-                cur_ticks = nano_tick_get();
+                cur_ticks = sys_tick_get();
         }
 
         irq_unlock(key);
@@ -239,7 +239,7 @@ int nano_task_sem_take_wait_timeout(struct nano_sem *sem, int32_t timeout_in_tic
         }
 
         key = irq_lock();
-        cur_ticks = nano_tick_get();
+        cur_ticks = sys_tick_get();
         limit = cur_ticks + timeout_in_ticks;
 
         while (cur_ticks < limit) {
@@ -260,7 +260,7 @@ int nano_task_sem_take_wait_timeout(struct nano_sem *sem, int32_t timeout_in_tic
                 nano_cpu_atomic_idle(key);
 
                 key = irq_lock();
-                cur_ticks = nano_tick_get();
+                cur_ticks = sys_tick_get();
         }
 
         irq_unlock(key);
@@ -48,14 +48,14 @@ void task_sleep(int32_t timeout_in_ticks)
         int key;
 
         key = irq_lock();
-        cur_ticks = nano_tick_get();
+        cur_ticks = sys_tick_get();
         limit = cur_ticks + timeout_in_ticks;
 
         while (cur_ticks < limit) {
                 nano_cpu_atomic_idle(key);
 
                 key = irq_lock();
-                cur_ticks = nano_tick_get();
+                cur_ticks = sys_tick_get();
         }
 
         irq_unlock(key);
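
The task-level timeout paths above (FIFO, LIFO, semaphore, task_sleep) share one pattern: read the tick count under irq_lock(), compute a limit, then atomically idle and re-check until the limit is reached. A condensed sketch (object_available() is a hypothetical readiness check; everything else mirrors the diff):

        int key = irq_lock();
        int64_t cur_ticks = sys_tick_get();
        int64_t limit = cur_ticks + timeout_in_ticks;

        while (cur_ticks < limit) {
                if (object_available()) {
                        break;          /* satisfied before the deadline */
                }
                /* sleep until the next interrupt, releasing the lock atomically */
                nano_cpu_atomic_idle(key);

                key = irq_lock();
                cur_ticks = sys_tick_get();
        }

        irq_unlock(key);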
@@ -54,7 +54,7 @@ int64_t _sys_clock_tick_count;
  * @return the current system tick count
  *
  */
-uint32_t nano_tick_get_32(void)
+uint32_t sys_tick_get_32(void)
 {
         return (uint32_t)_sys_clock_tick_count;
 }
@@ -66,7 +66,7 @@ uint32_t nano_tick_get_32(void)
  * @return the current system tick count
  *
  */
-int64_t nano_tick_get(void)
+int64_t sys_tick_get(void)
 {
         int64_t tmp_sys_clock_tick_count;
         /*
@@ -93,17 +93,17 @@ int64_t nano_tick_get(void)
  * tick count is the return value. Since the first call is meant to only fill in
  * the reference time, its return value should be discarded.
  *
- * Since a code fragment that wants to use nano_tick_delta passes in its
+ * Since a code fragment that wants to use sys_tick_delta() passes in its
  * own reference time variable, multiple code fragments can make use of this
  * function concurrently.
  *
 * e.g.
 * uint64_t reftime;
-* (void) nano_tick_delta(&reftime);  /# prime it #/
+* (void) sys_tick_delta(&reftime);  /# prime it #/
 * [do stuff]
-* x = nano_tick_delta(&reftime);    /# how long since priming #/
+* x = sys_tick_delta(&reftime);    /# how long since priming #/
 * [do more stuff]
-* y = nano_tick_delta(&reftime);    /# how long since [do stuff] #/
+* y = sys_tick_delta(&reftime);    /# how long since [do stuff] #/
 *
 * @return tick count since reference time; undefined for first invocation
 *
@@ -137,13 +137,13 @@ static ALWAYS_INLINE int64_t _nano_tick_delta(int64_t *reftime)
  *
  * @return tick count since reference time; undefined for first invocation
  */
-int64_t nano_tick_delta(int64_t *reftime)
+int64_t sys_tick_delta(int64_t *reftime)
 {
         return _nano_tick_delta(reftime);
 }
 
 
-uint32_t nano_tick_delta_32(int64_t *reftime)
+uint32_t sys_tick_delta_32(int64_t *reftime)
 {
         return (uint32_t)_nano_tick_delta(reftime);
 }
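
Spelled out as a compilable version of the usage described in the block comment above (do_stuff() and do_more_stuff() are hypothetical placeholders):

        int64_t reftime;
        int64_t x, y;

        (void) sys_tick_delta(&reftime);        /* prime the reference time */
        do_stuff();
        x = sys_tick_delta(&reftime);           /* ticks since priming */
        do_more_stuff();
        y = sys_tick_delta(&reftime);           /* ticks since do_stuff() finished */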
@@ -43,7 +43,7 @@ void clock_init(void)
 #ifdef CONFIG_MICROKERNEL
         start_time = task_tick_get();
 #else /* CONFIG_NANOKERNEL */
-        nano_tick_delta(&start_time);
+        sys_tick_delta(&start_time);
 #endif
 }
 
@@ -52,7 +52,7 @@ clock_time_t clock_time(void)
 #ifdef CONFIG_MICROKERNEL
         return task_tick_get_32();
 #else /* CONFIG_NANOKERNEL */
-        return nano_tick_get_32();
+        return sys_tick_get_32();
 #endif
 }
 
@@ -40,7 +40,7 @@
  */
 static inline int is_timeout_in_range(int32_t orig_ticks, int32_t expected)
 {
-        int32_t diff = nano_tick_get() - orig_ticks;
+        int32_t diff = sys_tick_get() - orig_ticks;
 
 #if SHORT_TIMEOUTS
         /*
@@ -25,7 +25,7 @@
 #ifdef CONFIG_NANOKERNEL
 #define TAKE(x) nano_fiber_sem_take_wait(&x)
 #define GIVE(x) nano_fiber_sem_give(&x)
-#define RANDDELAY(x) myDelay(((nano_tick_get_32() * ((x) + 1)) & 0x2f) + 1)
+#define RANDDELAY(x) myDelay(((sys_tick_get_32() * ((x) + 1)) & 0x2f) + 1)
 #define SLEEP(x) fiber_sleep(x)
 #else /* ! CONFIG_NANOKERNEL */
 #define TAKE(x) task_mutex_lock_wait(x)
@@ -169,14 +169,14 @@ void busy_task_entry(void)
          */
         is_busy_task_awake = 0;
         SLEEP(1000);
-        ticks_when_awake = nano_tick_get_32();
+        ticks_when_awake = sys_tick_get_32();
 
         /*
          * keep the cpu busy for 1000 ticks preventing the system entering
          * to sleep mode.
          */
         is_busy_task_awake = 1;
-        while (nano_tick_get_32() - ticks_when_awake < 1000) {
+        while (sys_tick_get_32() - ticks_when_awake < 1000) {
                 i++;
         }
 }
@@ -224,7 +224,7 @@ void summary_data_printer(void)
         PRINTF("\x1b[8;1HGENERAL DATA");
         PRINTF("\x1b[9;1H------------");
 
-        PRINTF("\x1b[10;1HSystem tick count : %d ", nano_tick_get_32());
+        PRINTF("\x1b[10;1HSystem tick count : %d ", sys_tick_get_32());
 
         /* print dropped event counter */
         PRINTF("\x1b[11;1HDropped events # : %d ", total_dropped_counter);
@@ -22,7 +22,7 @@
 #define FORK(x) &forks[x]
 #define TAKE(x) nano_fiber_sem_take_wait(x)
 #define GIVE(x) nano_fiber_sem_give(x)
-#define RANDDELAY(x) myDelay(((nano_tick_get_32() * ((x) +1)) & 0x1f) + 1)
+#define RANDDELAY(x) myDelay(((sys_tick_get_32() * ((x) + 1)) & 0x1f) + 1)
 #else /* ! CONFIG_NANOKERNEL */
 #define FORK(x) forks[x]
 #define TAKE(x) task_mutex_lock_wait(x)
@@ -42,14 +42,14 @@
 #define BENCH_MAX_TICKS (sys_clock_ticks_per_sec - 1)
 
 typedef int64_t TICK_TYPE;
-#define TICK_GET(x) (TICK_TYPE)nano_tick_delta(x)
+#define TICK_GET(x) ((TICK_TYPE) sys_tick_delta(x))
 
 static inline void TICK_SYNCH(void)
 {
         TICK_TYPE reftime;
 
-        (void) nano_tick_delta(&reftime);
-        while (nano_tick_delta(&reftime) == 0) {
+        (void) sys_tick_delta(&reftime);
+        while (sys_tick_delta(&reftime) == 0) {
         }
 }
 
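
TICK_SYNCH() above spins until the tick counter advances so that a measurement starts on a fresh tick. A sketch of how a benchmark might combine it with TICK_GET() (operation_under_test() is a hypothetical placeholder):

        TICK_TYPE reftime;
        TICK_TYPE elapsed;

        TICK_SYNCH();                   /* align to the start of a tick */
        (void) TICK_GET(&reftime);      /* prime the reference time */
        operation_under_test();
        elapsed = TICK_GET(&reftime);   /* ticks consumed by the operation */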
@@ -80,7 +80,7 @@ x87 FPU registers are being saved/restored.
 /* macro used to read system clock value */
 
 #ifdef CONFIG_NANOKERNEL
-#define TICK_COUNT_GET() nano_tick_get_32()
+#define TICK_COUNT_GET() sys_tick_get_32()
 #else
 #define TICK_COUNT_GET() task_tick_get_32()
 #endif
@@ -29,8 +29,8 @@ nano_cpu_idle
 should have advanced by one tick.
 
 irq_lock
-- 1. Count the number of calls to nano_tick_get_32() before a tick expires.
-- 2. Once determined, call nano_tick_get_32() many more times than that
+- 1. Count the number of calls to sys_tick_get_32() before a tick expires.
+- 2. Once determined, call sys_tick_get_32() many more times than that
      with interrupts locked. Check that the tick count remains unchanged.
 
 irq_unlock
@@ -199,15 +199,15 @@ int nano_cpu_idleTest(void)
         int i; /* loop variable */
 
         /* Align to a "tick boundary". */
-        tick = nano_tick_get_32();
-        while (tick == nano_tick_get_32()) {
+        tick = sys_tick_get_32();
+        while (tick == sys_tick_get_32()) {
         }
-        tick = nano_tick_get_32();
+        tick = sys_tick_get_32();
 
         for (i = 0; i < 5; i++) { /* Repeat the test five times */
                 nano_cpu_idle();
                 tick++;
-                if (nano_tick_get_32() != tick) {
+                if (sys_tick_get_32() != tick) {
                         return TC_FAIL;
                 }
         }
@@ -286,12 +286,12 @@ int nanoCpuDisableInterruptsTest(disable_interrupt_func disableRtn,
         int imask;
 
         /* Align to a "tick boundary" */
-        tick = nano_tick_get_32();
-        while (nano_tick_get_32() == tick) {
+        tick = sys_tick_get_32();
+        while (sys_tick_get_32() == tick) {
         }
         tick++;
 
-        while (nano_tick_get_32() == tick) {
+        while (sys_tick_get_32() == tick) {
                 count++;
         }
 
@@ -305,12 +305,12 @@ int nanoCpuDisableInterruptsTest(disable_interrupt_func disableRtn,
         count <<= 4;
 
         imask = disableRtn(irq);
-        tick = nano_tick_get_32();
+        tick = sys_tick_get_32();
         for (i = 0; i < count; i++) {
-                nano_tick_get_32();
+                sys_tick_get_32();
         }
 
-        tick2 = nano_tick_get_32();
+        tick2 = sys_tick_get_32();
 
         /*
          * Re-enable interrupts before returning (for both success and failure
@@ -325,10 +325,10 @@ int nanoCpuDisableInterruptsTest(disable_interrupt_func disableRtn,
 
         /* Now repeat with interrupts unlocked. */
         for (i = 0; i < count; i++) {
-                nano_tick_get_32();
+                sys_tick_get_32();
         }
 
-        return (tick == nano_tick_get_32()) ? TC_FAIL : TC_PASS;
+        return (tick == sys_tick_get_32()) ? TC_FAIL : TC_PASS;
 }
 
 /**
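
nanoCpuDisableInterruptsTest() above first calibrates how many sys_tick_get_32() calls fit into one tick, then repeats a larger number of calls with interrupts locked and checks that the tick count cannot advance. Condensed sketch (lock_ints()/unlock_ints() are hypothetical stand-ins for the test's disableRtn/enableRtn callbacks):

        uint32_t tick, tick2;
        int count = 0;
        int i, imask;

        /* Calibrate: count calls to sys_tick_get_32() during one full tick. */
        tick = sys_tick_get_32();
        while (sys_tick_get_32() == tick) {
        }
        tick++;
        while (sys_tick_get_32() == tick) {
                count++;
        }
        count <<= 4;                    /* comfortably more than one tick's worth */

        /* With interrupts locked, the tick count must not advance. */
        imask = lock_ints();
        tick = sys_tick_get_32();
        for (i = 0; i < count; i++) {
                sys_tick_get_32();
        }
        tick2 = sys_tick_get_32();
        unlock_ints(imask);

        /* tick == tick2 means the tick count never advanced: the test passes */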
@@ -649,7 +649,7 @@ static void test_fiber_busy_wait(int ticks, int unused)
 /* a fiber sleeps and times out, then reports through a fifo */
 static void test_fiber_sleep(int timeout, int arg2)
 {
-        int64_t orig_ticks = nano_tick_get();
+        int64_t orig_ticks = sys_tick_get();
 
         TC_PRINT(" fiber sleeping for %d ticks\n", timeout);
         fiber_sleep(timeout);
@@ -117,7 +117,7 @@ static void test_fiber_put_timeout(int fifo, int timeout)
 static void test_fiber_pend_and_timeout(int data, int unused)
 {
         struct timeout_order_data *d = (void *)data;
-        int32_t orig_ticks = nano_tick_get();
+        int32_t orig_ticks = sys_tick_get();
         void *packet;
 
         ARG_UNUSED(unused);
@@ -266,14 +266,14 @@ int test_fifo_timeout(void)
 
         /* test nano_task_fifo_get_wait_timeout() with timeout */
         timeout = 10;
-        orig_ticks = nano_tick_get();
+        orig_ticks = sys_tick_get();
         packet = nano_task_fifo_get_wait_timeout(&fifo_timeout[0], timeout);
         if (packet) {
                 TC_ERROR(" *** timeout of %d did not time out.\n", timeout);
                 TC_END_RESULT(TC_FAIL);
                 return TC_FAIL;
         }
-        if ((nano_tick_get() - orig_ticks) < timeout) {
+        if ((sys_tick_get() - orig_ticks) < timeout) {
                 TC_ERROR(" *** task did not wait long enough on timeout of %d.\n",
                          timeout);
                 TC_END_RESULT(TC_FAIL);
@@ -294,7 +294,7 @@ int test_fifo_timeout(void)
         TC_PRINT("test nano_task_fifo_get_wait_timeout with timeout > 0\n");
 
         timeout = 3;
-        orig_ticks = nano_tick_get();
+        orig_ticks = sys_tick_get();
 
         packet = nano_task_fifo_get_wait_timeout(&fifo_timeout[0], timeout);
 
@@ -318,7 +318,7 @@ int test_fifo_timeout(void)
          */
 
         timeout = 5;
-        orig_ticks = nano_tick_get();
+        orig_ticks = sys_tick_get();
 
         task_fiber_start(timeout_stacks[0], FIBER_STACKSIZE,
                          test_fiber_put_timeout, (int)&fifo_timeout[0],
@@ -668,7 +668,7 @@ static void test_fiber_put_timeout(int lifo, int timeout)
 static void test_fiber_pend_and_timeout(int data, int unused)
 {
         struct timeout_order_data *d = (void *)data;
-        int32_t orig_ticks = nano_tick_get();
+        int32_t orig_ticks = sys_tick_get();
         void *packet;
 
         ARG_UNUSED(unused);
@@ -817,13 +817,13 @@ static int test_timeout(void)
 
         /* test nano_task_lifo_get_wait_timeout() with timeout */
         timeout = 10;
-        orig_ticks = nano_tick_get();
+        orig_ticks = sys_tick_get();
         packet = nano_task_lifo_get_wait_timeout(&lifo_timeout[0], timeout);
         if (packet) {
                 TC_ERROR(" *** timeout of %d did not time out.\n", timeout);
                 return TC_FAIL;
         }
-        if ((nano_tick_get() - orig_ticks) < timeout) {
+        if ((sys_tick_get() - orig_ticks) < timeout) {
                 TC_ERROR(" *** task did not wait long enough on timeout of %d.\n",
                          timeout);
                 return TC_FAIL;
@@ -842,7 +842,7 @@ static int test_timeout(void)
         TC_PRINT("test nano_task_lifo_get_wait_timeout with timeout > 0\n");
 
         timeout = 3;
-        orig_ticks = nano_tick_get();
+        orig_ticks = sys_tick_get();
 
         packet = nano_task_lifo_get_wait_timeout(&lifo_timeout[0], timeout);
 
@@ -864,7 +864,7 @@ static int test_timeout(void)
          */
 
         timeout = 5;
-        orig_ticks = nano_tick_get();
+        orig_ticks = sys_tick_get();
 
         task_fiber_start(timeout_stacks[0], FIBER_STACKSIZE,
                          test_fiber_put_timeout, (int)&lifo_timeout[0],
@@ -575,7 +575,7 @@ static void test_fiber_give_timeout(int sem, int timeout)
 static void test_fiber_pend_and_timeout(int data, int unused)
 {
         struct timeout_order_data *the_data = (void *)data;
-        int32_t orig_ticks = nano_tick_get();
+        int32_t orig_ticks = sys_tick_get();
         int rv;
 
         ARG_UNUSED(unused);
@@ -711,13 +711,13 @@ static int test_timeout(void)
 
         /* test nano_task_sem_take_wait_timeout() with timeout */
         timeout = 10;
-        orig_ticks = nano_tick_get();
+        orig_ticks = sys_tick_get();
         rv = nano_task_sem_take_wait_timeout(&sem_timeout[0], timeout);
         if (rv) {
                 TC_ERROR(" *** timeout of %d did not time out.\n", timeout);
                 return TC_FAIL;
         }
-        if ((nano_tick_get() - orig_ticks) < timeout) {
+        if ((sys_tick_get() - orig_ticks) < timeout) {
                 TC_ERROR(" *** task did not wait long enough on timeout of %d.\n",
                          timeout);
                 return TC_FAIL;
@@ -736,7 +736,7 @@ static int test_timeout(void)
         TC_PRINT("test nano_task_sem_take_wait_timeout with timeout > 0\n");
 
         timeout = 3;
-        orig_ticks = nano_tick_get();
+        orig_ticks = sys_tick_get();
 
         rv = nano_task_sem_take_wait_timeout(&sem_timeout[0], timeout);
 
@@ -758,7 +758,7 @@ static int test_timeout(void)
          */
 
         timeout = 5;
-        orig_ticks = nano_tick_get();
+        orig_ticks = sys_tick_get();
 
         task_fiber_start(timeout_stacks[0], FIBER_STACKSIZE,
                          test_fiber_give_timeout, (int)&sem_timeout[0],
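
The FIFO, LIFO, and semaphore test cases above verify a timeout the same way: record the tick count before the blocking call and check that at least <timeout> ticks elapsed afterwards. Condensed sketch (blocking_take_with_timeout() is a hypothetical stand-in for the nano_task_*_wait_timeout() routines):

        int32_t timeout = 10;
        int64_t orig_ticks = sys_tick_get();
        int rv;

        rv = blocking_take_with_timeout(timeout);
        if (rv) {
                return TC_FAIL;         /* object unexpectedly became available */
        }
        if ((sys_tick_get() - orig_ticks) < timeout) {
                return TC_FAIL;         /* returned before the timeout elapsed */
        }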
@@ -17,7 +17,7 @@ Timer Expiry
 Case 2: Timer has not expired
 Case 3: Wait for a timer to expire
 
-Expired timers can use the nano_tick_get_32() and nano_tick_delta() routines
+Expired timers can use the sys_tick_get_32() and sys_tick_delta() routines
 to check the results against the timer routines.
 
 This test set does not yet test/verify nano_cycle_get_32()--that must still be
@@ -38,5 +38,5 @@ nano_task_timer_wait
 
 --------------------------------
 nanoTimeInit (implicitly done)
-nano_tick_get_32
-nano_tick_delta
+sys_tick_get_32
+sys_tick_delta
@@ -22,7 +22,7 @@ This module tests the following timer related routines:
 nano_timer_init(), nano_fiber_timer_start(), nano_fiber_timer_stop(),
 nano_fiber_timer_test(), nano_fiber_timer_wait(), nano_task_timer_start(),
 nano_task_timer_stop(), nano_task_timer_test(), nano_task_timer_wait(),
-nano_tick_get_32(), nano_cycle_get_32(), nano_tick_delta()
+sys_tick_get_32(), nano_cycle_get_32(), sys_tick_delta()
 */
 
 #include <tc_util.h>
@@ -93,11 +93,11 @@ void initNanoObjects(void)
  * This routine can be called from a task or a fiber to wait upon a timer.
  * It will busy wait until the current tick ends, at which point it will
  * start and then wait upon a timer. The length of time it spent waiting
- * gets cross-checked with the nano_tick_get_32() and nanoTimeElapsed() APIs.
+ * gets cross-checked with the sys_tick_get_32() and nanoTimeElapsed() APIs.
  * All three are expected to match up, but a tolerance of one (1) tick is
  * considered acceptable.
 *
- * This routine can be considered as testing nano_tick_get_32(),
+ * This routine can be considered as testing sys_tick_get_32(),
  * nanoTimeElapsed() and nanoXXXTimerGetW() successful expiration cases.
 *
  * @param startRtn routine to start the timer
@@ -124,18 +124,18 @@ int basicTimerWait(timer_start_func startRtn, timer_getw_func waitRtn,
 
         TC_PRINT(" - test expected to take four seconds\n");
 
-        tick = nano_tick_get_32();
-        while (nano_tick_get_32() == tick) {
+        tick = sys_tick_get_32();
+        while (sys_tick_get_32() == tick) {
                 /* Align to a tick boundary */
         }
 
         tick++;
-        (void) nano_tick_delta(&reftime);
+        (void) sys_tick_delta(&reftime);
         startRtn(pTimer, ticks); /* Start the timer */
         result = waitRtn(pTimer); /* Wait for the timer to expire */
 
-        elapsed_32 = nano_tick_delta_32(&reftime);
-        duration = nano_tick_get_32() - tick;
+        elapsed_32 = sys_tick_delta_32(&reftime);
+        duration = sys_tick_get_32() - tick;
 
         /*
          * The difference between <duration> and <elapsed> is expected to be zero
@@ -149,19 +149,19 @@ int basicTimerWait(timer_start_func startRtn, timer_getw_func waitRtn,
         }
 
         /* Check that the non-wait-timer-get routine works properly. */
-        tick = nano_tick_get_32();
-        while (nano_tick_get_32() == tick) {
+        tick = sys_tick_get_32();
+        while (sys_tick_get_32() == tick) {
                 /* Align to a tick boundary */
         }
 
         tick++;
-        (void) nano_tick_delta(&reftime);
+        (void) sys_tick_delta(&reftime);
         startRtn(pTimer, ticks); /* Start the timer */
         while ((result = getRtn(pTimer)) == NULL) {
                 busywaited = 1;
         }
-        elapsed = nano_tick_delta(&reftime);
-        duration = nano_tick_get_32() - tick;
+        elapsed = sys_tick_delta(&reftime);
+        duration = sys_tick_get_32() - tick;
 
         if ((busywaited != 1) || (result != pTimerData) ||
             (duration - elapsed > 1) || ((duration - ticks) > 1)) {
@@ -192,8 +192,8 @@ void startTimers(timer_start_func startRtn)
 {
         int tick; /* current tick */
 
-        tick = nano_tick_get_32();
-        while (nano_tick_get_32() == tick) {
+        tick = sys_tick_get_32();
+        while (sys_tick_get_32() == tick) {
                 /* Wait for the end of the tick */
         }
 
@@ -225,8 +225,8 @@ int busyWaitTimers(timer_get_func getRtn)
 
         TC_PRINT(" - test expected to take five or six seconds\n");
 
-        ticks = nano_tick_get_32() + SIX_SECONDS;
-        while ((numExpired != 4) && (nano_tick_get_32() < ticks)) {
+        ticks = sys_tick_get_32() + SIX_SECONDS;
+        while ((numExpired != 4) && (sys_tick_get_32() < ticks)) {
                 result = getRtn(&timer);
                 if (result != NULL) {
                         numExpired++;
@@ -268,7 +268,7 @@ int busyWaitTimers(timer_get_func getRtn)
                 }
         }
 
-        return (nano_tick_get_32() < ticks) ? TC_PASS : TC_FAIL;
+        return (sys_tick_get_32() < ticks) ? TC_PASS : TC_FAIL;
 }
 
 /**
@@ -299,13 +299,13 @@ int stopTimers(timer_stop_func stopRtn, timer_get_func getRtn)
 
         TC_PRINT(" - test expected to take six seconds\n");
 
-        startTick = nano_tick_get_32();
-        while (nano_tick_get_32() == startTick) {
+        startTick = sys_tick_get_32();
+        while (sys_tick_get_32() == startTick) {
         }
         startTick++;
         endTick = startTick + SIX_SECONDS;
 
-        while (nano_tick_get_32() < endTick) {
+        while (sys_tick_get_32() < endTick) {
                 if ((getRtn(&timer) != NULL) || (getRtn(&shortTimer) != NULL) ||
                     (getRtn(&midTimer) != NULL) || (getRtn(&longTimer) != NULL)) {
                         return TC_FAIL;
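
basicTimerWait() above cross-checks a timer's expiry against both sys_tick_get_32() and sys_tick_delta_32(), allowing a one-tick tolerance. Condensed sketch (start_timer_and_wait() is a hypothetical stand-in for the startRtn/waitRtn function pointers, and the two-second timer length is an assumed value):

        int64_t reftime;
        uint32_t tick, duration, elapsed_32;
        int ticks = 2 * sys_clock_ticks_per_sec;        /* assumed timer length */

        tick = sys_tick_get_32();
        while (sys_tick_get_32() == tick) {
                /* align to a tick boundary */
        }
        tick++;

        (void) sys_tick_delta_32(&reftime);     /* prime the reference time */
        start_timer_and_wait(ticks);

        elapsed_32 = sys_tick_delta_32(&reftime);
        duration = sys_tick_get_32() - tick;

        /* <duration>, <elapsed_32> and <ticks> should agree to within one tick */
        if ((duration - elapsed_32 > 1) || ((duration - ticks) > 1)) {
                /* mismatch: fail the test */
        }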