samples: Add test code for sys_thread_busy_wait() API

Enhances the existing nanokernel context test application to
validate busy waiting.

Change-Id: I08cd74f74ed596f0baa30d879e1d98000b1d4c85
Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Allan Stephens 2015-10-07 14:07:51 -04:00 committed by Anas Nashif
commit dcb31f140d
2 changed files with 86 additions and 2 deletions
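For context, the new test boils down to this pattern: a fiber converts a tick count to microseconds, spins in sys_thread_busy_wait(), and then gives a semaphore so the waiting task knows the call returned. A condensed sketch (names are illustrative, based on the diff below and the 2015-era nanokernel API assumed by this tree):

	static void busy_wait_demo(int ticks, int unused)	/* hypothetical helper */
	{
		ARG_UNUSED(unused);

		uint32_t usecs = ticks * sys_clock_us_per_tick;	/* ticks -> microseconds */

		sys_thread_busy_wait(usecs);		/* spin without yielding the CPU */
		nano_fiber_sem_give(&reply_timeout);	/* report back to the waiting task */
	}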


@@ -30,11 +30,10 @@ or
Sample Output:
-tc_start() - Test Nanokernel CPU and context routines
+tc_start() - Test Nanokernel CPU and thread routines
Initializing nanokernel objects
Testing nano_cpu_idle()
Testing interrupt locking and unlocking
Testing inline interrupt locking and unlocking
Testing irq_disable() and irq_enable()
Testing sys_thread_self_get() from an ISR and task
Testing sys_execution_context_type_get() from an ISR
@@ -42,6 +41,38 @@ Testing sys_execution_context_type_get() from a task
Spawning a fiber from a task
Fiber to test sys_thread_self_get() and sys_execution_context_type_get
Fiber to test fiber_yield()
Testing sys_thread_busy_wait()
fiber busy waiting for 20000 usecs (2 ticks)
fiber busy waiting completed
Testing fiber_sleep()
fiber sleeping for 5 ticks
fiber back from sleep
Testing fiber_delayed_start() without cancellation
fiber (q order: 2, t/o: 50) is running
got fiber (q order: 2, t/o: 50) as expected
fiber (q order: 3, t/o: 75) is running
got fiber (q order: 3, t/o: 75) as expected
fiber (q order: 0, t/o: 100) is running
got fiber (q order: 0, t/o: 100) as expected
fiber (q order: 6, t/o: 125) is running
got fiber (q order: 6, t/o: 125) as expected
fiber (q order: 1, t/o: 150) is running
got fiber (q order: 1, t/o: 150) as expected
fiber (q order: 4, t/o: 175) is running
got fiber (q order: 4, t/o: 175) as expected
fiber (q order: 5, t/o: 200) is running
got fiber (q order: 5, t/o: 200) as expected
Testing fiber_delayed_start() with cancellations
cancelling [q order: 0, t/o: 100, t/o order: 0]
fiber (q order: 3, t/o: 75) is running
got (q order: 3, t/o: 75, t/o order 1074292) as expected
fiber (q order: 0, t/o: 100) is running
got (q order: 0, t/o: 100, t/o order 1074292) as expected
cancelling [q order: 3, t/o: 75, t/o order: 3]
cancelling [q order: 4, t/o: 175, t/o order: 4]
fiber (q order: 4, t/o: 175) is running
got (q order: 4, t/o: 175, t/o order 1074292) as expected
cancelling [q order: 6, t/o: 125, t/o order: 6]
Verifying exception handler installed
excHandlerExecuted: 1
PASS - main.


@@ -629,6 +629,36 @@ struct timeout_order_data timeout_order_data[] = {
#define NUM_TIMEOUT_FIBERS ARRAY_SIZE(timeout_order_data)
static char __stack timeout_stacks[NUM_TIMEOUT_FIBERS][FIBER_STACKSIZE];
#ifndef CONFIG_ARM
/* a fiber busy waits, then reports through a semaphore */
static void test_fiber_busy_wait(int ticks, int unused)
{
	ARG_UNUSED(unused);

	uint32_t usecs = ticks * sys_clock_us_per_tick;

	TC_PRINT(" fiber busy waiting for %d usecs (%d ticks)\n",
		 usecs, ticks);
	sys_thread_busy_wait(usecs);
	TC_PRINT(" fiber busy waiting completed\n");

	/*
	 * Ideally the test should verify that the correct number of ticks
	 * have elapsed. However, when run under QEMU the tick interrupt
	 * may be processed on a very irregular basis, meaning that far
	 * fewer than the expected number of ticks may occur for a given
	 * number of clock cycles vs. what would ordinarily be expected.
	 *
	 * Consequently, the best we can do for now to test busy waiting is
	 * to invoke the API and verify that it returns. (If it takes way
	 * too long, or never returns, the main test task may be able to
	 * time out and report an error.)
	 */

	nano_fiber_sem_give(&reply_timeout);
}
#endif
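/*
 * Hypothetical sketch, not part of this commit: the tick-based check the
 * comment above calls "ideal". It assumes a tick accessor such as
 * nano_tick_get_32() is available in this tree; under QEMU the irregular
 * tick delivery described above would make this check unreliable, which is
 * exactly why the test only verifies that sys_thread_busy_wait() returns.
 */
static int busy_wait_tick_check(int ticks)
{
	uint32_t start = nano_tick_get_32();

	sys_thread_busy_wait(ticks * sys_clock_us_per_tick);

	/* expect at least 'ticks' tick interrupts to have been serviced */
	return ((nano_tick_get_32() - start) >= (uint32_t)ticks) ? TC_PASS : TC_FAIL;
}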
/* a fiber sleeps and times out, then reports through a fifo */
static void test_fiber_sleep(int timeout, int arg2)
{
@@ -664,6 +694,29 @@ static int test_timeout(void)
	int ii;
	struct timeout_order_data *data;

	/*
	 * sys_thread_busy_wait() is currently unsupported for ARM
	 */

#ifndef CONFIG_ARM
	/* test sys_thread_busy_wait() */

	TC_PRINT("Testing sys_thread_busy_wait()\n");
	timeout = 2;
	task_fiber_start(timeout_stacks[0], FIBER_STACKSIZE,
			 test_fiber_busy_wait, (int)timeout, 0,
			 FIBER_PRIORITY, 0);
	rv = nano_task_sem_take_wait_timeout(&reply_timeout, timeout + 2);
	if (!rv) {
		rv = TC_FAIL;
		TC_ERROR(" *** task timed out waiting for sys_thread_busy_wait()\n");
		return TC_FAIL;
	}
#endif /* CONFIG_ARM */

	/* test fiber_sleep() */

	TC_PRINT("Testing fiber_sleep()\n");