diff --git a/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c b/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c index e12b5000369..6e51321424b 100644 --- a/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c +++ b/tests/kernel/sched/schedule_api/src/test_sched_timeslice_reset.c @@ -19,24 +19,31 @@ static K_THREAD_STACK_ARRAY_DEFINE(tstack, NUM_THREAD, STACK_SIZE); K_SEM_DEFINE(sema, 0, NUM_THREAD); /*elapsed_slice taken by last thread*/ static s64_t elapsed_slice; -/*expected elapsed duration*/ -static s64_t expected_slice[NUM_THREAD] = { - HALF_SLICE_SIZE, /* the ztest native thread taking a half timeslice*/ - SLICE_SIZE, /* the spawned thread taking a full timeslice, reset*/ - SLICE_SIZE /* the spawned thread taking a full timeslice, reset*/ -}; static int thread_idx; static void thread_tslice(void *p1, void *p2, void *p3) { s64_t t = k_uptime_delta(&elapsed_slice); + s64_t expected_slice_min, expected_slice_max; + + if (thread_idx == 0) { + /*thread number 0 releases CPU after HALF_SLICE_SIZE*/ + expected_slice_min = HALF_SLICE_SIZE; + expected_slice_max = HALF_SLICE_SIZE; + } else { + /*other threads are sliced with tick granularity*/ + expected_slice_min = __ticks_to_ms(_ms_to_ticks(SLICE_SIZE)); + expected_slice_max = __ticks_to_ms(_ms_to_ticks(SLICE_SIZE)+1); + } #ifdef CONFIG_DEBUG - TC_PRINT("thread[%d] elapsed slice %lld, ", thread_idx, t); - TC_PRINT("expected %lld\n", expected_slice[thread_idx]); + TC_PRINT("thread[%d] elapsed slice: %lld, expected: <%lld, %lld>\n", + thread_idx, t, expected_slice_min, expected_slice_max); #endif + /** TESTPOINT: timeslice should be reset for each preemptive thread*/ - zassert_true(t <= expected_slice[thread_idx], NULL); + zassert_true(t >= expected_slice_min, NULL); + zassert_true(t <= expected_slice_max, NULL); thread_idx = (thread_idx + 1) % NUM_THREAD; u32_t t32 = k_uptime_get_32(); diff --git a/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c 
b/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c index 66823057361..4657207dd53 100644 --- a/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c +++ b/tests/kernel/sched/schedule_api/src/test_slice_scheduling.c @@ -37,20 +37,23 @@ static void thread_tslice(void *p1, void *p2, void *p3) int thread_parameter = ((int)p1 == (NUM_THREAD - 1)) ? '\n' : ((int)p1 + 'A'); + s64_t expected_slice_min = __ticks_to_ms(_ms_to_ticks(SLICE_SIZE)); + s64_t expected_slice_max = __ticks_to_ms(_ms_to_ticks(SLICE_SIZE) + 1); + while (1) { s64_t tdelta = k_uptime_delta(&elapsed_slice); - TC_PRINT("%c", thread_parameter); /* Test Fails if thread exceed allocated time slice or * Any thread is scheduled out of order. */ - zassert_true(((tdelta <= SLICE_SIZE) && + zassert_true(((tdelta >= expected_slice_min) && + (tdelta <= expected_slice_max) && ((int)p1 == thread_idx)), NULL); thread_idx = (thread_idx + 1) % (NUM_THREAD); u32_t t32 = k_uptime_get_32(); /* Keep the current thread busy for more than one slice, - * even though, when timeslice used up the next thread + * even though, when timeslice used up the next thread * should be scheduled in. */ while (k_uptime_get_32() - t32 < BUSY_MS) { @@ -104,9 +107,12 @@ void test_slice_scheduling(void) while (count < ITRERATION_COUNT) { k_uptime_delta(&elapsed_slice); - /* current thread (ztest native) consumed a half timeslice*/ + /* Keep the current thread busy for more than one slice, + * even though, when timeslice used up the next thread + * should be scheduled in. 
+ */ t32 = k_uptime_get_32(); - while (k_uptime_get_32() - t32 < SLICE_SIZE) { + while (k_uptime_get_32() - t32 < BUSY_MS) { #if defined(CONFIG_ARCH_POSIX) posix_halt_cpu(); /*sleep until next irq*/ #else diff --git a/tests/kernel/tickless/tickless_concept/src/main.c b/tests/kernel/tickless/tickless_concept/src/main.c index 6793ce25736..d0b39b97cf3 100644 --- a/tests/kernel/tickless/tickless_concept/src/main.c +++ b/tests/kernel/tickless/tickless_concept/src/main.c @@ -15,14 +15,18 @@ static struct k_thread tdata[NUM_THREAD]; #ifndef CONFIG_TICKLESS_IDLE #define CONFIG_TICKLESS_IDLE_THRESH 20 #endif -/*millisecond per tick*/ -#define MSEC_PER_TICK (__ticks_to_ms(1)) /*sleep duration tickless*/ -#define SLEEP_TICKLESS (CONFIG_TICKLESS_IDLE_THRESH * MSEC_PER_TICK) +#define SLEEP_TICKLESS __ticks_to_ms(CONFIG_TICKLESS_IDLE_THRESH) + /*sleep duration with tick*/ -#define SLEEP_TICKFUL ((CONFIG_TICKLESS_IDLE_THRESH - 1) * MSEC_PER_TICK) +#define SLEEP_TICKFUL __ticks_to_ms(CONFIG_TICKLESS_IDLE_THRESH - 1) + /*slice size is set as half of the sleep duration*/ -#define SLICE_SIZE ((CONFIG_TICKLESS_IDLE_THRESH >> 1) * MSEC_PER_TICK) +#define SLICE_SIZE __ticks_to_ms(CONFIG_TICKLESS_IDLE_THRESH >> 1) + +/*maximum slice duration accepted by the test*/ +#define SLICE_SIZE_LIMIT __ticks_to_ms((CONFIG_TICKLESS_IDLE_THRESH >> 1) + 1) + /*align to millisecond boundary*/ #if defined(CONFIG_ARCH_POSIX) #define ALIGN_MS_BOUNDARY() \ @@ -46,11 +50,13 @@ static void thread_tslice(void *p1, void *p2, void *p3) { s64_t t = k_uptime_delta(&elapsed_slice); - TC_PRINT("elapsed slice %lld\n", t); + TC_PRINT("elapsed slice %lld, expected: <%lld, %lld>\n", + t, SLICE_SIZE, SLICE_SIZE_LIMIT); + /**TESTPOINT: verify slicing scheduler behaves as expected*/ zassert_true(t >= SLICE_SIZE, NULL); /*less than one tick delay*/ - zassert_true(t <= (SLICE_SIZE + MSEC_PER_TICK), NULL); + zassert_true(t <= SLICE_SIZE_LIMIT, NULL); u32_t t32 = k_uptime_get_32(); diff --git 
a/tests/kernel/workq/work_queue/src/main.c b/tests/kernel/workq/work_queue/src/main.c index eb7e6e9d829..0a86c493ea8 100644 --- a/tests/kernel/workq/work_queue/src/main.c +++ b/tests/kernel/workq/work_queue/src/main.c @@ -15,6 +15,10 @@ /* Each work item takes 100ms */ #define WORK_ITEM_WAIT 100 +/* In fact, each work item could take up to this value */ +#define WORK_ITEM_WAIT_ALIGNED \ + __ticks_to_ms(_ms_to_ticks(WORK_ITEM_WAIT) + _TICK_ALIGN) + /* * Wait 50ms between work submissions, to ensure co-op and prempt * preempt thread submit alternatively. @@ -139,7 +143,7 @@ static void test_sequence(void) test_items_submit(); TC_PRINT(" - Waiting for work to finish\n"); - k_sleep((NUM_TEST_ITEMS + 1) * WORK_ITEM_WAIT); + k_sleep(NUM_TEST_ITEMS * WORK_ITEM_WAIT_ALIGNED); check_results(NUM_TEST_ITEMS); reset_results(); @@ -179,7 +183,7 @@ static void test_resubmit(void) k_work_submit(&tests[0].work.work); TC_PRINT(" - Waiting for work to finish\n"); - k_sleep((NUM_TEST_ITEMS + 1) * WORK_ITEM_WAIT); + k_sleep(NUM_TEST_ITEMS * WORK_ITEM_WAIT_ALIGNED); TC_PRINT(" - Checking results\n"); check_results(NUM_TEST_ITEMS); @@ -294,7 +298,7 @@ static void test_delayed_cancel(void) NULL, NULL, NULL, K_HIGHEST_THREAD_PRIO, 0, 0); TC_PRINT(" - Waiting for work to finish\n"); - k_sleep(2 * WORK_ITEM_WAIT); + k_sleep(WORK_ITEM_WAIT_ALIGNED); TC_PRINT(" - Checking results\n"); check_results(0); @@ -331,7 +335,7 @@ static void test_delayed_resubmit(void) k_delayed_work_submit(&tests[0].work, WORK_ITEM_WAIT); TC_PRINT(" - Waiting for work to finish\n"); - k_sleep((NUM_TEST_ITEMS + 1) * WORK_ITEM_WAIT); + k_sleep(NUM_TEST_ITEMS * WORK_ITEM_WAIT_ALIGNED); TC_PRINT(" - Checking results\n"); check_results(NUM_TEST_ITEMS); @@ -378,7 +382,7 @@ static void test_delayed_resubmit_thread(void) NULL, NULL, NULL, K_PRIO_COOP(10), 0, 0); TC_PRINT(" - Waiting for work to finish\n"); - k_sleep(WORK_ITEM_WAIT); + k_sleep(WORK_ITEM_WAIT_ALIGNED); TC_PRINT(" - Checking results\n"); check_results(1); @@ 
-403,7 +407,7 @@ static void test_delayed(void) test_delayed_submit(); TC_PRINT(" - Waiting for delayed work to finish\n"); - k_sleep((NUM_TEST_ITEMS + 2) * WORK_ITEM_WAIT); + k_sleep(NUM_TEST_ITEMS * WORK_ITEM_WAIT_ALIGNED); TC_PRINT(" - Checking results\n"); check_results(NUM_TEST_ITEMS);