tests: workqueue: overhaul tests and add new test cases

During inspection of the workqueue tests, I found testing gaps and
decided to add new test cases to improve Zephyr OS testing quality.
Added new test cases (a minimal sketch of the handshake pattern they
all share follows this list):
1. test_work_item_supplied_with_func
The docs state that a work item is supplied with a handler function;
prove that this works.
2. test_process_work_items_fifo
Test that the system processes work items in a first-in, first-out
manner.
3. test_sched_delayed_work_item
Verify that a delayed work item is processed after the specific period
of time stated by the user.
4. test_workqueue_max_number
Test the limit on the number of workqueues that can be created.
5. test_cancel_processed_work_item
Created to increase branch coverage.
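All of the new tests rely on the same semaphore handshake between the
test body and the work handler. A minimal sketch of that pattern (an
illustration only, assuming <zephyr.h> and the legacy k_work/k_sem API
of this tree; handler, done, item and example are placeholder names,
not part of the diff below):

#include <zephyr.h>

static struct k_sem done;
static struct k_work item;

/* The handler gives the semaphore to signal that it ran */
static void handler(struct k_work *unused)
{
	k_sem_give(&done);
}

void example(void)
{
	k_sem_init(&done, 0, 1);
	k_work_init(&item, handler);
	k_work_submit(&item);          /* queue on the system workqueue */
	k_sem_take(&done, K_FOREVER);  /* blocks until handler has run */
}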

Modified existing test cases:
1. test_work_submit_handler: updated the Doxygen tag and added a more
detailed description.

Signed-off-by: Maksim Masalski <maksim.masalski@intel.com>

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016 Intel Corporation
+ * Copyright (c) 2020 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -19,10 +19,23 @@
#define TIMEOUT K_MSEC(TIMEOUT_MS)
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)
#define NUM_OF_WORK 2
#define SYNC_SEM_INIT_VAL (0U)
#define COM_SEM_MAX_VAL (1U)
#define COM_SEM_INIT_VAL (0U)
#define MAX_WORK_Q_NUMBER 10
#define MY_PRIORITY 5
struct k_delayed_work work_item_delayed;
struct k_sem common_sema, sema_fifo_one, sema_fifo_two;
struct k_work work_item, work_item_1, work_item_2;
struct k_work_q work_q_max_number[MAX_WORK_Q_NUMBER];
K_THREAD_STACK_DEFINE(my_stack_area, STACK_SIZE);
K_THREAD_STACK_DEFINE(new_stack_area[MAX_WORK_Q_NUMBER], STACK_SIZE);
static K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
static K_THREAD_STACK_DEFINE(user_tstack, STACK_SIZE);
static struct k_work_q workq;
struct k_work_q work_q_1;
static struct k_work_q user_workq;
static ZTEST_BMEM struct k_work work[NUM_OF_WORK];
static struct k_delayed_work new_work;
@@ -37,6 +50,154 @@ static struct k_sem sync_sema;
static struct k_sem dummy_sema;
static struct k_thread *main_thread;
/**
* @brief Common function used as a handler for the workqueue tests;
* the API call inside it signals successful execution of the handler
*
* @param unused of type k_work, needed so that the handler function
* is accepted by k_work_init()
*
* @return N/A
*/
void common_work_handler(struct k_work *unused)
{
k_sem_give(&sync_sema);
}
/**
* @brief Test that a work item can take a callback function defined
* by the user
* @details
* - Create a work item, then attach a handler function to it.
* - Process that work item.
* - To check that the handler function executed successfully, the
* semaphore sync_sema is used, with initial count 0.
* - The handler function gives the semaphore, and the test body waits
* for it.
* - If the semaphore is obtained successfully, the test passes.
* @ingroup kernel_workqueue_tests
*/
void test_work_item_supplied_with_func(void)
{
u32_t sem_count = 0;
k_sem_reset(&sync_sema);
/**TESTPOINT: init work item with a user-defined function*/
k_work_init(&work_item, common_work_handler);
k_work_submit_to_queue(&workq, &work_item);
k_sem_take(&sync_sema, K_FOREVER);
sem_count = k_sem_count_get(&sync_sema);
zassert_equal(sem_count, COM_SEM_INIT_VAL, NULL);
}
/* The two handler functions fifo_work_first() and fifo_work_second()
* are attached to two work items to test that the workqueue thread
* processes work items in a first-in, first-out manner.
*/
void fifo_work_first(struct k_work *unused)
{
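/* sema_fifo_one starts with count 1, so this take succeeds
* immediately; giving sema_fifo_two then unblocks fifo_work_second()
*/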
k_sem_take(&sema_fifo_one, K_FOREVER);
k_sem_give(&sema_fifo_two);
}
void fifo_work_second(struct k_work *unused)
{
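/* Blocks until fifo_work_first() gives sema_fifo_two, which proves
* the first work item already ran
*/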
k_sem_take(&sema_fifo_two, K_FOREVER);
}
/**
* @brief Test that the kernel processes work items in a FIFO manner
* @details Two handler functions are used:
* - fifo_work_first() is attached to work_item_1 and fifo_work_second()
* to work_item_2.
* - work_item_1 is expected to run first, and only then work_item_2.
* - To verify this, the semaphore sema_fifo_one is initialized with
* count 1 (available), and fifo_work_first() takes it.
* fifo_work_second() waits for the semaphore sema_fifo_two,
* which is given by fifo_work_first().
* - If work_item_2 were executed before work_item_1, a timeout would
* occur, because sema_fifo_two would never be given and
* fifo_work_second() waits for it with K_FOREVER.
* @ingroup kernel_workqueue_tests
*/
void test_process_work_items_fifo(void)
{
k_work_init(&work_item_1, fifo_work_first);
k_work_init(&work_item_2, fifo_work_second);
/**TESTPOINT: submit work items to the queue in fifo manner*/
k_work_submit_to_queue(&workq, &work_item_1);
k_work_submit_to_queue(&workq, &work_item_2);
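/* No explicit assertion here: if the items ran out of order,
* fifo_work_second() would block forever on sema_fifo_two and later
* tests using this workqueue would time out
*/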
}
/**
* @brief Test that the kernel supports scheduling a work item to be
* processed after a user-defined period of time
* @details
* - The test uses the semaphore sync_sema, with initial count 0.
* - The actually elapsed time is measured and compared with the time
* reported by k_delayed_work_remaining_get().
* - Using the system clock, the time between submitting the delayed
* work and its execution is measured.
* - The semaphore signals that the delayed work was executed: right
* after the handler function gives it, the time measurement stops.
* - The two results are then compared.
* @ingroup kernel_workqueue_tests
*/
void test_sched_delayed_work_item(void)
{
s32_t ms_remain, ms_spent, start_time, stop_time, cycles_spent;
s32_t ms_delta = 10;
k_sem_reset(&sync_sema);
/* TESTPOINT: init delayed work to be processed
* only after a specific period of time
*/
k_delayed_work_init(&work_item_delayed, common_work_handler);
start_time = k_cycle_get_32();
k_delayed_work_submit_to_queue(&workq, &work_item_delayed, TIMEOUT);
ms_remain = k_delayed_work_remaining_get(&work_item_delayed);
k_sem_take(&sync_sema, K_FOREVER);
stop_time = k_cycle_get_32();
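/* Convert the elapsed hardware cycles to milliseconds and compare
* them with the remaining time reported right after submission
*/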
cycles_spent = stop_time - start_time;
ms_spent = (u32_t)k_cyc_to_ms_floor32(cycles_spent);
zassert_within(ms_spent, ms_remain, ms_delta, NULL);
}
/**
* @brief Test that an application can define multiple workqueues
* @details
* - Any number of workqueues can be defined using variables of type
* struct k_work_q; the practical maximum is bounded by the available
* stack space.
* - The test defines and initializes the maximum number of workqueues
* and starts them.
* @ingroup kernel_workqueue_tests
*/
void test_workqueue_max_number(void)
{
u32_t work_q_num = 0;
for (u32_t i = 0; i < MAX_WORK_Q_NUMBER; i++) {
work_q_num++;
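/* Each k_work_q_start() call spawns a dedicated workqueue thread
* running on its own statically defined stack
*/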
k_work_q_start(&work_q_max_number[i], new_stack_area[i],
K_THREAD_STACK_SIZEOF(new_stack_area[i]),
MY_PRIORITY);
}
zassert_true(work_q_num == MAX_WORK_Q_NUMBER,
"Max number of the defined work queues not reached, "
"real number of the created work queues is "
"%d, expected %d", work_q_num, MAX_WORK_Q_NUMBER);
}
static void work_sleepy(struct k_work *w)
{
k_sleep(TIMEOUT);
@@ -114,8 +275,9 @@ static void twork_resubmit(void *data)
*/
k_queue_remove(&(new_work.work_q->queue), &(new_work.work));
- zassert_equal(k_delayed_work_submit_to_queue(work_q, &new_work, K_NO_WAIT),
- -EINVAL, NULL);
+ zassert_equal(k_delayed_work_submit_to_queue(work_q, &new_work,
+ K_NO_WAIT),
+ -EINVAL, NULL);
k_sem_give(&sync_sema);
}
@@ -148,7 +310,8 @@ static void tdelayed_work_submit_1(struct k_work_q *work_q,
/**TESTPOINT: check remaining timeout after submit */
zassert_true(time_remaining <= k_ticks_to_ms_floor64(timeout_ticks +
- _TICK_ALIGN), NULL);
+ _TICK_ALIGN),
+ NULL);
timeout_ticks -= z_ms_to_ticks(15);
@@ -179,7 +342,8 @@ static void tdelayed_work_cancel(void *data)
if (work_q) {
ret = k_delayed_work_submit_to_queue(work_q,
- &delayed_work_sleepy, TIMEOUT);
+ &delayed_work_sleepy,
+ TIMEOUT);
ret |= k_delayed_work_submit_to_queue(work_q, &delayed_work[0],
TIMEOUT);
ret |= k_delayed_work_submit_to_queue(work_q, &delayed_work[1],
@@ -219,7 +383,8 @@ static void tdelayed_work_cancel(void *data)
k_sleep(TIMEOUT);
/**TESTPOINT: check pending when work completed*/
zassert_false(k_work_pending(
- (struct k_work *)&delayed_work_sleepy), NULL);
+ (struct k_work *)&delayed_work_sleepy),
+ NULL);
/**TESTPOINT: delayed work cancel when completed*/
ret = k_delayed_work_cancel(&delayed_work_sleepy);
zassert_not_equal(ret, 0, NULL);
@@ -242,33 +407,38 @@ static void ttriggered_work_submit(void *data)
k_work_poll_init(&triggered_work[i], work_handler);
/**TESTPOINT: check pending after triggered work init*/
zassert_false(k_work_pending(
- (struct k_work *)&triggered_work[i]), NULL);
+ (struct k_work *)&triggered_work[i]),
+ NULL);
if (work_q) {
/**TESTPOINT: triggered work submit to queue*/
- zassert_true(k_work_poll_submit_to_queue(work_q,
- &triggered_work[i],
- &triggered_work_event[i], 1,
- K_FOREVER) == 0, NULL);
+ zassert_true(k_work_poll_submit_to_queue(
+ work_q,
+ &triggered_work[i],
+ &triggered_work_event[i], 1,
+ K_FOREVER) == 0, NULL);
} else {
/**TESTPOINT: triggered work submit to system queue*/
zassert_true(k_work_poll_submit(
- &triggered_work[i],
- &triggered_work_event[i], 1,
- K_FOREVER) == 0, NULL);
+ &triggered_work[i],
+ &triggered_work_event[i], 1,
+ K_FOREVER) == 0, NULL);
}
/**TESTPOINT: check pending after triggered work submit*/
zassert_true(k_work_pending(
- (struct k_work *)&triggered_work[i]) == 0, NULL);
+ (struct k_work *)&triggered_work[i]) == 0,
+ NULL);
}
for (int i = 0; i < NUM_OF_WORK; i++) {
/**TESTPOINT: trigger work execution*/
- zassert_true(k_poll_signal_raise(&triggered_work_signal[i], 1)
- == 0, NULL);
+ zassert_true(k_poll_signal_raise(
+ &triggered_work_signal[i], 1) == 0,
+ NULL);
/**TESTPOINT: check pending after sending signal */
zassert_true(k_work_pending(
- (struct k_work *)&triggered_work[i]) != 0, NULL);
+ (struct k_work *)&triggered_work[i]) != 0,
+ NULL);
}
}
@@ -297,21 +467,27 @@ static void ttriggered_work_cancel(void *data)
if (work_q) {
ret = k_work_poll_submit_to_queue(work_q,
- &triggered_work_sleepy, &triggered_work_sleepy_event, 1,
- K_FOREVER);
+ &triggered_work_sleepy,
+ &triggered_work_sleepy_event,
+ 1, K_FOREVER);
ret |= k_work_poll_submit_to_queue(work_q,
- &triggered_work[0], &triggered_work_event[0], 1,
- K_FOREVER);
+ &triggered_work[0],
+ &triggered_work_event[0], 1,
+ K_FOREVER);
ret |= k_work_poll_submit_to_queue(work_q,
- &triggered_work[1], &triggered_work_event[1], 1,
- K_FOREVER);
+ &triggered_work[1],
+ &triggered_work_event[1], 1,
+ K_FOREVER);
} else {
ret = k_work_poll_submit(&triggered_work_sleepy,
- &triggered_work_sleepy_event, 1, K_FOREVER);
+ &triggered_work_sleepy_event, 1,
+ K_FOREVER);
ret |= k_work_poll_submit(&triggered_work[0],
- &triggered_work_event[0], 1, K_FOREVER);
+ &triggered_work_event[0], 1,
+ K_FOREVER);
ret |= k_work_poll_submit(&triggered_work[1],
- &triggered_work_event[1], 1, K_FOREVER);
+ &triggered_work_event[1], 1,
+ K_FOREVER);
}
/* Check if all submission succeeded */
zassert_true(ret == 0, NULL);
@@ -411,9 +587,7 @@ void test_user_workq_granted_access(void)
/**
* @brief Test work submission to work queue
*
- * @ingroup kernel_workqueue_tests
- *
* @see k_work_init(), k_work_pending(), k_work_submit_to_queue(),
* k_work_submit()
*/
@@ -480,7 +654,8 @@ void test_work_resubmit_to_queue(void)
*
* @ingroup kernel_workqueue_tests
*
- * @see k_work_init(), k_work_pending(), k_work_submit_to_queue(), k_work_submit()
+ * @see k_work_init(), k_work_pending(), k_work_submit_to_queue(),
+ * k_work_submit()
*/
void test_work_submit_to_queue_isr(void)
{
@@ -496,7 +671,8 @@ void test_work_submit_to_queue_isr(void)
*
* @ingroup kernel_workqueue_tests
*
- * @see k_work_init(), k_work_pending(), k_work_submit_to_queue(), k_work_submit()
+ * @see k_work_init(), k_work_pending(), k_work_submit_to_queue(),
+ * k_work_submit()
*/
void test_work_submit_thread(void)
{
@@ -512,7 +688,8 @@ void test_work_submit_thread(void)
*
* @ingroup kernel_workqueue_tests
*
- * @see k_work_init(), k_work_pending(), k_work_submit_to_queue(), k_work_submit()
+ * @see k_work_init(), k_work_pending(), k_work_submit_to_queue(),
+ * k_work_submit()
*/
void test_work_submit_isr(void)
{
@@ -533,10 +710,18 @@ static void work_handler_resubmit(struct k_work *w)
}
/**
- * @brief Test work submission to queue from handler context
- *
+ * @brief Test work submission to queue from handler context, resubmitting
+ * a work item during execution of its callback
* @details
* - This test uses the sync_sema semaphore, with initial count 0.
* - It verifies that a work item can be resubmitted from within its own
* handler function while that handler is executing.
* - twork_submit_1() initializes a work item with the handler function.
* - The handler function gives the semaphore sync_sema.
* - The test body waits for that semaphore in a for() loop.
* - When the semaphore is obtained, the handler function checks the
* semaphore count (again 0) and submits the work one more time.
* @ingroup kernel_workqueue_tests
*
* @see k_work_init(), k_work_pending(), k_work_submit_to_queue(),
* k_work_submit()
*/
@@ -861,13 +1046,50 @@ void test_triggered_work_cancel_isr(void)
}
}
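/* Handler that signals it has started (gives sync_sema) and then
* blocks on sema_fifo_two until the test body releases it
*/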
void new_common_work_handler(struct k_work *unused)
{
k_sem_give(&sync_sema);
k_sem_take(&sema_fifo_two, K_FOREVER);
}
/**
* @brief Test cancelling an already processed work item
* @details This test was created to increase branch coverage and to
* check the return value when cancelling an already processed delayed
* work item.
* @ingroup kernel_workqueue_tests
* @see k_delayed_work_cancel()
*/
void test_cancel_processed_work_item(void)
{
int ret;
k_sem_reset(&sync_sema);
k_sem_reset(&sema_fifo_two);
k_delayed_work_init(&work_item_delayed, new_common_work_handler);
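/* Cancelling before the work item was ever submitted must fail
* with -EINVAL
*/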
ret = k_delayed_work_cancel(&work_item_delayed);
zassert_true(ret == -EINVAL, NULL);
k_delayed_work_submit_to_queue(&workq, &work_item_delayed, TIMEOUT);
k_sem_take(&sync_sema, K_FOREVER);
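/* The handler is running now; release it so the work item completes */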
k_sem_give(&sema_fifo_two);
/**TESTPOINT: try to cancel an already processed work item*/
ret = k_delayed_work_cancel(&work_item_delayed);
zassert_true(ret == -EALREADY, NULL);
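/* Give the workqueue thread time to finish before the next test runs */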
k_sleep(TIMEOUT);
}
void test_main(void)
{
main_thread = k_current_get();
k_thread_access_grant(main_thread, &sync_sema, &user_workq.thread,
&user_workq.queue,
&user_tstack);
- k_sem_init(&sync_sema, 0, NUM_OF_WORK);
+ k_sem_init(&sync_sema, SYNC_SEM_INIT_VAL, NUM_OF_WORK);
k_sem_init(&sema_fifo_one, COM_SEM_MAX_VAL, COM_SEM_MAX_VAL);
k_sem_init(&sema_fifo_two, COM_SEM_INIT_VAL, COM_SEM_MAX_VAL);
k_thread_system_pool_assign(k_current_get());
ztest_test_suite(workqueue_api,
@@ -876,8 +1098,7 @@ void test_main(void)
ztest_user_unit_test(test_user_workq_start_before_submit),
ztest_unit_test(test_user_workq_granted_access_setup),
ztest_user_unit_test(test_user_workq_granted_access),
- /* End order-important tests */
+ /* End order-important tests */
ztest_1cpu_unit_test(test_work_submit_to_multipleq),
ztest_unit_test(test_work_resubmit_to_queue),
ztest_1cpu_unit_test(test_work_submit_to_queue_thread),
@@ -902,6 +1123,11 @@ void test_main(void)
ztest_1cpu_unit_test(test_triggered_work_cancel_from_queue_thread),
ztest_1cpu_unit_test(test_triggered_work_cancel_from_queue_isr),
ztest_1cpu_unit_test(test_triggered_work_cancel_thread),
- ztest_1cpu_unit_test(test_triggered_work_cancel_isr));
+ ztest_1cpu_unit_test(test_triggered_work_cancel_isr),
ztest_unit_test(test_work_item_supplied_with_func),
ztest_unit_test(test_process_work_items_fifo),
ztest_unit_test(test_sched_delayed_work_item),
ztest_unit_test(test_workqueue_max_number),
ztest_unit_test(test_cancel_processed_work_item));
ztest_run_test_suite(workqueue_api);
}