kernel: Introduce k_work_poll

This commit adds the new k_work_poll interface. It allows a given
work item to be submitted to a workqueue automatically when one of
the watched pollable objects changes its state.

Signed-off-by: Piotr Zięcik <piotr.ziecik@nordicsemi.no>
Signed-off-by: Peter A. Bigot <pab@pabigot.com>
Piotr Zięcik 2019-09-27 09:16:25 +02:00 committed by Carles Cufí
commit 19d8349aa5
9 changed files with 1042 additions and 53 deletions


@@ -122,6 +122,41 @@ queue, unless the work item has already been removed and processed by the
workqueue's thread. Consequently, once a work item's timeout has expired
the work item is always processed by the workqueue and cannot be canceled.

Triggered Work
**************

The :cpp:func:`k_work_poll_submit()` interface schedules a triggered work
item in response to a **poll event** (see :ref:`polling_v2`) that will
call a user-defined function when a monitored resource becomes available,
a poll signal is raised, or a timeout occurs.
In contrast to :cpp:func:`k_poll()`, triggered work does not require
a dedicated thread waiting or actively polling for a poll event.

A triggered work item is a standard work item that has the following
added properties:

* A pointer to an array of poll events that will trigger work item
  submissions to the workqueue.

* The size of the array containing poll events.

A triggered work item is initialized and submitted to a workqueue in a similar
manner to a standard work item, although dedicated kernel APIs are used.
When a submit request is made, the kernel begins observing the kernel objects
specified by the poll events. Once at least one of the observed kernel
objects changes state, the work item is submitted to the specified workqueue,
where it remains pending until it is processed in the standard manner.

.. important::
    The triggered work item as well as the referenced array of poll events
    have to be valid and cannot be modified for a complete triggered work
    item lifecycle, from submission to work item execution or cancellation.

An ISR or a thread may **cancel** a triggered work item it has submitted
as long as it is still waiting for a poll event. In such a case, the kernel
stops waiting for attached poll events and the specified work is not executed.
Otherwise the cancellation cannot be performed.
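
As an editor's illustration only (not part of this commit's documentation),
here is a minimal sketch of the pattern, assuming a hypothetical
signal-triggered work item; ``my_signal``, ``my_event``, ``my_work`` and
``my_handler`` are invented names, and the flow mirrors the tests added by
this commit:

.. code-block:: c

    static struct k_poll_signal my_signal;
    static struct k_poll_event my_event;
    static struct k_work_poll my_work;

    /* Runs in the system workqueue thread once the signal is raised. */
    static void my_handler(struct k_work *work)
    {
        /* ... */
    }

    void init_and_submit(void)
    {
        k_poll_signal_init(&my_signal);
        k_poll_event_init(&my_event, K_POLL_TYPE_SIGNAL,
                          K_POLL_MODE_NOTIFY_ONLY, &my_signal);

        k_work_poll_init(&my_work, my_handler);

        /* No dedicated thread blocks here; the kernel watches the
         * event and submits the work item when the signal fires
         * (K_FOREVER: no timeout fallback).
         */
        k_work_poll_submit(&my_work, &my_event, 1, K_FOREVER);
    }

    /* Later, from a thread or an ISR: */
    void trigger(void)
    {
        k_poll_signal_raise(&my_signal, 1);
    }
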
System Workqueue
****************


@@ -2681,8 +2681,9 @@ __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data, s32_t timeo
/** @} */

struct k_work;
struct k_work_poll;

/* private, used by k_poll and k_work_poll */
typedef int (*_poller_cb_t)(struct k_poll_event *event, u32_t state);
struct _poller {
	volatile bool is_polling;

@@ -2734,6 +2735,16 @@ struct k_delayed_work {
	struct k_work_q *work_q;
};

struct k_work_poll {
	struct k_work work;
	struct _poller poller;
	struct k_poll_event *events;
	int num_events;
	k_work_handler_t real_handler;
	struct _timeout timeout;
	int poll_result;
};

extern struct k_work_q k_sys_work_q;

/**

@@ -3066,6 +3077,116 @@ static inline s32_t k_delayed_work_remaining_get(struct k_delayed_work *work)
	return __ticks_to_ms(z_timeout_remaining(&work->timeout));
}

/**
 * @brief Initialize a triggered work item.
 *
 * This routine initializes a workqueue triggered work item, prior to
 * its first use.
 *
 * @param work Address of triggered work item.
 * @param handler Function to invoke each time work item is processed.
 *
 * @return N/A
 */
extern void k_work_poll_init(struct k_work_poll *work,
			     k_work_handler_t handler);
/**
 * @brief Submit a triggered work item.
 *
 * This routine schedules work item @a work to be processed by workqueue
 * @a work_q when one of the given @a events is signaled. The routine
 * initiates an internal poller for the work item and then returns to the
 * caller. Only when one of the watched events happens is the work item
 * actually submitted to the workqueue, where it becomes pending.
 *
 * Submitting a previously submitted triggered work item that is still
 * waiting for the event cancels the existing submission and reschedules it
 * using the new event list. Note that this behavior is inherently subject
 * to race conditions with the pre-existing triggered work item and work
 * queue, so care must be taken to synchronize such resubmissions externally.
 *
 * @note Can be called by ISRs.
 *
 * @warning
 * The provided array of events as well as the triggered work item must be
 * placed in persistent memory (valid until work handler execution or work
 * cancellation) and cannot be modified after submission.
 *
 * @param work_q Address of workqueue.
 * @param work Address of triggered work item.
 * @param events An array of pointers to events which trigger the work.
 * @param num_events The number of events in the array.
 * @param timeout Timeout after which the work will be scheduled for
 *		  execution even if not triggered.
 *
 * @retval 0 Work item started watching for events.
 * @retval -EINVAL Work item is being processed or has completed its work.
 * @retval -EADDRINUSE Work item is pending on a different workqueue.
 */
extern int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				       struct k_work_poll *work,
				       struct k_poll_event *events,
				       int num_events,
				       s32_t timeout);
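
A hedged usage sketch (editor's illustration, not part of the commit:
my_work_q, my_work and my_event are invented names, assumed initialized
elsewhere, with my_work_q started via k_work_q_start()):

	/* Watch one event; fall back to automatic submission (with
	 * poll_result -EAGAIN) after 100 ms.
	 */
	int err = k_work_poll_submit_to_queue(&my_work_q, &my_work,
					      &my_event, 1, 100);
	if (err == -EADDRINUSE) {
		/* The item is still owned by a different workqueue. */
	}
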
/**
 * @brief Submit a triggered work item to the system workqueue.
 *
 * This routine schedules work item @a work to be processed by the system
 * workqueue when one of the given @a events is signaled. The routine
 * initiates an internal poller for the work item and then returns to the
 * caller. Only when one of the watched events happens is the work item
 * actually submitted to the workqueue, where it becomes pending.
 *
 * Submitting a previously submitted triggered work item that is still
 * waiting for the event cancels the existing submission and reschedules it
 * using the new event list. Note that this behavior is inherently subject
 * to race conditions with the pre-existing triggered work item and work
 * queue, so care must be taken to synchronize such resubmissions externally.
 *
 * @note Can be called by ISRs.
 *
 * @warning
 * The provided array of events as well as the triggered work item must not
 * be modified until the item has been processed by the workqueue.
 *
 * @param work Address of triggered work item.
 * @param events An array of pointers to events which trigger the work.
 * @param num_events The number of events in the array.
 * @param timeout Timeout after which the work will be scheduled for
 *		  execution even if not triggered.
 *
 * @retval 0 Work item started watching for events.
 * @retval -EINVAL Work item is being processed or has completed its work.
 * @retval -EADDRINUSE Work item is pending on a different workqueue.
 */
static inline int k_work_poll_submit(struct k_work_poll *work,
				     struct k_poll_event *events,
				     int num_events,
				     s32_t timeout)
{
	return k_work_poll_submit_to_queue(&k_sys_work_q, work,
					   events, num_events, timeout);
}
/**
 * @brief Cancel a triggered work item.
 *
 * This routine cancels the submission of triggered work item @a work.
 * A triggered work item can only be canceled if no event has yet triggered
 * its submission to the workqueue.
 *
 * @note Can be called by ISRs.
 *
 * @param work Address of triggered work item.
 *
 * @retval 0 Work item canceled.
 * @retval -EINVAL Work item is being processed or has completed its work.
 */
extern int k_work_poll_cancel(struct k_work_poll *work);
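
A short sketch of the cancellation contract (editor's illustration; my_work
is an invented, previously submitted triggered work item; the return values
follow the contract above and the tests in this commit):

	int err = k_work_poll_cancel(&my_work);
	if (err == 0) {
		/* Still waiting on its events: watching stopped and
		 * the handler will not run.
		 */
	} else {
		/* -EINVAL: an event or timeout already submitted the
		 * item (pending, running, or done), so it can no
		 * longer be canceled.
		 */
	}
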
/** @} */

/**
 * @defgroup mutex_apis Mutex APIs


@@ -252,7 +252,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
		.thread = _current,
		.cb = k_poll_poller_cb };

	__ASSERT(!z_arch_is_in_isr(), "");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events > 0, "zero events\n");

@@ -482,3 +482,211 @@ static inline void z_vrfy_k_poll_signal_reset(struct k_poll_signal *signal)
#endif
static void triggered_work_handler(struct k_work *work)
{
	k_work_handler_t handler;
	struct k_work_poll *twork =
			CONTAINER_OF(work, struct k_work_poll, work);

	/*
	 * If the callback is not set, k_work_poll_submit_to_queue()
	 * has already cleared the event registrations.
	 */
	if (twork->poller.cb != NULL) {
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		clear_event_registrations(twork->events,
					  twork->num_events, key);
		k_spin_unlock(&lock, key);
	}

	/* Drop work ownership and execute the real handler. */
	handler = twork->real_handler;
	twork->poller.thread = NULL;
	handler(work);
}

static void triggered_work_expiration_handler(struct _timeout *timeout)
{
	struct k_work_poll *twork =
			CONTAINER_OF(timeout, struct k_work_poll, timeout);
	struct k_work_q *work_q =
			CONTAINER_OF(twork->poller.thread, struct k_work_q, thread);

	twork->poller.is_polling = false;
	twork->poll_result = -EAGAIN;

	k_work_submit_to_queue(work_q, &twork->work);
}

static int triggered_work_poller_cb(struct k_poll_event *event, u32_t status)
{
	struct _poller *poller = event->poller;

	if (poller->is_polling && poller->thread) {
		struct k_work_poll *twork =
			CONTAINER_OF(poller, struct k_work_poll, poller);
		struct k_work_q *work_q =
			CONTAINER_OF(poller->thread, struct k_work_q, thread);

		z_abort_timeout(&twork->timeout);
		twork->poll_result = 0;
		k_work_submit_to_queue(work_q, &twork->work);
	}

	return 0;
}

static int triggered_work_cancel(struct k_work_poll *work,
				 k_spinlock_key_t key)
{
	/* Check if the work waits for an event. */
	if (work->poller.is_polling && work->poller.cb != NULL) {
		/* Remove the timeout associated with the work. */
		z_abort_timeout(&work->timeout);

		/*
		 * Prevent work execution if an event arrives while we
		 * are clearing the registrations.
		 */
		work->poller.cb = NULL;

		/* Clear registrations and work ownership. */
		clear_event_registrations(work->events, work->num_events, key);
		work->poller.thread = NULL;
		return 0;
	}

	/*
	 * If we reached here, the work is either being registered in
	 * k_work_poll_submit_to_queue(), executing, or already pending.
	 * Only in the last case do we have a chance to cancel it, but
	 * unfortunately there is no public API performing this task.
	 */
	return -EINVAL;
}
void k_work_poll_init(struct k_work_poll *work,
		      k_work_handler_t handler)
{
	k_work_init(&work->work, triggered_work_handler);
	work->events = NULL;
	work->poller.thread = NULL;
	work->real_handler = handler;
	z_init_timeout(&work->timeout);
}

int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				struct k_work_poll *work,
				struct k_poll_event *events,
				int num_events,
				s32_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;

	__ASSERT(work_q != NULL, "NULL work_q\n");
	__ASSERT(work != NULL, "NULL work\n");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events > 0, "zero events\n");

	/* Take ownership of the work if it is possible. */
	key = k_spin_lock(&lock);
	if (work->poller.thread != NULL) {
		if (work->poller.thread == &work_q->thread) {
			int retval;

			retval = triggered_work_cancel(work, key);
			if (retval < 0) {
				k_spin_unlock(&lock, key);
				return retval;
			}
		} else {
			k_spin_unlock(&lock, key);
			return -EADDRINUSE;
		}
	}

	work->poller.is_polling = true;
	work->poller.thread = &work_q->thread;
	work->poller.cb = NULL;
	k_spin_unlock(&lock, key);

	/* Save the list of events. */
	work->events = events;
	work->num_events = num_events;

	/* Clear the result. */
	work->poll_result = -EINPROGRESS;

	/* Register the events. */
	events_registered = register_events(events, num_events,
					    &work->poller, false);

	key = k_spin_lock(&lock);
	if (work->poller.is_polling && timeout != K_NO_WAIT) {
		/*
		 * The poller is still polling:
		 * no event is ready and all are watched.
		 */
		__ASSERT(num_events == events_registered,
			 "Some events were not registered!\n");

		/* Set up a timeout if such an action is requested. */
		if (timeout != K_FOREVER) {
			z_add_timeout(&work->timeout,
				      triggered_work_expiration_handler,
				      z_ms_to_ticks(timeout));
		}

		/* From now on, any event will result in submitted work. */
		work->poller.cb = triggered_work_poller_cb;
		k_spin_unlock(&lock, key);
		return 0;
	}

	/*
	 * The K_NO_WAIT timeout was specified, or at least one event was
	 * ready at registration time or changed state since registration.
	 * The poller->cb was not set yet, so the work was not submitted
	 * to the workqueue.
	 */

	/*
	 * If the poller is still polling, no watched event occurred. This
	 * means we reached here due to the K_NO_WAIT timeout "expiration".
	 */
	if (work->poller.is_polling) {
		work->poller.is_polling = false;
		work->poll_result = -EAGAIN;
	} else {
		work->poll_result = 0;
	}

	/* Clear the registrations. */
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	/* Submit the work. */
	k_work_submit_to_queue(work_q, &work->work);

	return 0;
}
int k_work_poll_cancel(struct k_work_poll *work)
{
	k_spinlock_key_t key;
	int retval;

	/* Check if the work was submitted. */
	if (work == NULL || work->poller.thread == NULL) {
		return -EINVAL;
	}

	key = k_spin_lock(&lock);
	retval = triggered_work_cancel(work, key);
	k_spin_unlock(&lock, key);

	return retval;
}
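
For reference, a hedged sketch (editor's illustration, handler name invented)
of how a user handler can tell why it ran, based on poll_result as set above
(0 from triggered_work_poller_cb, -EAGAIN from the expiration handler or an
untriggered K_NO_WAIT submission):

	static void my_handler(struct k_work *work)
	{
		struct k_work_poll *twork =
				CONTAINER_OF(work, struct k_work_poll, work);

		if (twork->poll_result == 0) {
			/* A watched event changed state. */
		} else if (twork->poll_result == -EAGAIN) {
			/* The timeout expired, or K_NO_WAIT was used
			 * and no event was ready.
			 */
		}
	}
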


@@ -1,5 +1,6 @@
CONFIG_ZTEST=y
CONFIG_QEMU_TICKLESS_WORKAROUND=y
CONFIG_POLL=y

# Not a single test case here is SMP-safe. Save the cycles needed for
# all the ztest_1cpu spinning.


@@ -23,8 +23,7 @@
 * Wait 50ms between work submissions, to ensure co-op and preempt
 * threads submit alternately.
 */
#define SUBMIT_WAIT 50
#define STACK_SIZE (1024 + CONFIG_TEST_EXTRA_STACKSIZE)

/* How long to wait for the full test suite to complete. Allow for a

@@ -32,22 +31,33 @@
 */
#define CHECK_WAIT ((NUM_TEST_ITEMS + 1) * WORK_ITEM_WAIT_ALIGNED)
struct delayed_test_item {
	int key;
	struct k_delayed_work work;
};

struct triggered_test_item {
	int key;
	struct k_work_poll work;
	struct k_poll_signal signal;
	struct k_poll_event event;
};

static K_THREAD_STACK_DEFINE(co_op_stack, STACK_SIZE);
static struct k_thread co_op_data;

static struct delayed_test_item delayed_tests[NUM_TEST_ITEMS];
static struct triggered_test_item triggered_tests[NUM_TEST_ITEMS];

static int results[NUM_TEST_ITEMS];
static int num_results;

static int expected_poll_result;

static void work_handler(struct k_work *work)
{
	struct delayed_test_item *ti =
			CONTAINER_OF(work, struct delayed_test_item, work);

	TC_PRINT(" - Running test item %d\n", ti->key);
	k_sleep(WORK_ITEM_WAIT);

@@ -56,16 +66,16 @@ static void work_handler(struct k_work *work)
}

/**
 * @ingroup kernel_workqueue_delayed_tests
 * @see k_work_init()
 */
static void delayed_test_items_init(void)
{
	int i;

	for (i = 0; i < NUM_TEST_ITEMS; i++) {
		delayed_tests[i].key = i + 1;
		k_work_init(&delayed_tests[i].work.work, work_handler);
	}
}
@@ -92,16 +102,16 @@ static void coop_work_main(int arg1, int arg2)
	for (i = 1; i < NUM_TEST_ITEMS; i += 2) {
		TC_PRINT(" - Submitting work %d from coop thread\n", i + 1);
		k_work_submit(&delayed_tests[i].work.work);
		k_sleep(SUBMIT_WAIT);
	}
}

/**
 * @ingroup kernel_workqueue_delayed_tests
 * @see k_work_submit()
 */
static void delayed_test_items_submit(void)
{
	int i;

@@ -111,7 +121,7 @@ static void test_items_submit(void)
	for (i = 0; i < NUM_TEST_ITEMS; i += 2) {
		TC_PRINT(" - Submitting work %d from preempt thread\n", i + 1);
		k_work_submit(&delayed_tests[i].work.work);
		k_sleep(SUBMIT_WAIT);
	}
}

@@ -136,17 +146,17 @@ static void check_results(int num_tests)
/**
 * @brief Test work queue items submission sequence
 *
 * @ingroup kernel_workqueue_delayed_tests
 *
 * @see k_work_init(), k_work_submit()
 */
static void test_sequence(void)
{
	TC_PRINT(" - Initializing test items\n");
	delayed_test_items_init();

	TC_PRINT(" - Submitting test items\n");
	delayed_test_items_submit();

	TC_PRINT(" - Waiting for work to finish\n");
	k_sleep(CHECK_WAIT);

@@ -159,7 +169,8 @@ static void test_sequence(void)
static void resubmit_work_handler(struct k_work *work)
{
	struct delayed_test_item *ti =
			CONTAINER_OF(work, struct delayed_test_item, work);

	k_sleep(WORK_ITEM_WAIT);

@@ -174,7 +185,7 @@ static void resubmit_work_handler(struct k_work *work)
/**
 * @brief Test work queue item resubmission
 *
 * @ingroup kernel_workqueue_delayed_tests
 *
 * @see k_work_submit()
 */

@@ -182,11 +193,11 @@
{
	TC_PRINT("Starting resubmit test\n");

	delayed_tests[0].key = 1;
	delayed_tests[0].work.work.handler = resubmit_work_handler;

	TC_PRINT(" - Submitting work\n");
	k_work_submit(&delayed_tests[0].work.work);

	TC_PRINT(" - Waiting for work to finish\n");
	k_sleep(CHECK_WAIT);

@@ -198,7 +209,8 @@
static void delayed_work_handler(struct k_work *work)
{
	struct delayed_test_item *ti =
			CONTAINER_OF(work, struct delayed_test_item, work);

	TC_PRINT(" - Running delayed test item %d\n", ti->key);

@@ -208,7 +220,7 @@
/**
 * @brief Test delayed work queue init
 *
 * @ingroup kernel_workqueue_delayed_tests
 *
 * @see k_delayed_work_init()
 */

@@ -217,8 +229,9 @@
	int i;

	for (i = 0; i < NUM_TEST_ITEMS; i++) {
		delayed_tests[i].key = i + 1;
		k_delayed_work_init(&delayed_tests[i].work,
				    delayed_work_handler);
	}
}
@@ -235,7 +248,7 @@
	for (i = 1; i < NUM_TEST_ITEMS; i += 2) {
		TC_PRINT(" - Submitting delayed work %d from"
			 " coop thread\n", i + 1);
		k_delayed_work_submit(&delayed_tests[i].work,
				      (i + 1) * WORK_ITEM_WAIT);
	}
}

@@ -243,7 +256,7 @@
/**
 * @brief Test delayed workqueue submit
 *
 * @ingroup kernel_workqueue_delayed_tests
 *
 * @see k_delayed_work_init(), k_delayed_work_submit()
 */

@@ -258,7 +271,7 @@
	for (i = 0; i < NUM_TEST_ITEMS; i += 2) {
		TC_PRINT(" - Submitting delayed work %d from"
			 " preempt thread\n", i + 1);
		zassert_true(k_delayed_work_submit(&delayed_tests[i].work,
			     (i + 1) * WORK_ITEM_WAIT) == 0, NULL);
	}

@@ -269,23 +282,24 @@ static void coop_delayed_work_cancel_main(int arg1, int arg2)
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);

	k_delayed_work_submit(&delayed_tests[1].work, WORK_ITEM_WAIT);

	TC_PRINT(" - Cancel delayed work from coop thread\n");
	k_delayed_work_cancel(&delayed_tests[1].work);

#if defined(CONFIG_POLL)
	k_delayed_work_submit(&delayed_tests[2].work,
			      0 /* Submit immediately */);

	TC_PRINT(" - Cancel pending delayed work from coop thread\n");
	k_delayed_work_cancel(&delayed_tests[2].work);
#endif
}
/**
 * @brief Test work queue delayed cancel
 *
 * @ingroup kernel_workqueue_delayed_tests
 *
 * @see k_delayed_work_init(), k_delayed_work_submit(),
 * k_delayed_work_cancel()

@@ -294,10 +308,10 @@
{
	TC_PRINT("Starting delayed cancel test\n");

	k_delayed_work_submit(&delayed_tests[0].work, WORK_ITEM_WAIT);

	TC_PRINT(" - Cancel delayed work from preempt thread\n");
	k_delayed_work_cancel(&delayed_tests[0].work);

	k_thread_create(&co_op_data, co_op_stack, STACK_SIZE,
			(k_thread_entry_t)coop_delayed_work_cancel_main,

@@ -312,7 +326,8 @@
static void delayed_resubmit_work_handler(struct k_work *work)
{
	struct delayed_test_item *ti =
			CONTAINER_OF(work, struct delayed_test_item, work);

	results[num_results++] = ti->key;

@@ -326,7 +341,7 @@ static void delayed_resubmit_work_handler(struct k_work *work)
/**
 * @brief Test delayed resubmission of work queue item
 *
 * @ingroup kernel_workqueue_delayed_tests
 *
 * @see k_delayed_work_init(), k_delayed_work_submit()
 */

@@ -334,11 +349,12 @@
{
	TC_PRINT("Starting delayed resubmit test\n");

	delayed_tests[0].key = 1;
	k_delayed_work_init(&delayed_tests[0].work,
			    delayed_resubmit_work_handler);

	TC_PRINT(" - Submitting delayed work\n");
	k_delayed_work_submit(&delayed_tests[0].work, WORK_ITEM_WAIT);

	TC_PRINT(" - Waiting for work to finish\n");
	k_sleep(CHECK_WAIT);
@@ -354,7 +370,7 @@ static void coop_delayed_work_resubmit(void)
	for (i = 0; i < NUM_TEST_ITEMS; i++) {
		TC_PRINT(" - Resubmitting delayed work with 1 ms\n");
		k_delayed_work_submit(&delayed_tests[0].work, 1);

		/* Busy wait 1 ms to force a clash with workqueue */
#if defined(CONFIG_ARCH_POSIX)

@@ -372,7 +388,7 @@
/**
 * @brief Test delayed resubmission of work queue thread
 *
 * @ingroup kernel_workqueue_delayed_tests
 *
 * @see k_delayed_work_init()
 */

@@ -380,8 +396,8 @@
{
	TC_PRINT("Starting delayed resubmit from coop thread test\n");

	delayed_tests[0].key = 1;
	k_delayed_work_init(&delayed_tests[0].work, delayed_work_handler);

	k_thread_create(&co_op_data, co_op_stack, STACK_SIZE,
			(k_thread_entry_t)coop_delayed_work_resubmit,

@@ -398,7 +414,7 @@
/**
 * @brief Test delayed work items
 *
 * @ingroup kernel_workqueue_delayed_tests
 *
 * @see k_delayed_work_init(), k_delayed_work_submit()
 */

@@ -420,6 +436,325 @@ static void test_delayed(void)
	reset_results();
}
static void triggered_work_handler(struct k_work *work)
{
	struct triggered_test_item *ti =
			CONTAINER_OF(work, struct triggered_test_item, work);

	TC_PRINT(" - Running triggered test item %d\n", ti->key);

	zassert_equal(ti->work.poll_result, expected_poll_result,
		      "res %d expect %d", ti->work.poll_result,
		      expected_poll_result);

	results[num_results++] = ti->key;
}

/**
 * @brief Test triggered work queue init
 *
 * @ingroup kernel_workqueue_triggered_tests
 *
 * @see k_work_poll_init()
 */
static void test_triggered_init(void)
{
	int i;

	for (i = 0; i < NUM_TEST_ITEMS; i++) {
		triggered_tests[i].key = i + 1;
		k_work_poll_init(&triggered_tests[i].work,
				 triggered_work_handler);

		k_poll_signal_init(&triggered_tests[i].signal);
		k_poll_event_init(&triggered_tests[i].event,
				  K_POLL_TYPE_SIGNAL,
				  K_POLL_MODE_NOTIFY_ONLY,
				  &triggered_tests[i].signal);
	}
}

/**
 * @brief Test triggered workqueue submit
 *
 * @ingroup kernel_workqueue_triggered_tests
 *
 * @see k_work_poll_init(), k_work_poll_submit()
 */
static void test_triggered_submit(s32_t timeout)
{
	int i;

	for (i = 0; i < NUM_TEST_ITEMS; i++) {
		TC_PRINT(" - Submitting triggered work %d\n", i + 1);
		zassert_true(k_work_poll_submit(&triggered_tests[i].work,
						&triggered_tests[i].event,
						1, timeout) == 0, NULL);
	}
}
/**
 * @brief Trigger triggered workqueue execution
 *
 * @ingroup kernel_workqueue_triggered_tests
 */
static void test_triggered_trigger(void)
{
	int i;

	for (i = 0; i < NUM_TEST_ITEMS; i++) {
		TC_PRINT(" - Triggering work %d\n", i + 1);
		zassert_true(k_poll_signal_raise(&triggered_tests[i].signal,
						 1) == 0, NULL);
	}
}

/**
 * @brief Test triggered work items
 *
 * @ingroup kernel_workqueue_triggered_tests
 *
 * @see k_work_poll_init(), k_work_poll_submit()
 */
static void test_triggered(void)
{
	TC_PRINT("Starting triggered test\n");

	/* As work items are triggered, they should indicate an event. */
	expected_poll_result = 0;

	TC_PRINT(" - Initializing triggered test items\n");
	test_triggered_init();

	TC_PRINT(" - Submitting triggered test items\n");
	test_triggered_submit(K_FOREVER);

	TC_PRINT(" - Triggering test items execution\n");
	test_triggered_trigger();

	/* The items should be executed while we sleep. */
	k_sleep(WORK_ITEM_WAIT);

	TC_PRINT(" - Checking results\n");
	check_results(NUM_TEST_ITEMS);
	reset_results();
}

/**
 * @brief Test already triggered work items
 *
 * @ingroup kernel_workqueue_triggered_tests
 *
 * @see k_work_poll_init(), k_work_poll_submit()
 */
static void test_already_triggered(void)
{
	TC_PRINT("Starting triggered test\n");

	/* As work items are triggered, they should indicate an event. */
	expected_poll_result = 0;

	TC_PRINT(" - Initializing triggered test items\n");
	test_triggered_init();

	TC_PRINT(" - Triggering test items execution\n");
	test_triggered_trigger();

	TC_PRINT(" - Submitting triggered test items\n");
	test_triggered_submit(K_FOREVER);

	/* The items should be executed while we sleep. */
	k_sleep(WORK_ITEM_WAIT);

	TC_PRINT(" - Checking results\n");
	check_results(NUM_TEST_ITEMS);
	reset_results();
}
static void triggered_resubmit_work_handler(struct k_work *work)
{
	struct triggered_test_item *ti =
			CONTAINER_OF(work, struct triggered_test_item, work);

	results[num_results++] = ti->key;

	if (ti->key < NUM_TEST_ITEMS) {
		ti->key++;
		TC_PRINT(" - Resubmitting triggered work\n");

		k_poll_signal_reset(&triggered_tests[0].signal);
		zassert_true(k_work_poll_submit(&triggered_tests[0].work,
						&triggered_tests[0].event,
						1, K_FOREVER) == 0, NULL);
	}
}

/**
 * @brief Test resubmission of triggered work queue item
 *
 * @ingroup kernel_workqueue_triggered_tests
 *
 * @see k_work_poll_init(), k_work_poll_submit()
 */
static void test_triggered_resubmit(void)
{
	int i;

	TC_PRINT("Starting triggered resubmit test\n");

	/* As work items are triggered, they should indicate an event. */
	expected_poll_result = 0;

	triggered_tests[0].key = 1;
	k_work_poll_init(&triggered_tests[0].work,
			 triggered_resubmit_work_handler);

	k_poll_signal_init(&triggered_tests[0].signal);
	k_poll_event_init(&triggered_tests[0].event,
			  K_POLL_TYPE_SIGNAL,
			  K_POLL_MODE_NOTIFY_ONLY,
			  &triggered_tests[0].signal);

	TC_PRINT(" - Submitting triggered work\n");
	zassert_true(k_work_poll_submit(&triggered_tests[0].work,
					&triggered_tests[0].event,
					1, K_FOREVER) == 0, NULL);

	for (i = 0; i < NUM_TEST_ITEMS; i++) {
		TC_PRINT(" - Triggering test item execution (iteration: %d)\n",
			 i + 1);
		zassert_true(k_poll_signal_raise(&triggered_tests[0].signal,
						 1) == 0, NULL);
		k_sleep(WORK_ITEM_WAIT);
	}

	TC_PRINT(" - Checking results\n");
	check_results(NUM_TEST_ITEMS);
	reset_results();
}
/**
 * @brief Test triggered work items with K_NO_WAIT timeout
 *
 * @ingroup kernel_workqueue_triggered_tests
 *
 * @see k_work_poll_init(), k_work_poll_submit()
 */
static void test_triggered_no_wait(void)
{
	TC_PRINT("Starting triggered test\n");

	/* As work items are triggered, they should indicate an event. */
	expected_poll_result = 0;

	TC_PRINT(" - Initializing triggered test items\n");
	test_triggered_init();

	TC_PRINT(" - Triggering test items execution\n");
	test_triggered_trigger();

	TC_PRINT(" - Submitting triggered test items\n");
	test_triggered_submit(K_NO_WAIT);

	/* The items should be executed while we sleep. */
	k_sleep(WORK_ITEM_WAIT);

	TC_PRINT(" - Checking results\n");
	check_results(NUM_TEST_ITEMS);
	reset_results();
}

/**
 * @brief Test expired triggered work items with K_NO_WAIT timeout
 *
 * @ingroup kernel_workqueue_triggered_tests
 *
 * @see k_work_poll_init(), k_work_poll_submit()
 */
static void test_triggered_no_wait_expired(void)
{
	TC_PRINT("Starting triggered test\n");

	/* As work items are not triggered, they should be marked as expired. */
	expected_poll_result = -EAGAIN;

	TC_PRINT(" - Initializing triggered test items\n");
	test_triggered_init();

	TC_PRINT(" - Submitting triggered test items\n");
	test_triggered_submit(K_NO_WAIT);

	/* The items should be executed while we sleep. */
	k_sleep(WORK_ITEM_WAIT);

	TC_PRINT(" - Checking results\n");
	check_results(NUM_TEST_ITEMS);
	reset_results();
}

/**
 * @brief Test triggered work items with arbitrary timeout
 *
 * @ingroup kernel_workqueue_triggered_tests
 *
 * @see k_work_poll_init(), k_work_poll_submit()
 */
static void test_triggered_wait(void)
{
	TC_PRINT("Starting triggered test\n");

	/* As work items are triggered, they should indicate an event. */
	expected_poll_result = 0;

	TC_PRINT(" - Initializing triggered test items\n");
	test_triggered_init();

	TC_PRINT(" - Triggering test items execution\n");
	test_triggered_trigger();

	TC_PRINT(" - Submitting triggered test items\n");
	test_triggered_submit(2 * SUBMIT_WAIT);

	/* The items should be executed while we sleep. */
	k_sleep(SUBMIT_WAIT);

	TC_PRINT(" - Checking results\n");
	check_results(NUM_TEST_ITEMS);
	reset_results();
}
/**
 * @brief Test expired triggered work items with arbitrary timeout
 *
 * @ingroup kernel_workqueue_triggered_tests
 *
 * @see k_work_poll_init(), k_work_poll_submit()
 */
static void test_triggered_wait_expired(void)
{
	TC_PRINT("Starting triggered test\n");

	/* As work items are not triggered, they should time out. */
	expected_poll_result = -EAGAIN;

	TC_PRINT(" - Initializing triggered test items\n");
	test_triggered_init();

	TC_PRINT(" - Submitting triggered test items\n");
	test_triggered_submit(2 * SUBMIT_WAIT);

	/* The items should not be executed while we sleep here. */
	k_sleep(SUBMIT_WAIT);

	TC_PRINT(" - Checking results (before timeout)\n");
	check_results(0);

	/* The items should be executed while we sleep here. */
	k_sleep(SUBMIT_WAIT);

	TC_PRINT(" - Checking results (after timeout)\n");
	check_results(NUM_TEST_ITEMS);
	reset_results();
}
/*test case main entry*/
void test_main(void)
{

@@ -430,7 +765,14 @@
			 ztest_1cpu_unit_test(test_delayed),
			 ztest_1cpu_unit_test(test_delayed_resubmit),
			 ztest_1cpu_unit_test(test_delayed_resubmit_thread),
			 ztest_1cpu_unit_test(test_delayed_cancel),
			 ztest_1cpu_unit_test(test_triggered),
			 ztest_1cpu_unit_test(test_already_triggered),
			 ztest_1cpu_unit_test(test_triggered_resubmit),
			 ztest_1cpu_unit_test(test_triggered_no_wait),
			 ztest_1cpu_unit_test(test_triggered_no_wait_expired),
			 ztest_1cpu_unit_test(test_triggered_wait),
			 ztest_1cpu_unit_test(test_triggered_wait_expired)
			 );
	ztest_run_test_suite(workqueue);
}


@@ -1,7 +1,4 @@
 tests:
   kernel.workqueue:
+    min_flash: 34
     tags: kernel
-  kernel.workqueue.poll:
-    tags: kernel
-    extra_configs:
-      - CONFIG_POLL=y


@@ -3,4 +3,5 @@ CONFIG_IRQ_OFFLOAD=y
CONFIG_HEAP_MEM_POOL_SIZE=1024
CONFIG_THREAD_NAME=y
CONFIG_TEST_USERSPACE=y
CONFIG_POLL=y
CONFIG_MP_NUM_CPUS=1


@@ -26,6 +26,12 @@ static struct k_work_q user_workq;
static ZTEST_BMEM struct k_work work[NUM_OF_WORK];
static struct k_delayed_work new_work;
static struct k_delayed_work delayed_work[NUM_OF_WORK], delayed_work_sleepy;
static struct k_work_poll triggered_work[NUM_OF_WORK];
static struct k_poll_event triggered_work_event[NUM_OF_WORK];
static struct k_poll_signal triggered_work_signal[NUM_OF_WORK];
static struct k_work_poll triggered_work_sleepy;
static struct k_poll_event triggered_work_sleepy_event;
static struct k_poll_signal triggered_work_sleepy_signal;
static struct k_sem sync_sema;
static struct k_sem dummy_sema;
static struct k_thread *main_thread;

@@ -204,6 +210,136 @@ static void tdelayed_work_cancel(void *data)
	/*work items not cancelled: delayed_work[1], delayed_work_sleepy*/
}
static void ttriggered_work_submit(void *data)
{
	struct k_work_q *work_q = (struct k_work_q *)data;

	for (int i = 0; i < NUM_OF_WORK; i++) {
		k_poll_signal_init(&triggered_work_signal[i]);
		k_poll_event_init(&triggered_work_event[i],
				  K_POLL_TYPE_SIGNAL,
				  K_POLL_MODE_NOTIFY_ONLY,
				  &triggered_work_signal[i]);

		/**TESTPOINT: init via k_work_poll_init*/
		k_work_poll_init(&triggered_work[i], work_handler);

		/**TESTPOINT: check pending after triggered work init*/
		zassert_false(k_work_pending(
				(struct k_work *)&triggered_work[i]), NULL);

		if (work_q) {
			/**TESTPOINT: triggered work submit to queue*/
			zassert_true(k_work_poll_submit_to_queue(work_q,
					&triggered_work[i],
					&triggered_work_event[i], 1,
					K_FOREVER) == 0, NULL);
		} else {
			/**TESTPOINT: triggered work submit to system queue*/
			zassert_true(k_work_poll_submit(
					&triggered_work[i],
					&triggered_work_event[i], 1,
					K_FOREVER) == 0, NULL);
		}

		/**TESTPOINT: check pending after triggered work submit*/
		zassert_true(k_work_pending(
				(struct k_work *)&triggered_work[i]) == 0, NULL);
	}

	for (int i = 0; i < NUM_OF_WORK; i++) {
		/**TESTPOINT: trigger work execution*/
		zassert_true(k_poll_signal_raise(&triggered_work_signal[i], 1)
			     == 0, NULL);

		/**TESTPOINT: check pending after sending signal */
		zassert_true(k_work_pending(
				(struct k_work *)&triggered_work[i]) != 0, NULL);
	}
}
static void ttriggered_work_cancel(void *data)
{
	struct k_work_q *work_q = (struct k_work_q *)data;
	int ret;

	for (int i = 0; i < NUM_OF_WORK; i++) {
		k_poll_signal_init(&triggered_work_signal[i]);
		k_poll_event_init(&triggered_work_event[i],
				  K_POLL_TYPE_SIGNAL,
				  K_POLL_MODE_NOTIFY_ONLY,
				  &triggered_work_signal[i]);

		k_work_poll_init(&triggered_work[i], work_handler);
	}

	k_poll_signal_init(&triggered_work_sleepy_signal);
	k_poll_event_init(&triggered_work_sleepy_event,
			  K_POLL_TYPE_SIGNAL,
			  K_POLL_MODE_NOTIFY_ONLY,
			  &triggered_work_sleepy_signal);

	k_work_poll_init(&triggered_work_sleepy, work_sleepy);

	if (work_q) {
		ret = k_work_poll_submit_to_queue(work_q,
				&triggered_work_sleepy,
				&triggered_work_sleepy_event, 1, K_FOREVER);
		ret |= k_work_poll_submit_to_queue(work_q,
				&triggered_work[0],
				&triggered_work_event[0], 1, K_FOREVER);
		ret |= k_work_poll_submit_to_queue(work_q,
				&triggered_work[1],
				&triggered_work_event[1], 1, K_FOREVER);
	} else {
		ret = k_work_poll_submit(&triggered_work_sleepy,
				&triggered_work_sleepy_event, 1, K_FOREVER);
		ret |= k_work_poll_submit(&triggered_work[0],
				&triggered_work_event[0], 1, K_FOREVER);
		ret |= k_work_poll_submit(&triggered_work[1],
				&triggered_work_event[1], 1, K_FOREVER);
	}

	/* Check if all submissions succeeded. */
	zassert_true(ret == 0, NULL);

	/**TESTPOINT: triggered work cancel when waiting for event*/
	ret = k_work_poll_cancel(&triggered_work[0]);
	zassert_true(ret == 0, NULL);

	/**TESTPOINT: check pending after triggered work cancel*/
	ret = k_work_pending((struct k_work *)&triggered_work[0]);
	zassert_true(ret == 0, NULL);

	/* Trigger work #1 */
	ret = k_poll_signal_raise(&triggered_work_signal[1], 1);
	zassert_true(ret == 0, NULL);

	/**TESTPOINT: check pending after sending signal */
	ret = k_work_pending((struct k_work *)&triggered_work[1]);
	zassert_true(ret != 0, NULL);

	/**TESTPOINT: triggered work cancel when pending for event*/
	ret = k_work_poll_cancel(&triggered_work[1]);
	zassert_true(ret == -EINVAL, NULL);

	/* Trigger sleepy work */
	ret = k_poll_signal_raise(&triggered_work_sleepy_signal, 1);
	zassert_true(ret == 0, NULL);

	if (!k_is_in_isr()) {
		/*wait for completed work_sleepy and triggered_work[1]*/
		k_sleep(2 * TIMEOUT);

		/**TESTPOINT: check pending when work completed*/
		ret = k_work_pending((struct k_work *)&triggered_work_sleepy);
		zassert_true(ret == 0, NULL);

		/**TESTPOINT: triggered work cancel when completed*/
		ret = k_work_poll_cancel(&triggered_work_sleepy);
		zassert_true(ret == -EINVAL, NULL);
	}

	/*work items not cancelled: triggered_work[1], triggered_work_sleepy*/
}
/*test cases*/
/**
 * @brief Test work queue start before submit

@@ -510,6 +646,145 @@ void test_delayed_work_cancel_isr(void)
	}
}
/**
 * @brief Test triggered work submission to queue
 *
 * @ingroup kernel_workqueue_tests
 *
 * @see k_work_poll_init(), k_work_pending(),
 * k_work_poll_submit_to_queue(),
 * k_work_poll_submit()
 */
void test_triggered_work_submit_to_queue_thread(void)
{
	k_sem_reset(&sync_sema);

	ttriggered_work_submit(&workq);
	for (int i = 0; i < NUM_OF_WORK; i++) {
		k_sem_take(&sync_sema, K_FOREVER);
	}
}

/**
 * @brief Test triggered work submission to queue in ISR context
 *
 * @ingroup kernel_workqueue_tests
 *
 * @see k_work_poll_init(), k_work_pending(),
 * k_work_poll_submit_to_queue(),
 * k_work_poll_submit()
 */
void test_triggered_work_submit_to_queue_isr(void)
{
	k_sem_reset(&sync_sema);

	irq_offload(ttriggered_work_submit, (void *)&workq);
	for (int i = 0; i < NUM_OF_WORK; i++) {
		k_sem_take(&sync_sema, K_FOREVER);
	}
}

/**
 * @brief Test triggered work submission
 *
 * @ingroup kernel_workqueue_tests
 *
 * @see k_work_poll_init(), k_work_pending(),
 * k_work_poll_submit_to_queue(),
 * k_work_poll_submit()
 */
void test_triggered_work_submit_thread(void)
{
	k_sem_reset(&sync_sema);

	ttriggered_work_submit(NULL);
	for (int i = 0; i < NUM_OF_WORK; i++) {
		k_sem_take(&sync_sema, K_FOREVER);
	}
}

/**
 * @brief Test triggered work submission from ISR context
 *
 * @ingroup kernel_workqueue_tests
 *
 * @see k_work_poll_init(), k_work_pending(),
 * k_work_poll_submit_to_queue(),
 * k_work_poll_submit()
 */
void test_triggered_work_submit_isr(void)
{
	k_sem_reset(&sync_sema);

	irq_offload(ttriggered_work_submit, NULL);
	for (int i = 0; i < NUM_OF_WORK; i++) {
		k_sem_take(&sync_sema, K_FOREVER);
	}
}

/**
 * @brief Test triggered work cancel from work queue
 *
 * @ingroup kernel_workqueue_tests
 *
 * @see k_work_poll_cancel(), k_work_pending()
 */
void test_triggered_work_cancel_from_queue_thread(void)
{
	k_sem_reset(&sync_sema);

	ttriggered_work_cancel(&workq);

	/*wait for work items that could not be cancelled*/
	for (int i = 0; i < NUM_OF_WORK; i++) {
		k_sem_take(&sync_sema, K_FOREVER);
	}
}

/**
 * @brief Test triggered work cancel from work queue from ISR context
 *
 * @ingroup kernel_workqueue_tests
 *
 * @see k_work_poll_cancel(), k_work_pending()
 */
void test_triggered_work_cancel_from_queue_isr(void)
{
	k_sem_reset(&sync_sema);

	irq_offload(ttriggered_work_cancel, &workq);

	/*wait for work items that could not be cancelled*/
	for (int i = 0; i < NUM_OF_WORK; i++) {
		k_sem_take(&sync_sema, K_FOREVER);
	}
}

/**
 * @brief Test triggered work cancel
 *
 * @ingroup kernel_workqueue_tests
 *
 * @see k_work_poll_cancel(), k_work_pending()
 */
void test_triggered_work_cancel_thread(void)
{
	k_sem_reset(&sync_sema);

	ttriggered_work_cancel(NULL);

	/*wait for work items that could not be cancelled*/
	for (int i = 0; i < NUM_OF_WORK; i++) {
		k_sem_take(&sync_sema, K_FOREVER);
	}
}

/**
 * @brief Test triggered work cancel from ISR context
 *
 * @ingroup kernel_workqueue_tests
 *
 * @see k_work_poll_cancel(), k_work_pending()
 */
void test_triggered_work_cancel_isr(void)
{
	k_sem_reset(&sync_sema);

	irq_offload(ttriggered_work_cancel, NULL);

	/*wait for work items that could not be cancelled*/
	for (int i = 0; i < NUM_OF_WORK; i++) {
		k_sem_take(&sync_sema, K_FOREVER);
	}
}
void test_main(void)
{

@@ -542,6 +817,14 @@
		 ztest_1cpu_unit_test(test_delayed_work_cancel_from_queue_thread),
		 ztest_1cpu_unit_test(test_delayed_work_cancel_from_queue_isr),
		 ztest_1cpu_unit_test(test_delayed_work_cancel_thread),
		 ztest_1cpu_unit_test(test_delayed_work_cancel_isr),
		 ztest_1cpu_unit_test(test_triggered_work_submit_to_queue_thread),
		 ztest_1cpu_unit_test(test_triggered_work_submit_to_queue_isr),
		 ztest_1cpu_unit_test(test_triggered_work_submit_thread),
		 ztest_1cpu_unit_test(test_triggered_work_submit_isr),
		 ztest_1cpu_unit_test(test_triggered_work_cancel_from_queue_thread),
		 ztest_1cpu_unit_test(test_triggered_work_cancel_from_queue_isr),
		 ztest_1cpu_unit_test(test_triggered_work_cancel_thread),
		 ztest_1cpu_unit_test(test_triggered_work_cancel_isr));
	ztest_run_test_suite(workqueue_api);
}


@@ -1,3 +1,4 @@
tests:
  kernel.workqueue:
    min_flash: 34
    tags: kernel userspace