kernel: provide functional equivalent to old userspace work queue API

The new API cannot be used from userspace because it is not merely a
wrapper around existing userspace-capable objects (threads and
queues), but instead requires much more complex and lower-level access
to memory that can't be touched from userspace.  The vast majority of
work queue users are operating from privileged mode, so there's little
motivation to go through the pain and complexity of converting all
functions to system calls.

Copy the necessary pieces of the existing userspace work queue API out
and expose them with new names and types:

* k_work_handler_t becomes k_work_user_handler_t
* k_work becomes k_work_user
* k_work_q becomes k_work_user_q

etc.  Because the replacement API cannot use the same types, new API
names are also introduced to make it clearer that the userspace work
queue API is separate functionality.
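
For illustration, a minimal sketch of the migration (the my_* names
are placeholders, not part of this patch); a submitter previously
written as

    struct k_work_q my_workq;
    static struct k_work my_item;

    k_work_init(&my_item, my_handler);
    k_work_q_user_start(&my_workq, my_stack,
                        K_THREAD_STACK_SIZEOF(my_stack), MY_PRIO);
    k_work_submit_to_user_queue(&my_workq, &my_item);

would now be written as

    struct k_work_user_q my_workq;
    static struct k_work_user my_item;

    k_work_user_init(&my_item, my_handler);
    k_work_user_queue_start(&my_workq, my_stack,
                            K_THREAD_STACK_SIZEOF(my_stack), MY_PRIO, NULL);
    k_work_user_submit_to_queue(&my_workq, &my_item);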

Signed-off-by: Peter Bigot <peter.bigot@nordicsemi.no>
Commit 4e3b92609b by Peter Bigot, 2021-01-15 10:52:38 -06:00; committed by Anas Nashif.
8 changed files with 472 additions and 10 deletions.

@@ -4202,6 +4202,173 @@ static inline k_ticks_t k_delayed_work_remaining_ticks(
/** @} */
struct k_work_user;
/**
* @addtogroup thread_apis
* @{
*/
/**
* @typedef k_work_user_handler_t
* @brief Work item handler function type for user work queues.
*
* A work item's handler function is executed by a user workqueue's thread
* when the work item is processed by the workqueue.
*
* @param work Address of the work item.
*
* @return N/A
*/
typedef void (*k_work_user_handler_t)(struct k_work_user *work);
/**
* @cond INTERNAL_HIDDEN
*/
struct k_work_user_q {
	struct k_queue queue;
	struct k_thread thread;
};

enum {
	K_WORK_USER_STATE_PENDING,	/* Work item pending state */
};

struct k_work_user {
	void *_reserved;		/* Used by k_queue implementation. */
	k_work_user_handler_t handler;
	atomic_t flags;
};

/**
* INTERNAL_HIDDEN @endcond
*/

#define Z_WORK_USER_INITIALIZER(work_handler) \
	{ \
	.handler = work_handler, \
	}
/**
* @brief Initialize a statically-defined user work item.
*
* This macro can be used to initialize a statically-defined user work
* item, prior to its first use. For example,
*
* @code static K_WORK_USER_DEFINE(<work>, <work_handler>); @endcode
*
* @param work Symbol name for work item object
* @param work_handler Function to invoke each time work item is processed.
*/
#define K_WORK_USER_DEFINE(work, work_handler) \
	struct k_work_user work = Z_WORK_USER_INITIALIZER(work_handler)
/**
* @brief Initialize a userspace work item.
*
* This routine initializes a user workqueue work item, prior to its
* first use.
*
* @param work Address of work item.
* @param handler Function to invoke each time work item is processed.
*
* @return N/A
*/
static inline void k_work_user_init(struct k_work_user *work,
				    k_work_user_handler_t handler)
{
	*work = (struct k_work_user)Z_WORK_USER_INITIALIZER(handler);
}
/**
* @brief Check if a userspace work item is pending.
*
* This routine indicates if user work item @a work is pending in a workqueue's
* queue.
*
* @note Checking if the work is pending gives no guarantee that the
* work will still be pending when this information is used. It is up to
* the caller to make sure that this information is used in a safe manner.
*
* @note Can be called by ISRs.
*
* @param work Address of work item.
*
* @return true if work item is pending, or false if it is not pending.
*/
static inline bool k_work_user_is_pending(struct k_work_user *work)
{
	return atomic_test_bit(&work->flags, K_WORK_USER_STATE_PENDING);
}
/**
* @brief Submit a work item to a user mode workqueue
*
* Submits a work item to a workqueue that runs in user mode. A temporary
* memory allocation is made from the caller's resource pool which is freed
* once the worker thread consumes the work item. The workqueue thread must
* have memory access to the work item being submitted, and the caller must
* have permission granted on the work_q parameter's queue object.
*
* @note Can be called by ISRs.
*
* @param work_q Address of workqueue.
* @param work Address of work item.
*
* @retval -EBUSY if the work item was already in some workqueue
* @retval -ENOMEM if no memory for thread resource pool allocation
* @retval 0 Success
*/
static inline int k_work_user_submit_to_queue(struct k_work_user_q *work_q,
					      struct k_work_user *work)
{
	int ret = -EBUSY;

	if (!atomic_test_and_set_bit(&work->flags,
				     K_WORK_USER_STATE_PENDING)) {
		ret = k_queue_alloc_append(&work_q->queue, work);

		/* Couldn't insert into the queue. Clear the pending bit
		 * so the work item can be submitted again
		 */
		if (ret != 0) {
			atomic_clear_bit(&work->flags,
					 K_WORK_USER_STATE_PENDING);
		}
	}

	return ret;
}
/**
* @brief Start a workqueue in user mode
*
* This works identically to k_work_queue_start() except it is callable from
* user mode, and the worker thread created will run in user mode. The caller
* must have permissions granted on both the work_q parameter's thread and
* queue objects, and the same restrictions on priority apply as
* k_thread_create().
*
* @param work_q Address of workqueue.
* @param stack Pointer to work queue thread's stack space, as defined by
* K_THREAD_STACK_DEFINE()
* @param stack_size Size of the work queue thread's stack (in bytes), which
* should either be the same constant passed to
* K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
* @param prio Priority of the work queue's thread.
* @param name Optional thread name. If not null, a copy is made into the
* thread's name buffer.
*
* @return N/A
*/
extern void k_work_user_queue_start(struct k_work_user_q *work_q,
				    k_thread_stack_t *stack,
				    size_t stack_size, int prio,
				    const char *name);
/** @} */
#endif /* !CONFIG_KERNEL_WORK1 */
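
Taken together, a minimal end-to-end sketch of the API declared above
(a hedged example: the app_* names, stack size, and priority are
illustrative, and error checking is elided):

    static K_THREAD_STACK_DEFINE(app_stack, 1024);
    static struct k_work_user_q app_workq;
    static struct k_work_user app_item;

    static void app_handler(struct k_work_user *item)
    {
            /* Runs in the user workqueue thread, in user mode. */
    }

    void app_start(void)
    {
            k_work_user_init(&app_item, app_handler);
            k_work_user_queue_start(&app_workq, app_stack,
                                    K_THREAD_STACK_SIZEOF(app_stack),
                                    CONFIG_MAIN_THREAD_PRIORITY, "app.wq");
            /* Submission allocates a queue node from the caller's
             * resource pool, so the caller must have one assigned.
             */
            k_work_user_submit_to_queue(&app_workq, &app_item);
    }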
@@ -24,6 +24,9 @@ zephyr_sources(
)
zephyr_sources_ifdef(CONFIG_KERNEL_WORK1 work_q.c)
if(${CONFIG_USERSPACE})
  zephyr_sources_ifdef(CONFIG_KERNEL_WORK2 user_work.c)
endif()
zephyr_sources_ifdef(CONFIG_CBPRINTF_COMPLETE cbprintf_complete.c)
zephyr_sources_ifdef(CONFIG_CBPRINTF_NANO cbprintf_nano.c)

lib/os/user_work.c (new file, 59 lines)

@@ -0,0 +1,59 @@
/*
* Copyright (c) 2018 Intel Corporation
* Copyright (c) 2016 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
static void z_work_user_q_main(void *work_q_ptr, void *p2, void *p3)
{
	struct k_work_user_q *work_q = work_q_ptr;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	while (true) {
		struct k_work_user *work;
		k_work_user_handler_t handler;

		work = k_queue_get(&work_q->queue, K_FOREVER);
		if (work == NULL) {
			continue;
		}

		handler = work->handler;
		__ASSERT(handler != NULL, "handler must be provided");

		/* Reset pending state so it can be resubmitted by handler */
		if (atomic_test_and_clear_bit(&work->flags,
					      K_WORK_USER_STATE_PENDING)) {
			handler(work);
		}

		/* Make sure we don't hog up the CPU if the FIFO never (or
		 * very rarely) gets empty.
		 */
		k_yield();
	}
}

void k_work_user_queue_start(struct k_work_user_q *work_q,
			     k_thread_stack_t *stack,
			     size_t stack_size, int prio, const char *name)
{
	k_queue_init(&work_q->queue);

	/* Created worker thread will inherit object permissions and memory
	 * domain configuration of the caller
	 */
	k_thread_create(&work_q->thread, stack, stack_size, z_work_user_q_main,
			work_q, NULL, NULL, prio, K_USER | K_INHERIT_PERMS,
			K_FOREVER);

	k_object_access_grant(&work_q->queue, &work_q->thread);

	if (name != NULL) {
		k_thread_name_set(&work_q->thread, name);
	}

	k_thread_start(&work_q->thread);
}
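
Because the loop above clears the pending bit before invoking the
handler, a handler may resubmit its own work item from within itself;
a sketch under that assumption (poll_wq and poll_handler are invented
names, and the queue is presumed already started):

    static struct k_work_user_q poll_wq;

    static void poll_handler(struct k_work_user *item)
    {
            /* ... do one unit of work ... */

            /* The pending bit was cleared before this handler ran,
             * so resubmitting the same item here is accepted.
             */
            k_work_user_submit_to_queue(&poll_wq, item);
    }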

@@ -15,11 +15,19 @@ static K_THREAD_STACK_DEFINE(workq_stack, STACK_SIZE);
struct k_sem sync_sema;
#ifdef CONFIG_USERSPACE
static struct k_work_q user_workq;
#if CONFIG_USERSPACE
static struct k_work_user_q user_workq;
static K_THREAD_STACK_DEFINE(user_workq_stack, STACK_SIZE);
static FP_BMEM struct k_work user_work_item;
static FP_BMEM struct k_work_user user_work_item;
void user_workq_func(struct k_work_user *unused)
{
	ARG_UNUSED(unused);
	k_sem_give(&sync_sema);
}
#endif
void workq_func(struct k_work *unused)
@@ -59,7 +67,7 @@ void delayed_workq_thread(void *arg1, void *arg2, void *arg3)
	k_sem_take(&sync_sema, K_FOREVER);
}
#ifdef CONFIG_USERSPACE
#if CONFIG_USERSPACE
void simple_user_workq_thread(void *arg1, void *arg2, void *arg3)
{
	ARG_UNUSED(arg1);
@@ -67,8 +75,8 @@ void simple_user_workq_thread(void *arg1, void *arg2, void *arg3)
	ARG_UNUSED(arg3);

	k_sem_reset(&sync_sema);
	k_work_init(&user_work_item, workq_func);
	k_work_submit_to_user_queue(&user_workq, &user_work_item);
	k_work_user_init(&user_work_item, user_workq_func);
	k_work_user_submit_to_queue(&user_workq, &user_work_item);

	k_sem_take(&sync_sema, K_FOREVER);
}
@@ -98,10 +106,10 @@ void run_workq(void)
	k_thread_join(tid, K_FOREVER);

#ifdef CONFIG_USERSPACE
	k_work_q_user_start(&user_workq, user_workq_stack,
			    K_THREAD_STACK_SIZEOF(user_workq_stack),
			    CONFIG_MAIN_THREAD_PRIORITY);
#if CONFIG_USERSPACE
	k_work_user_queue_start(&user_workq, user_workq_stack,
				K_THREAD_STACK_SIZEOF(user_workq_stack),
				CONFIG_MAIN_THREAD_PRIORITY, NULL);

	k_mem_domain_add_thread(&footprint_mem_domain, &user_workq.thread);
	k_thread_access_grant(&user_workq.thread, &user_workq_stack);

@@ -0,0 +1,8 @@
# SPDX-License-Identifier: Apache-2.0
cmake_minimum_required(VERSION 3.13.1)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(user_work)
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})

@@ -0,0 +1,9 @@
CONFIG_ZTEST=y
CONFIG_IRQ_OFFLOAD=y
CONFIG_HEAP_MEM_POOL_SIZE=1024
CONFIG_THREAD_NAME=y
CONFIG_TEST_USERSPACE=y
CONFIG_POLL=y
CONFIG_MP_NUM_CPUS=1
CONFIG_MAX_THREAD_BYTES=3
CONFIG_KERNEL_WORK2=y

@@ -0,0 +1,202 @@
/*
* Copyright (c) 2016, 2020 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @brief Workqueue Tests
* @defgroup kernel_workqueue_tests Workqueue
* @ingroup all_tests
* @{
* @}
*/
#include <ztest.h>
#include <irq_offload.h>
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)
#define NUM_OF_WORK 2
#define SYNC_SEM_INIT_VAL (0U)
static K_THREAD_STACK_DEFINE(user_tstack, STACK_SIZE);
static struct k_work_user_q user_workq;
static ZTEST_BMEM struct k_work_user work[NUM_OF_WORK];
static struct k_sem sync_sema;
static struct k_sem dummy_sema;
static struct k_thread *main_thread;
/**
* @brief Common function used as a handler for workqueue tests
*
* An API call inside it indicates successful execution of the handler.
*
* @param unused of type k_work_user so the handler is accepted
* by k_work_user_init
*
* @return N/A
*/
static void common_work_handler(struct k_work_user *unused)
{
	k_sem_give(&sync_sema);
}

static void test_k_work_user_init(void)
{
	K_WORK_USER_DEFINE(local, common_work_handler);

	zassert_equal(local.handler, common_work_handler, NULL);
	zassert_equal(local.flags, 0, NULL);
}
/**
* @brief Test k_work_user_submit_to_queue API
*
* @details Function k_work_user_submit_to_queue() returns
* -EBUSY if the work item is already in some workqueue, and
* -ENOMEM if no memory is available for the thread resource pool
* allocation. Create two situations that trigger these error returns.
*
* @see k_work_user_submit_to_queue()
* @ingroup kernel_workqueue_tests
*/
static void test_k_work_user_submit_to_queue_fail(void)
{
	int ret = 0;

	k_sem_reset(&sync_sema);
	k_work_user_init(&work[0], common_work_handler);
	k_work_user_init(&work[1], common_work_handler);

	/* TESTPOINT: When a work item is added to a workqueue, its flag is
	 * set to the pending state; until the work item has been processed,
	 * it cannot be appended to a workqueue another time.
	 */
	k_work_user_submit_to_queue(&user_workq, &work[0]);
	zassert_true(k_work_user_is_pending(&work[0]), NULL);
	k_work_user_submit_to_queue(&user_workq, &work[0]);

	/* Test that the work item's callback function is invoked only once */
	k_sem_take(&sync_sema, K_FOREVER);
	zassert_true(k_queue_is_empty(&user_workq.queue), NULL);
	zassert_false(k_work_user_is_pending(&work[0]), NULL);

	/* Use up the memory in the resource pool */
	for (int i = 0; i < 100; i++) {
		ret = k_queue_alloc_append(&user_workq.queue, &work[1]);
		if (ret == -ENOMEM) {
			break;
		}
	}

	k_work_user_submit_to_queue(&user_workq, &work[0]);
	/* If memory is used up, the work cannot be appended to the workqueue */
	zassert_false(k_work_user_is_pending(&work[0]), NULL);
}
static void work_handler(struct k_work_user *w)
{
	/* Just to show an API call on this will succeed */
	k_sem_init(&dummy_sema, 0, 1);
	k_sem_give(&sync_sema);
}

static void twork_submit_1(struct k_work_user_q *work_q, struct k_work_user *w,
			   k_work_user_handler_t handler)
{
	/**TESTPOINT: init via k_work_user_init*/
	k_work_user_init(w, handler);

	/**TESTPOINT: check pending after work init*/
	zassert_false(k_work_user_is_pending(w), NULL);

	/**TESTPOINT: work submit to queue*/
	zassert_false(k_work_user_submit_to_queue(work_q, w),
		      "failed to submit to queue");
}

static void twork_submit(const void *data)
{
	struct k_work_user_q *work_q = (struct k_work_user_q *)data;

	for (int i = 0; i < NUM_OF_WORK; i++) {
		twork_submit_1(work_q, &work[i], work_handler);
	}
}
/**
* @brief Test user mode work queue start before submit
*
* @ingroup kernel_workqueue_tests
*
* @see k_work_user_queue_start()
*/
static void test_work_user_queue_start_before_submit(void)
{
	k_work_user_queue_start(&user_workq, user_tstack, STACK_SIZE,
				CONFIG_MAIN_THREAD_PRIORITY, "user.wq");
}
/**
* @brief Setup object permissions before test_user_workq_granted_access()
*
* @ingroup kernel_workqueue_tests
*/
static void test_user_workq_granted_access_setup(void)
{
	/* Subsequent test cases will have access to the dummy_sema,
	 * but not the user workqueue since it already started.
	 */
	k_object_access_grant(&dummy_sema, main_thread);
}
/**
* @brief Test user mode grant workqueue permissions
*
* @ingroup kernel_workqueue_tests
*
* @see k_object_access_grant()
*/
static void test_user_workq_granted_access(void)
{
	k_object_access_grant(&dummy_sema, &user_workq.thread);
}
/**
* @brief Test work submission to work queue (user mode)
*
* @ingroup kernel_workqueue_tests
*
* @see k_work_user_init(), k_work_user_is_pending(),
* k_work_user_submit_to_queue()
*/
static void test_user_work_submit_to_queue_thread(void)
{
	k_sem_reset(&sync_sema);
	twork_submit(&user_workq);
	for (int i = 0; i < NUM_OF_WORK; i++) {
		k_sem_take(&sync_sema, K_FOREVER);
	}
}
void test_main(void)
{
	main_thread = k_current_get();
	k_thread_access_grant(main_thread, &sync_sema, &user_workq.thread,
			      &user_workq.queue, &user_tstack);
	k_sem_init(&sync_sema, SYNC_SEM_INIT_VAL, NUM_OF_WORK);
	k_thread_system_pool_assign(k_current_get());

	ztest_test_suite(workqueue_api,
		/* Do not disturb the ordering of these test cases */
		ztest_user_unit_test(test_work_user_queue_start_before_submit),
		ztest_unit_test(test_user_workq_granted_access_setup),
		ztest_user_unit_test(test_user_workq_granted_access),
		/* End order-important tests */
		ztest_unit_test(test_k_work_user_init),
		ztest_1cpu_user_unit_test(test_user_work_submit_to_queue_thread),
		ztest_user_unit_test(test_k_work_user_submit_to_queue_fail));
	ztest_run_test_suite(workqueue_api);
}
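
The failure cases exercised above suggest the checks a real submitter
should make; a brief sketch (wq and item stand for an already
initialized queue and work item):

    int err = k_work_user_submit_to_queue(&wq, &item);

    if (err == -EBUSY) {
            /* Item is already pending on some workqueue. */
    } else if (err == -ENOMEM) {
            /* The caller's resource pool could not supply a queue node. */
    }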

@@ -0,0 +1,5 @@
tests:
  kernel.work.user:
    filter: CONFIG_KERNEL_WORK2 and CONFIG_ARCH_HAS_USERSPACE
    min_flash: 34
    tags: kernel userspace