kernel: remove old work queue implementation

Now that the old API has been reimplemented with the new API, remove
the old implementation and its tests.

Signed-off-by: Peter Bigot <peter.bigot@nordicsemi.no>
Author: Peter Bigot, 2020-11-24 16:47:47 -06:00 (committed by Anas Nashif)
Commit: b706a5e999
16 changed files with 6 additions and 2225 deletions
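For readers tracking the migration, the removed calls map roughly onto the replacement API that landed earlier in this series. The table below is an editorial sketch for orientation, not part of the commit:

/* Legacy API (removed here)         ->  replacement API
 *
 * struct k_delayed_work             ->  struct k_work_delayable
 * k_work_q_start()                  ->  k_work_queue_start()
 * k_delayed_work_init()             ->  k_work_init_delayable()
 * k_delayed_work_submit()           ->  k_work_schedule()
 * k_delayed_work_submit_to_queue()  ->  k_work_schedule_for_queue()
 * k_delayed_work_cancel()           ->  k_work_cancel_delayable()
 * k_delayed_work_pending()          ->  k_work_delayable_is_pending()
 * k_delayed_work_remaining_get()    ->  k_work_delayable_remaining_get()
 */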


@@ -1973,7 +1973,6 @@ PREDEFINED = "CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT" \
"CONFIG_FPU" \
"CONFIG_FPU_SHARING" \
"CONFIG_HEAP_MEM_POOL_SIZE" \
"CONFIG_KERNEL_WORK2" \
"CONFIG_MMU" \
"CONFIG_NET_L2_ETHERNET_MGMT" \
"CONFIG_NET_MGMT_EVENT" \


@@ -2496,451 +2496,6 @@ extern struct k_work_q k_sys_work_q;
* INTERNAL_HIDDEN @endcond
*/
#ifdef CONFIG_KERNEL_WORK1
/**
* @addtogroup thread_apis
* @{
*/
/**
* @typedef k_work_handler_t
* @brief Work item handler function type.
*
* A work item's handler function is executed by a workqueue's thread
* when the work item is processed by the workqueue.
*
* @param work Address of the work item.
*
* @return N/A
*/
typedef void (*k_work_handler_t)(struct k_work *work);
/**
* @cond INTERNAL_HIDDEN
*/
struct k_work_q {
struct k_queue queue;
struct k_thread thread;
};
enum {
K_WORK_STATE_PENDING, /* Work item pending state */
};
struct k_work {
void *_reserved; /* Used by k_queue implementation. */
k_work_handler_t handler;
atomic_t flags[1];
};
struct k_delayed_work {
struct k_work work;
struct _timeout timeout;
struct k_work_q *work_q;
};
/**
* INTERNAL_HIDDEN @endcond
*/
#define Z_WORK_INITIALIZER(work_handler) \
{ \
._reserved = NULL, \
.handler = work_handler, \
.flags = { 0 } \
}
/**
* @brief Initialize a work item.
*
* This routine initializes a workqueue work item, prior to its first use.
*
* @param work Address of work item.
* @param handler Function to invoke each time work item is processed.
*
* @return N/A
*/
static inline void k_work_init(struct k_work *work, k_work_handler_t handler)
{
*work = (struct k_work)Z_WORK_INITIALIZER(handler);
}
/**
* @brief Submit a work item.
*
* This routine submits work item @p work to be processed by workqueue @p
* work_q. If the work item is already pending in @p work_q or any other
* workqueue as a result of an earlier submission, this routine has no
* effect on the work item. If the work item has already been processed, or
* is currently being processed, its work is considered complete and the
* work item can be resubmitted.
*
* @warning
* A submitted work item must not be modified until it has been processed
* by the workqueue.
*
* @note Can be called by ISRs.
*
* @param work_q Address of workqueue.
* @param work Address of work item.
*
* @return N/A
*/
static inline void k_work_submit_to_queue(struct k_work_q *work_q,
struct k_work *work)
{
if (!atomic_test_and_set_bit(work->flags, K_WORK_STATE_PENDING)) {
k_queue_append(&work_q->queue, work);
}
}
/**
* @brief Submit a work item to a user mode workqueue
*
* Submits a work item to a workqueue that runs in user mode. A temporary
* memory allocation is made from the caller's resource pool which is freed
* once the worker thread consumes the k_work item. The workqueue
* thread must have memory access to the k_work item being submitted. The caller
* must have permission granted on the work_q parameter's queue object.
*
* Otherwise this works the same as k_work_submit_to_queue().
*
* @note Can be called by ISRs.
*
* @param work_q Address of workqueue.
* @param work Address of work item.
*
* @retval -EBUSY if the work item was already in some workqueue
* @retval -ENOMEM if no memory for thread resource pool allocation
* @retval 0 Success
*/
static inline int k_work_submit_to_user_queue(struct k_work_q *work_q,
struct k_work *work)
{
int ret = -EBUSY;
if (!atomic_test_and_set_bit(work->flags, K_WORK_STATE_PENDING)) {
ret = k_queue_alloc_append(&work_q->queue, work);
/* Couldn't insert into the queue. Clear the pending bit
* so the work item can be submitted again
*/
if (ret != 0) {
atomic_clear_bit(work->flags, K_WORK_STATE_PENDING);
}
}
return ret;
}
/**
* @brief Check if a work item is pending.
*
* This routine indicates if work item @a work is pending in a workqueue's
* queue.
*
* @note Checking if the work is pending gives no guarantee that the
* work will still be pending when this information is used. It is up to
* the caller to make sure that this information is used in a safe manner.
*
* @note Can be called by ISRs.
*
* @param work Address of work item.
*
* @return true if work item is pending, or false if it is not pending.
*/
static inline bool k_work_pending(struct k_work *work)
{
return atomic_test_bit(work->flags, K_WORK_STATE_PENDING);
}
/**
* @brief Check if a delayed work item is pending.
*
* This routine indicates if the work item @a work is pending in a workqueue's
* queue or waiting for the delay timeout.
*
* @note Checking if the delayed work is pending gives no guarantee that the
* work will still be pending when this information is used. It is up to
* the caller to make sure that this information is used in a safe manner.
*
* @note Can be called by ISRs.
*
* @param work Address of delayed work item.
*
* @return true if work item is waiting for the delay to expire or pending on a
* work queue, or false if it is not pending.
*/
bool k_delayed_work_pending(struct k_delayed_work *work);
/**
* @brief Start a workqueue.
*
* This routine starts workqueue @a work_q. The workqueue spawns its work
* processing thread, which runs forever.
*
* @param work_q Address of workqueue.
* @param stack Pointer to work queue thread's stack space, as defined by
* K_THREAD_STACK_DEFINE()
* @param stack_size Size of the work queue thread's stack (in bytes), which
* should either be the same constant passed to
* K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
* @param prio Priority of the work queue's thread.
*
* @return N/A
*/
extern void k_work_q_start(struct k_work_q *work_q,
k_thread_stack_t *stack,
size_t stack_size, int prio);
/**
* @brief Start a workqueue in user mode
*
* This works identically to k_work_q_start() except it is callable from user
* mode, and the worker thread created will run in user mode.
* The caller must have permissions granted on both the work_q parameter's
* thread and queue objects, and the same restrictions on priority apply as
* k_thread_create().
*
* @param work_q Address of workqueue.
* @param stack Pointer to work queue thread's stack space, as defined by
* K_THREAD_STACK_DEFINE()
* @param stack_size Size of the work queue thread's stack (in bytes), which
* should either be the same constant passed to
* K_THREAD_STACK_DEFINE() or the value of K_THREAD_STACK_SIZEOF().
* @param prio Priority of the work queue's thread.
*
* @return N/A
*/
extern void k_work_q_user_start(struct k_work_q *work_q,
k_thread_stack_t *stack,
size_t stack_size, int prio);
#define Z_DELAYED_WORK_INITIALIZER(work_handler) \
{ \
.work = Z_WORK_INITIALIZER(work_handler), \
.timeout = { \
.node = {},\
.fn = NULL, \
.dticks = 0, \
}, \
.work_q = NULL, \
}
/**
* @brief Initialize a delayed work item.
*
* This routine initializes a workqueue delayed work item, prior to
* its first use.
*
* @param work Address of delayed work item.
* @param handler Function to invoke each time work item is processed.
*
* @return N/A
*/
static inline void k_delayed_work_init(struct k_delayed_work *work,
k_work_handler_t handler)
{
*work = (struct k_delayed_work)Z_DELAYED_WORK_INITIALIZER(handler);
}
/**
* @brief Submit a delayed work item.
*
* This routine schedules work item @a work to be processed by workqueue
* @a work_q after a delay of @a delay milliseconds. The routine initiates
* an asynchronous countdown for the work item and then returns to the caller.
* Only when the countdown completes is the work item actually submitted to
* the workqueue and becomes pending.
*
* Submitting a previously submitted delayed work item that is still counting
* down or is pending cancels the existing submission and restarts the
* countdown using the new delay. Note that this behavior is inherently
* subject to race conditions with the pre-existing timeouts and work queue,
* so care must be taken to synchronize such resubmissions externally.
*
* Attempts to submit a work item to a queue after it has been submitted to a
* different queue will fail with @c -EALREADY until k_delayed_work_cancel()
* is successfully invoked on the work item to clear its internal state.
*
* @warning
* A delayed work item must not be modified until it has been processed
* by the workqueue.
*
* @note Can be called by ISRs.
*
* @param work_q Address of workqueue.
* @param work Address of delayed work item.
* @param delay Delay before submitting the work item
*
* @retval 0 Work item countdown started.
* @retval -EINVAL
* * if a previously submitted work item had to be cancelled and the
* cancellation failed; or
* * Work item is being processed or has completed its work.
* @retval -EADDRINUSE Work item was submitted to a different workqueue.
*/
extern int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
struct k_delayed_work *work,
k_timeout_t delay);
/**
* @brief Cancel a delayed work item.
*
* This routine cancels the submission of delayed work item @a work. Whether
* the work item can be successfully cancelled depends on its state.
*
* @note Can be called by ISRs.
*
* @note When @c -EALREADY is returned the caller cannot distinguish whether
* the work item handler is still being invoked by the work queue thread or
* has completed.
*
* @param work Address of delayed work item.
*
* @retval 0
* * Work item countdown cancelled before the item was submitted to its
* queue; or
* * Work item was removed from its queue before it was processed.
* @retval -EINVAL
* * Work item has never been submitted; or
* * Work item has been successfully cancelled; or
* * Timeout handler is in the process of submitting the work item to its
* queue; or
* * Work queue thread has removed the work item from the queue but has not
* called its handler.
* @retval -EALREADY
* * Work queue thread has removed the work item from the queue and cleared
* its pending flag; or
* * Work queue thread is invoking the item handler; or
* * Work item handler has completed.
*/
extern int k_delayed_work_cancel(struct k_delayed_work *work);
/**
* @brief Submit a work item to the system workqueue.
*
* This routine submits work item @a work to be processed by the system
* workqueue. If the work item is already pending in the system workqueue or
* any other workqueue as a result of an earlier submission, this routine
* has no effect on the work item. If the work item has already been
* processed, or is currently being processed, its work is considered
* complete and the work item can be resubmitted.
*
* @warning
* Work items submitted to the system workqueue should avoid using handlers
* that block or yield since this may prevent the system workqueue from
* processing other work items in a timely manner.
*
* @note Can be called by ISRs.
*
* @param work Address of work item.
*
* @return N/A
*/
static inline void k_work_submit(struct k_work *work)
{
k_work_submit_to_queue(&k_sys_work_q, work);
}
/**
* @brief Submit a delayed work item to the system workqueue.
*
* This routine schedules work item @a work to be processed by the system
* workqueue after a delay of @a delay milliseconds. The routine initiates
* an asynchronous countdown for the work item and then returns to the caller.
* Only when the countdown completes is the work item actually submitted to
* the workqueue and becomes pending.
*
* Submitting a previously submitted delayed work item that is still
* counting down cancels the existing submission and restarts the countdown
* using the new delay. If the work item is currently pending on the
* workqueue's queue because the countdown has completed it is too late to
* resubmit the item, and resubmission fails without impacting the work item.
* If the work item has already been processed, or is currently being processed,
* its work is considered complete and the work item can be resubmitted.
*
* Attempts to submit a work item to a queue after it has been submitted to a
* different queue will fail with @c -EALREADY until k_delayed_work_cancel()
* is invoked on the work item to clear its internal state.
*
* @warning
* Work items submitted to the system workqueue should avoid using handlers
* that block or yield since this may prevent the system workqueue from
* processing other work items in a timely manner.
*
* @note Can be called by ISRs.
*
* @param work Address of delayed work item.
* @param delay Delay before submitting the work item
*
* @retval 0 Work item countdown started.
* @retval -EINVAL Work item is being processed or has completed its work.
* @retval -EADDRINUSE Work item was submitted to a different workqueue.
*/
static inline int k_delayed_work_submit(struct k_delayed_work *work,
k_timeout_t delay)
{
return k_delayed_work_submit_to_queue(&k_sys_work_q, work, delay);
}
/**
* @brief Get time when a delayed work will be scheduled
*
* This routine computes the system uptime when a delayed work gets
* executed. If the delayed work is not waiting to be scheduled, it
* returns current system time.
*
* @param work Delayed work item.
*
* @return Uptime of execution (in ticks).
*/
static inline k_ticks_t k_delayed_work_expires_ticks(
const struct k_delayed_work *work)
{
return z_timeout_expires(&work->timeout);
}
/**
* @brief Get time remaining before a delayed work gets scheduled, in
* system ticks
*
* This routine computes the time remaining before a delayed work gets
* executed. If the delayed work is not waiting to be scheduled, it
* returns zero.
*
* @param work Delayed work item.
*
* @return Remaining time (in ticks).
*/
static inline k_ticks_t k_delayed_work_remaining_ticks(
const struct k_delayed_work *work)
{
return z_timeout_remaining(&work->timeout);
}
/**
* @brief Get time remaining before a delayed work gets scheduled.
*
* This routine computes the (approximate) time remaining before a
* delayed work gets executed. If the delayed work is not waiting to be
* scheduled, it returns zero.
*
* @param work Delayed work item.
*
* @return Remaining time (in milliseconds).
*/
static inline int32_t k_delayed_work_remaining_get(const struct k_delayed_work *work)
{
return k_ticks_to_ms_floor32(z_timeout_remaining(&work->timeout));
}
/** @} */
#endif /* CONFIG_KERNEL_WORK1 */
/**
* @defgroup mutex_apis Mutex APIs
* @ingroup kernel_apis
@@ -3275,8 +2830,6 @@ static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem)
/** @} */
#ifndef CONFIG_KERNEL_WORK1
/**
* @cond INTERNAL_HIDDEN
*/
@@ -4369,8 +3922,6 @@ extern void k_work_user_queue_start(struct k_work_user_q *work_q,
/** @} */
#endif /* !CONFIG_KERNEL_WORK1 */
/**
* @cond INTERNAL_HIDDEN
*/
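For reference, typical use of the legacy API declared above looked roughly like the following sketch. The handler and variable names are hypothetical; every call shown is one this commit removes:

#include <kernel.h>

static void my_handler(struct k_work *work)
{
	ARG_UNUSED(work);
	/* Deferred processing runs in the workqueue thread. */
}

static struct k_work my_work;
static struct k_delayed_work my_dwork;

void legacy_usage(void)
{
	k_work_init(&my_work, my_handler);
	k_work_submit(&my_work);               /* system workqueue */

	k_delayed_work_init(&my_dwork, my_handler);
	(void)k_delayed_work_submit(&my_dwork, K_MSEC(100));

	/* Cancellation is best-effort; see the caveats documented on
	 * k_delayed_work_cancel() above.
	 */
	(void)k_delayed_work_cancel(&my_dwork);
}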


@@ -23,6 +23,7 @@ list(APPEND kernel_files
thread.c
version.c
condvar.c
work.c
smp.c
banner.c
)
@@ -44,9 +45,6 @@ set_target_properties(
__ZEPHYR_SUPERVISOR__
)
target_sources_ifdef(CONFIG_KERNEL_WORK1 kernel PRIVATE work_q.c)
target_sources_ifdef(CONFIG_KERNEL_WORK2 kernel PRIVATE work.c)
target_sources_ifdef(CONFIG_STACK_CANARIES kernel PRIVATE compiler_stack_protect.c)
target_sources_ifdef(CONFIG_SYS_CLOCK_EXISTS kernel PRIVATE timeout.c timer.c)
target_sources_ifdef(CONFIG_ATOMIC_OPERATIONS_C kernel PRIVATE atomic_c.c)


@@ -884,22 +884,4 @@ config THREAD_LOCAL_STORAGE
help
This option enables thread local storage (TLS) support in kernel.
choice KERNEL_WORK
prompt "Which work queue implementation to use"
default KERNEL_WORK2
config KERNEL_WORK1
bool "Select the original racy work API"
help
This selects the original k_work_* implementation, and excludes the
new implementation.
config KERNEL_WORK2
bool "Select alternative work API"
help
This disables the original k_work_* implementation and replaces it
with a new one.
endchoice # KERNEL_WORK
endmenu


@@ -27,19 +27,10 @@ static int k_sys_work_q_init(const struct device *dev)
.no_yield = IS_ENABLED(CONFIG_SYSTEM_WORKQUEUE_NO_YIELD),
};
#ifdef CONFIG_KERNEL_WORK1
k_work_q_start(&k_sys_work_q,
sys_work_q_stack,
K_KERNEL_STACK_SIZEOF(sys_work_q_stack),
CONFIG_SYSTEM_WORKQUEUE_PRIORITY);
k_thread_name_set(&k_sys_work_q.thread, "sysworkq");
#else /* CONFIG_KERNEL_WORK1 */
k_work_queue_start(&k_sys_work_q,
sys_work_q_stack,
K_KERNEL_STACK_SIZEOF(sys_work_q_stack),
CONFIG_SYSTEM_WORKQUEUE_PRIORITY, &cfg);
#endif /* CONFIG_KERNEL_WORK1 */
return 0;
}
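Only the k_work_queue_start() path survives. A minimal sketch of starting a dedicated queue the same way, with hypothetical names; note the config's .name field subsumes the separate k_thread_name_set() call the legacy branch needed:

#include <kernel.h>

#define MY_STACK_SIZE 1024
#define MY_PRIO       5

K_THREAD_STACK_DEFINE(my_stack_area, MY_STACK_SIZE);
static struct k_work_q my_work_q;

void start_my_queue(void)
{
	struct k_work_queue_config cfg = {
		.name = "myworkq",
		.no_yield = false,
	};

	k_work_queue_start(&my_work_q, my_stack_area,
			   K_THREAD_STACK_SIZEOF(my_stack_area),
			   MY_PRIO, &cfg);
}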


@@ -1,139 +0,0 @@
/*
* Copyright (c) 2016 Intel Corporation
* Copyright (c) 2016 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
*
* Workqueue support functions
*/
#include <kernel_structs.h>
#include <wait_q.h>
#include <spinlock.h>
#include <errno.h>
#include <stdbool.h>
#include <sys/check.h>
#define WORKQUEUE_THREAD_NAME "workqueue"
#ifdef CONFIG_SYS_CLOCK_EXISTS
static struct k_spinlock lock;
#endif
extern void z_work_q_main(void *work_q_ptr, void *p2, void *p3);
void k_work_q_start(struct k_work_q *work_q, k_thread_stack_t *stack,
size_t stack_size, int prio)
{
k_queue_init(&work_q->queue);
(void)k_thread_create(&work_q->thread, stack, stack_size, z_work_q_main,
work_q, NULL, NULL, prio, 0, K_NO_WAIT);
k_thread_name_set(&work_q->thread, WORKQUEUE_THREAD_NAME);
}
#ifdef CONFIG_SYS_CLOCK_EXISTS
static void work_timeout(struct _timeout *t)
{
struct k_delayed_work *w = CONTAINER_OF(t, struct k_delayed_work,
timeout);
/* submit work to workqueue */
k_work_submit_to_queue(w->work_q, &w->work);
}
static int work_cancel(struct k_delayed_work *work)
{
if (k_work_pending(&work->work)) {
/* Remove from the queue if already submitted */
if (!k_queue_remove(&work->work_q->queue, &work->work)) {
return -EINVAL;
}
} else {
int err = z_abort_timeout(&work->timeout);
if (err) {
return -EALREADY;
}
}
/* Detach from workqueue */
work->work_q = NULL;
atomic_clear_bit(work->work.flags, K_WORK_STATE_PENDING);
return 0;
}
int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
struct k_delayed_work *work,
k_timeout_t delay)
{
k_spinlock_key_t key = k_spin_lock(&lock);
int err = 0;
/* Work cannot be active in multiple queues */
if (work->work_q != NULL && work->work_q != work_q) {
err = -EADDRINUSE;
goto done;
}
/* Cancel if work has been submitted */
if (work->work_q == work_q) {
err = work_cancel(work);
/* -EALREADY may indicate the work has already completed so
* this is likely a recurring work. It may also indicate that
* the work handler is still executing. But it's neither
* delayed nor pending, so it can be rescheduled.
*/
if (err == -EALREADY) {
err = 0;
} else if (err < 0) {
goto done;
}
}
/* Attach workqueue so the timeout callback can submit it */
work->work_q = work_q;
/* Submit work directly if no delay. Note that this is a
* blocking operation, so release the lock first.
*/
if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
k_spin_unlock(&lock, key);
k_work_submit_to_queue(work_q, &work->work);
return 0;
}
/* Add timeout */
z_add_timeout(&work->timeout, work_timeout, delay);
done:
k_spin_unlock(&lock, key);
return err;
}
int k_delayed_work_cancel(struct k_delayed_work *work)
{
k_spinlock_key_t key = k_spin_lock(&lock);
int ret = -EINVAL;
if (work->work_q != NULL) {
ret = work_cancel(work);
}
k_spin_unlock(&lock, key);
return ret;
}
bool k_delayed_work_pending(struct k_delayed_work *work)
{
return !z_is_inactive_timeout(&work->timeout) ||
k_work_pending(&work->work);
}
#endif /* CONFIG_SYS_CLOCK_EXISTS */
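The work_cancel() helper above illustrates the raciness the old API was known for: once the pending bit is cleared, a caller still cannot tell whether the handler is mid-flight. The replacement API closes that hole with a synchronous cancel. A sketch assuming the new k_work_cancel_delayable_sync(), with hypothetical names:

#include <kernel.h>

static void handler(struct k_work *work)
{
	ARG_UNUSED(work);
}

static K_WORK_DELAYABLE_DEFINE(dwork, handler);

void stop_work(void)
{
	struct k_work_sync sync;

	/* Thread context only: returns once any in-flight invocation
	 * of handler() has completed.
	 */
	(void)k_work_cancel_delayable_sync(&dwork, &sync);
}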


@@ -23,11 +23,6 @@ zephyr_sources(
heap-validate.c
)
zephyr_sources_ifdef(CONFIG_KERNEL_WORK1 work_q.c)
if(${CONFIG_USERSPACE})
zephyr_sources_ifdef(CONFIG_KERNEL_WORK2 user_work.c)
endif()
zephyr_sources_ifdef(CONFIG_CBPRINTF_COMPLETE cbprintf_complete.c)
zephyr_sources_ifdef(CONFIG_CBPRINTF_NANO cbprintf_nano.c)
@@ -37,7 +32,7 @@ zephyr_sources_ifdef(CONFIG_RING_BUFFER ring_buffer.c)
zephyr_sources_ifdef(CONFIG_ASSERT assert.c)
zephyr_sources_ifdef(CONFIG_USERSPACE mutex.c)
zephyr_sources_ifdef(CONFIG_USERSPACE mutex.c user_work.c)
zephyr_sources_ifdef(CONFIG_SCHED_DEADLINE p4wq.c)


@@ -1942,13 +1942,8 @@ int net_tcp_queue_data(struct net_context *context, struct net_pkt *pkt)
if (tcp_window_full(conn)) {
/* Trigger resend if the timer is not active */
#ifdef CONFIG_KERNEL_WORK1
if (!k_delayed_work_remaining_get(&conn->send_data_timer)) {
NET_DBG("Window full, trigger resend");
tcp_resend_data(&conn->send_data_timer.work);
}
#else
/* HACK: use new API with legacy wrapper.
/* TODO: use k_work_delayable for send_data_timer so we don't
* have to directly access the internals of the legacy object.
*
* NOTE: It is not permitted to access any fields of k_work or
* k_work_delayable directly. This replacement does so, but
@@ -1969,9 +1964,8 @@ int net_tcp_queue_data(struct net_context *context, struct net_pkt *pkt)
* conn is embedded, and calling that function directly here
* and in the work handler.
*/
(void)k_work_schedule_to_queue(&tcp_work_q,
&conn->send_data_timer.work, K_NO_WAIT);
#endif
(void)k_work_schedule_for_queue(&tcp_work_q,
&conn->send_data_timer.work, K_NO_WAIT);
ret = -EAGAIN;
goto out;
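The pattern the TCP code now relies on, sketched with hypothetical names (in the tree, send_data_timer is still a legacy wrapper whose .work member is the underlying k_work_delayable):

#include <kernel.h>

static void resend_handler(struct k_work *work)
{
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);

	ARG_UNUSED(dwork);
	/* Retransmit, then possibly reschedule. */
}

static K_WORK_DELAYABLE_DEFINE(resend_work, resend_handler);

void trigger_resend(struct k_work_q *queue)
{
	/* K_NO_WAIT submits immediately; if the item is already
	 * scheduled or queued, this call leaves it alone, matching
	 * the "trigger resend if the timer is not active" intent above.
	 */
	(void)k_work_schedule_for_queue(queue, &resend_work, K_NO_WAIT);
}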


@@ -6,4 +6,3 @@ CONFIG_TEST_USERSPACE=y
CONFIG_POLL=y
CONFIG_MP_NUM_CPUS=1
CONFIG_MAX_THREAD_BYTES=3
CONFIG_KERNEL_WORK2=y


@@ -1,6 +1,5 @@
tests:
kernel.work.user:
filter: CONFIG_KERNEL_WORK2
min_flash: 34
filter: CONFIG_ARCH_HAS_USERSPACE
tags: kernel userspace


@@ -8,4 +8,3 @@ CONFIG_NUM_COOP_PRIORITIES=4
CONFIG_NUM_PREEMPT_PRIORITIES=4
CONFIG_SYSTEM_WORKQUEUE_PRIORITY=-3
CONFIG_ZTEST_THREAD_PRIORITY=-2
CONFIG_KERNEL_WORK2=y


@@ -1,5 +1,4 @@
tests:
kernel.work.api:
filter: CONFIG_KERNEL_WORK2
min_flash: 34
tags: kernel


@@ -1,8 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
cmake_minimum_required(VERSION 3.13.1)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(work_queue_api)
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})


@@ -1,9 +0,0 @@
CONFIG_ZTEST=y
CONFIG_IRQ_OFFLOAD=y
CONFIG_HEAP_MEM_POOL_SIZE=1024
CONFIG_THREAD_NAME=y
CONFIG_TEST_USERSPACE=y
CONFIG_POLL=y
CONFIG_MP_NUM_CPUS=1
CONFIG_MAX_THREAD_BYTES=3
CONFIG_KERNEL_WORK1=y

File diff suppressed because it is too large.


@@ -1,5 +0,0 @@
tests:
kernel.workqueue.api:
filter: CONFIG_KERNEL_WORK1
min_flash: 34
tags: kernel userspace