From dc34e7c6f64b55640ecfb5d3549b6c9ed7665b1c Mon Sep 17 00:00:00 2001 From: Peter Bigot Date: Wed, 28 Oct 2020 11:24:05 -0500 Subject: [PATCH] kernel: add new work queue implementation This commit provides a complete reimplementation of the work queue infrastructure intended to eliminate the race conditions and feature gaps in the existing implementation. Both bare and delayable work structures are supported. Items can be submitted; delayable items can be scheduled for submission at a future time. Items can be delayed, queued, and running all at the same time. A running item can also be canceling. The new implementation: * replaces "pending" with "busy" which identifies the active states; * supports canceling delayed and submitted items; * prevents resubmission of a item being canceled until cancellation completes; * supports waiting for cancellation to complete; * supports flushing a work item (waiting for the last submission to complete without preventing resubmission); * supports waiting for a queue to drain (only allows resubmission from the work thread); * supports stopping a work queue in conjunction with draining it; * prevents handler-reentrancy during resubmission. Signed-off-by: Peter Bigot --- include/kernel.h | 1001 ++++++++++++++++++++++++++++++++++++-- kernel/CMakeLists.txt | 1 + kernel/Kconfig | 9 + kernel/system_work_q.c | 14 +- kernel/work.c | 1042 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 2035 insertions(+), 32 deletions(-) create mode 100644 kernel/work.c diff --git a/include/kernel.h b/include/kernel.h index 83a262ba072..9870e8a8ee2 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -2482,9 +2482,21 @@ __syscall int k_stack_pop(struct k_stack *stack, stack_data_t *data, /** @} */ -#ifdef CONFIG_KERNEL_WORK1 +/** + * @cond INTERNAL_HIDDEN + */ struct k_work; +struct k_work_q; +struct k_work_queue_config; +struct k_delayed_work; +extern struct k_work_q k_sys_work_q; + +/** + * INTERNAL_HIDDEN @endcond + */ + +#ifdef CONFIG_KERNEL_WORK1 /** * @addtogroup thread_apis @@ -2529,8 +2541,6 @@ struct k_delayed_work { struct k_work_q *work_q; }; -extern struct k_work_q k_sys_work_q; - /** * INTERNAL_HIDDEN @endcond */ @@ -2542,20 +2552,6 @@ extern struct k_work_q k_sys_work_q; .flags = { 0 } \ } -/** - * @brief Initialize a statically-defined work item. - * - * This macro can be used to initialize a statically-defined workqueue work - * item, prior to its first use. For example, - * - * @code static K_WORK_DEFINE(, ); @endcode - * - * @param work Symbol name for work item object - * @param work_handler Function to invoke each time work item is processed. - */ -#define K_WORK_DEFINE(work, work_handler) \ - struct k_work work = Z_WORK_INITIALIZER(work_handler) - /** * @brief Initialize a work item. * @@ -2733,20 +2729,6 @@ extern void k_work_q_user_start(struct k_work_q *work_q, .work_q = NULL, \ } -/** - * @brief Initialize a statically-defined delayed work item. - * - * This macro can be used to initialize a statically-defined workqueue - * delayed work item, prior to its first use. For example, - * - * @code static K_DELAYED_WORK_DEFINE(, ); @endcode - * - * @param work Symbol name for delayed work item object - * @param work_handler Function to invoke each time work item is processed. - */ -#define K_DELAYED_WORK_DEFINE(work, work_handler) \ - struct k_delayed_work work = Z_DELAYED_WORK_INITIALIZER(work_handler) - /** * @brief Initialize a delayed work item. 
* @@ -3293,6 +3275,935 @@ static inline unsigned int z_impl_k_sem_count_get(struct k_sem *sem) /** @} */ +#ifndef CONFIG_KERNEL_WORK1 + +/** + * @cond INTERNAL_HIDDEN + */ + +struct k_work_delayable; +struct k_work_sync; + +/** + * INTERNAL_HIDDEN @endcond + */ + +/** + * @addtogroup thread_apis + * @{ + */ + +/** @brief The signature for a work item handler function. + * + * The function will be invoked by the thread animating a work queue. + * + * @param work the work item that provided the handler. + */ +typedef void (*k_work_handler_t)(struct k_work *work); + +/** @brief Initialize a (non-delayable) work structure. + * + * This must be invoked before submitting a work structure for the first time. + * It need not be invoked again on the same work structure. It can be + * re-invoked to change the associated handler, but this must be done when the + * work item is idle. + * + * @note Safe to invoke from ISRs. + * + * @param work the work structure to be initialized. + * + * @param handler the handler to be invoked by the work item. + */ +void k_work_init(struct k_work *work, + k_work_handler_t handler); + +/** @brief Busy state flags from the work item. + * + * A zero return value indicates the work item appears to be idle. + * + * @note Safe to invoke from ISRs. + * + * @note This is a live snapshot of state, which may change before the result + * is checked. Use locks where appropriate. + * + * @param work pointer to the work item. + * + * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, + * K_WORK_RUNNING, and K_WORK_CANCELING. + */ +int k_work_busy_get(const struct k_work *work); + +/** @brief Test whether a work item is currently pending. + * + * Wrapper to determine whether a work item is in a non-idle dstate. + * + * @note Safe to invoke from ISRs. + * + * @note This is a live snapshot of state, which may change before the result + * is checked. Use locks where appropriate. + * + * @param work pointer to the work item. + * + * @return true if and only if k_work_busy_get() returns a non-zero value. + */ +static inline bool k_work_is_pending(const struct k_work *work); + +/** @brief Submit a work item to a queue. + * + * @note Safe to invoke from ISRs. + * + * @param queue pointer to the work queue on which the item should run. If + * NULL the queue from the most recent submission will be used. + * + * @param work pointer to the work item. + * + * @retval 0 if work was already submitted to a queue + * @retval 1 if work was not submitted and has been queued to @p queue + * @retval 2 if work was running and has been queued to the queue that was + * running it + * @retval -EBUSY + * * if work submission was rejected because the work item is cancelling; or + * * @p queue is draining; or + * * @p queue is plugged. + * @retval -EINVAL if @p queue is null and the work item has never been run. + */ +int k_work_submit_to_queue(struct k_work_q *queue, + struct k_work *work); + +/** @brief Submit a work item to the system queue. + * + * @note Safe to invoke from ISRs. + * + * @param work pointer to the work item. + * + * @return as with k_work_submit_to_queue(). + */ +static inline int k_work_submit(struct k_work *work) +{ + return k_work_submit_to_queue(&k_sys_work_q, work); +} + +/** @brief Wait for last-submitted instance to complete. + * + * Resubmissions may occur while waiting, including chained submissions (from + * within the handler). + * + * @note Be careful of caller and work queue thread relative priority. 
If + * this function sleeps it will not return until the work queue thread + * completes the tasks that allow this thread to resume. + * + * @note Behavior is undefined if this function is invoked on @p work from a + * work queue running @p work. + * + * @param work pointer to the work item. + * + * @param sync pointer to an opaque item containing state related to the + * pending cancellation. The object must persist until the call returns, and + * be accessible from both the caller thread and the work queue thread. The + * object must not be used for any other flush or cancel operation until this + * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object + * must be allocated in coherent memory. + * + * @retval true if call had to wait for completion + * @retval false if work was already idle + */ +bool k_work_flush(struct k_work *work, + struct k_work_sync *sync); + +/** @brief Cancel a work item. + * + * This attempts to prevent a pending (non-delayable) work item from being + * processed by removing it from the work queue. If the item is being + * processed, the work item will continue to be processed, but resubmissions + * are rejected until cancellation completes. + * + * If this returns zero cancellation is complete, otherwise something + * (probably a work queue thread) is still referencing the item. + * + * See also k_work_cancel_sync(). + * + * @note Safe to invoke from ISRs. + * + * @param work pointer to the work item. + * + * @return the k_work_busy_get() status indicating the state of the item after all + * cancellation steps performed by this call are completed. + */ +int k_work_cancel(struct k_work *work); + +/** @brief Cancel a work item and wait for it to complete. + * + * Same as k_work_cancel() but does not return until cancellation is complete. + * This can be invoked by a thread after k_work_cancel() to synchronize with a + * previous cancellation. + * + * On return the work structure will be idle unless something submits it after + * the cancellation was complete. + * + * @note Be careful of caller and work queue thread relative priority. If + * this function sleeps it will not return until the work queue thread + * completes the tasks that allow this thread to resume. + * + * @note Behavior is undefined if this function is invoked on @p work from a + * work queue running @p work. + * + * @param work pointer to the work item. + * + * @param sync pointer to an opaque item containing state related to the + * pending cancellation. The object must persist until the call returns, and + * be accessible from both the caller thread and the work queue thread. The + * object must not be used for any other flush or cancel operation until this + * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object + * must be allocated in coherent memory. + * + * @retval true if work was not idle (call had to wait for cancellation to + * complete); + * @retval false otherwise + */ +bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync); + +/** @brief Initialize a work queue. + * + * This configures the work queue thread and starts it running. The function + * should not be re-invoked on a queue. + * + * @param queue pointer to the queue structure. + * + * @param stack pointer to the work thread stack area. + * + * @param stack_size size of the the work thread stack area, in bytes. + * + * @param prio initial thread priority + * + * @param cfg optional additional configuration parameters. 
Pass @c + * NULL if not required, to use the defaults documented in + * k_work_queue_config. + */ +void k_work_queue_start(struct k_work_q *queue, + k_thread_stack_t *stack, size_t stack_size, + int prio, const struct k_work_queue_config *cfg); + +/** @brief Access the thread that animates a work queue. + * + * This is necessary to grant a work queue thread access to things the work + * items it will process are expected to use. + * + * @param queue pointer to the queue structure. + * + * @return the thread associated with the work queue. + */ +static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue); + +/** @brief Wait until the work queue has drained, optionally plugging it. + * + * This blocks submission to the work queue except when coming from queue + * thread, and blocks the caller until no more work items are available in the + * queue. + * + * If @p plug is true then submission will continue to be blocked after the + * drain operation completes until k_work_queue_unplug() is invoked. + * + * Note that work items that are delayed are not yet associated with their + * work queue. They must be cancelled externally if a goal is to ensure the + * work queue remains empty. The @p plug feature can be used to prevent + * delayed items from being submitted after the drain completes. + * + * @param queue pointer to the queue structure. + * + * @param plug if true the work queue will continue to block new submissions + * after all items have drained. + * + * @retval 1 if call had to wait for the drain to complete + * @retval 0 if call did not have to wait + * @retval negative if wait was interrupted or failed + */ +int k_work_queue_drain(struct k_work_q *queue, bool plug); + +/** @brief Release a work queue to accept new submissions. + * + * This releases the block on new submissions placed when k_work_queue_drain() + * is invoked with the @p plug option enabled. If this is invoked before the + * drain completes new items may be submitted as soon as the drain completes. + * + * @note Safe to invoke from ISRs. + * + * @param queue pointer to the queue structure. + * + * @retval 0 if successfully unplugged + * @retval -EALREADY if the work queue was not plugged. + */ +int k_work_queue_unplug(struct k_work_q *queue); + +/** @brief Initialize a delayable work structure. + * + * This must be invoked before scheduling a delayable work structure for the + * first time. It need not be invoked again on the same work structure. It + * can be re-invoked to change the associated handler, but this must be done + * when the work item is idle. + * + * @note Safe to invoke from ISRs. + * + * @param dwork the delayable work structure to be initialized. + * + * @param handler the handler to be invoked by the work item. + */ +void k_work_init_delayable(struct k_work_delayable *dwork, + k_work_handler_t handler); + +/** + * @brief Get the parent delayable work structure from a work pointer. + * + * This function is necessary when a @c k_work_handler_t function is passed to + * k_work_schedule_for_queue() and the handler needs to access data from the + * container of the containing `k_work_delayable`. + * + * @param work Address passed to the work handler + * + * @return Address of the containing @c k_work_delayable structure. + */ +static inline struct k_work_delayable * +k_work_delayable_from_work(struct k_work *work); + +/** @brief Busy state flags from the delayable work item. + * + * @note Safe to invoke from ISRs. 
+ * + * @note This is a live snapshot of state, which may change before the result + * can be inspected. Use locks where appropriate. + * + * @param dwork pointer to the delayable work item. + * + * @return a mask of flags K_WORK_DELAYED, K_WORK_QUEUED, K_WORK_RUNNING, and + * K_WORK_CANCELING. A zero return value indicates the work item appears to + * be idle. + */ +int k_work_delayable_busy_get(const struct k_work_delayable *dwork); + +/** @brief Test whether a delayed work item is currently pending. + * + * Wrapper to determine whether a delayed work item is in a non-idle state. + * + * @note Safe to invoke from ISRs. + * + * @note This is a live snapshot of state, which may change before the result + * can be inspected. Use locks where appropriate. + * + * @param dwork pointer to the delayable work item. + * + * @return true if and only if k_work_delayable_busy_get() returns a non-zero + * value. + */ +static inline bool k_work_delayable_is_pending( + const struct k_work_delayable *dwork); + +/** @brief Get the absolute tick count at which a scheduled delayable work + * will be submitted. + * + * @note Safe to invoke from ISRs. + * + * @note This is a live snapshot of state, which may change before the result + * can be inspected. Use locks where appropriate. + * + * @param dwork pointer to the delayable work item. + * + * @return the tick count when the timer that will schedule the work item will + * expire, or the current tick count if the work is not scheduled. + */ +static inline k_ticks_t k_work_delayable_expires_get( + const struct k_work_delayable *dwork); + +/** @brief Get the number of ticks until a scheduled delayable work will be + * submitted. + * + * @note Safe to invoke from ISRs. + * + * @note This is a live snapshot of state, which may change before the result + * can be inspected. Use locks where appropriate. + * + * @param dwork pointer to the delayable work item. + * + * @return the number of ticks until the timer that will schedule the work + * item will expire, or zero if the item is not scheduled. + */ +static inline k_ticks_t k_work_delayable_remaining_get( + const struct k_work_delayable *dwork); + +/** @brief Submit an idle work item to a queue after a delay. + * + * Unlike k_work_reschedule_for_queue() this is a no-op if the work item is + * already scheduled or submitted, even if @p delay is @c K_NO_WAIT. + * + * @note Safe to invoke from ISRs. + * + * @param queue the queue on which the work item should be submitted after the + * delay. + * + * @param dwork pointer to the delayable work item. + * + * @param delay the time to wait before submitting the work item. If @c + * K_NO_WAIT and the work is not pending this is equivalent to + * k_work_submit_to_queue(). + * + * @retval 0 if work was already scheduled or submitted. + * @retval 1 if work has been scheduled. + */ +int k_work_schedule_for_queue(struct k_work_q *queue, + struct k_work_delayable *dwork, + k_timeout_t delay); + +/** @brief Submit an idle work item to the system work queue after a + * delay. + * + * This is a thin wrapper around k_work_schedule_for_queue(), with all the API + * characteristcs of that function. + * + * @param dwork pointer to the delayable work item. + * + * @param delay the time to wait before submitting the work item. If @c + * K_NO_WAIT this is equivalent to k_work_submit_to_queue(). + * + * @return as with k_work_schedule_for_queue(). 
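+ *
+ * For example, an interrupt handler could debounce button events by
+ * scheduling a delayable item, relying on the fact that repeated calls
+ * while the item is still scheduled are no-ops. (The item, handler, and
+ * helper names below are illustrative only.)
+ *
+ * @code
+ * static void debounce_handler(struct k_work *work)
+ * {
+ *	process_button_event();	// hypothetical helper
+ * }
+ *
+ * static K_WORK_DELAYABLE_DEFINE(debounce_work, debounce_handler);
+ *
+ * void button_isr(void)
+ * {
+ *	(void)k_work_schedule(&debounce_work, K_MSEC(50));
+ * }
+ * @endcode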
+ */ +static inline int k_work_schedule(struct k_work_delayable *dwork, + k_timeout_t delay) +{ + return k_work_schedule_for_queue(&k_sys_work_q, dwork, delay); +} + +/** @brief Reschedule a work item to a queue after a delay. + * + * Unlike k_work_schedule_for_queue() this function can change the deadline of + * a scheduled work item, and will schedule a work item that isn't idle + * (e.g. is submitted or running). This function does not affect ("unsubmit") + * a work item that has been submitted to a queue. + * + * @note Safe to invoke from ISRs. + * + * @param queue the queue on which the work item should be submitted after the + * delay. + * + * @param dwork pointer to the delayable work item. + * + * @param delay the time to wait before submitting the work item. If @c + * K_NO_WAIT this is equivalent to k_work_submit_to_queue() after canceling + * any previous scheduled submission. + * + * @note If delay is @c K_NO_WAIT ("no delay") the return values are as with + * k_work_submit_to_queue(). + * + * @retval 0 if delay is @c K_NO_WAIT and work was already on a queue + * @retval 1 if + * * delay is @c K_NO_WAIT and work was not submitted but has now been queued + * to @p queue; or + * * delay not @c K_NO_WAIT and work has been scheduled + * @retval 2 if delay is @c K_NO_WAIT and work was running and has been queued + * to the queue that was running it + */ +int k_work_reschedule_for_queue(struct k_work_q *queue, + struct k_work_delayable *dwork, + k_timeout_t delay); + +/** @brief Reschedule a work item to the system work queue after a + * delay. + * + * This is a thin wrapper around k_work_reschedule_for_queue(), with all the + * API characteristcs of that function. + * + * @param dwork pointer to the delayable work item. + * + * @param delay the time to wait before submitting the work item. + * + * @return as with k_work_reschedule_for_queue(). + */ +static inline int k_work_reschedule(struct k_work_delayable *dwork, + k_timeout_t delay) +{ + return k_work_reschedule_for_queue(&k_sys_work_q, dwork, delay); +} + +/** @brief Flush delayable work. + * + * If the work is scheduled, it is immediately submitted. Then the caller + * blocks until the work completes, as with k_work_flush(). + * + * @note Be careful of caller and work queue thread relative priority. If + * this function sleeps it will not return until the work queue thread + * completes the tasks that allow this thread to resume. + * + * @note Behavior is undefined if this function is invoked on @p dwork from a + * work queue running @p dwork. + * + * @param dwork pointer to the delayable work item. + * + * @param sync pointer to an opaque item containing state related to the + * pending cancellation. The object must persist until the call returns, and + * be accessible from both the caller thread and the work queue thread. The + * object must not be used for any other flush or cancel operation until this + * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object + * must be allocated in coherent memory. + * + * @retval true if call had to wait for completion + * @retval false if work was already idle + */ +bool k_work_flush_delayable(struct k_work_delayable *dwork, + struct k_work_sync *sync); + +/** @brief Cancel delayable work. + * + * Similar to k_work_cancel() but for delayable work. If the work is + * scheduled or submitted it is canceled. This function does not wait for the + * cancellation to complete. + * + * @note Safe to invoke from ISRs. + * + * @note The work may still be running when this returns. 
Use + * k_work_flush_delayable() or k_work_cancel_delayable_sync() to ensure it is + * not running. + * + * @note Canceling delayable work does not prevent rescheduling it. It does + * prevent submitting it until the cancellation completes. + * + * @param dwork pointer to the delayable work item. + * + * @return the k_work_delayable_busy_get() status indicating the state of the + * item after all cancellation steps performed by this call are completed. + */ +int k_work_cancel_delayable(struct k_work_delayable *dwork); + +/** @brief Cancel delayable work and wait. + * + * Like k_work_cancel_delayable() but waits until the work becomes idle. + * + * @note Canceling delayable work does not prevent rescheduling it. It does + * prevent submitting it until the cancellation completes. + * + * @note Be careful of caller and work queue thread relative priority. If + * this function sleeps it will not return until the work queue thread + * completes the tasks that allow this thread to resume. + * + * @note Behavior is undefined if this function is invoked on @p dwork from a + * work queue running @p dwork. + * + * @param dwork pointer to the delayable work item. + * + * @param sync pointer to an opaque item containing state related to the + * pending cancellation. The object must persist until the call returns, and + * be accessible from both the caller thread and the work queue thread. The + * object must not be used for any other flush or cancel operation until this + * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object + * must be allocated in coherent memory. + * + * @retval true if work was not idle (call had to wait for cancellation to + * complete); + * @retval false otherwise + */ +bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork, + struct k_work_sync *sync); + +enum { +/** + * @cond INTERNAL_HIDDEN + */ + + /* The atomic API is used for all work and queue flags fields to + * enforce sequential consistency in SMP environments. + */ + + /* Bits that represent the work item states. At least nine of the + * combinations are distinct valid stable states. + */ + K_WORK_RUNNING_BIT = 0, + K_WORK_CANCELING_BIT = 1, + K_WORK_QUEUED_BIT = 2, + K_WORK_DELAYED_BIT = 3, + + K_WORK_MASK = BIT(K_WORK_DELAYED_BIT) | BIT(K_WORK_QUEUED_BIT) + | BIT(K_WORK_RUNNING_BIT) | BIT(K_WORK_CANCELING_BIT), + + /* Static work flags */ + K_WORK_DELAYABLE_BIT = 8, + K_WORK_DELAYABLE = BIT(K_WORK_DELAYABLE_BIT), + + /* Dynamic work queue flags */ + K_WORK_QUEUE_STARTED_BIT = 0, + K_WORK_QUEUE_STARTED = BIT(K_WORK_QUEUE_STARTED_BIT), + K_WORK_QUEUE_BUSY_BIT = 1, + K_WORK_QUEUE_BUSY = BIT(K_WORK_QUEUE_BUSY_BIT), + K_WORK_QUEUE_DRAIN_BIT = 2, + K_WORK_QUEUE_DRAIN = BIT(K_WORK_QUEUE_DRAIN_BIT), + K_WORK_QUEUE_PLUGGED_BIT = 3, + K_WORK_QUEUE_PLUGGED = BIT(K_WORK_QUEUE_PLUGGED_BIT), + + /* Static work queue flags */ + K_WORK_QUEUE_NO_YIELD_BIT = 8, + K_WORK_QUEUE_NO_YIELD = BIT(K_WORK_QUEUE_NO_YIELD_BIT), + +/** + * INTERNAL_HIDDEN @endcond + */ + /* Transient work flags */ + + /** @brief Flag indicating a work item that is running under a work + * queue thread. + * + * Accessed via k_work_busy_get(). May co-occur with other flags. + */ + K_WORK_RUNNING = BIT(K_WORK_RUNNING_BIT), + + /** @brief Flag indicating a work item that is being canceled. + * + * Accessed via k_work_busy_get(). May co-occur with other flags. + */ + K_WORK_CANCELING = BIT(K_WORK_CANCELING_BIT), + + /** @brief Flag indicating a work item that has been submitted to a + * queue but has not started running. 
+ * + * Accessed via k_work_busy_get(). May co-occur with other flags. + */ + K_WORK_QUEUED = BIT(K_WORK_QUEUED_BIT), + + /** @brief Flag indicating a delayed work item that is scheduled for + * submission to a queue. + * + * Accessed via k_work_busy_get(). May co-occur with other flags. + */ + K_WORK_DELAYED = BIT(K_WORK_DELAYED_BIT), +}; + +/** @brief A structure used to submit work. */ +struct k_work { + /* All fields are protected by the work module spinlock. No fields + * are to be accessed except through kernel API. + */ + + /* Node to link into k_work_q pending list. */ + sys_snode_t node; + + /* The function to be invoked by the work queue thread. */ + k_work_handler_t handler; + + /* The queue on which the work item was last submitted. */ + struct k_work_q *queue; + + /* State of the work item. + * + * The item can be DELAYED, QUEUED, and RUNNING simultaneously. + * + * It can be RUNNING and CANCELING simultaneously. + */ + uint32_t flags; +}; + +#define Z_WORK_INITIALIZER(work_handler) { \ + .handler = work_handler, \ +} + +/** @brief A structure used to submit work after a delay. */ +struct k_work_delayable { + /* The work item. */ + struct k_work work; + + /* Timeout used to submit work after a delay. */ + struct _timeout timeout; + + /* The queue to which the work should be submitted. */ + struct k_work_q *queue; +}; + +#define Z_WORK_DELAYABLE_INITIALIZER(work_handler) { \ + .work = { \ + .handler = work_handler, \ + .flags = K_WORK_DELAYABLE, \ + }, \ +} + +/** + * @brief Initialize a statically-defined delayable work item. + * + * This macro can be used to initialize a statically-defined delayable + * work item, prior to its first use. For example, + * + * @code static K_WORK_DELAYABLE_DEFINE(, ); @endcode + * + * Note that if the runtime dependencies support initialization with + * k_work_init_delayable() using that will eliminate the initialized + * object in ROM that is produced by this macro and copied in at + * system startup. + * + * @param work Symbol name for delayable work item object + * @param work_handler Function to invoke each time work item is processed. + */ +#define K_WORK_DELAYABLE_DEFINE(work, work_handler) \ + struct k_work_delayable work \ + = Z_WORK_DELAYABLE_INITIALIZER(work_handler) + +/** + * @cond INTERNAL_HIDDEN + */ + +/* Record used to wait for work to flush. + * + * The work item is inserted into the queue that will process (or is + * processing) the item, and will be processed as soon as the item + * completes. When the flusher is processed the semaphore will be + * signaled, releasing the thread waiting for the flush. + */ +struct z_work_flusher { + struct k_work work; + struct k_sem sem; +}; + +/* Record used to wait for work to complete a cancellation. + * + * The work item is inserted into a global queue of pending cancels. + * When a cancelling work item goes idle any matching waiters are + * removed from pending_cancels and are woken. + */ +struct z_work_canceller { + sys_snode_t node; + struct k_work *work; + struct k_sem sem; +}; + +/** + * INTERNAL_HIDDEN @endcond + */ + +/** @brief A structure holding internal state for a pending synchronous + * operation on a work item or queue. + * + * Instances of this type are provided by the caller for invocation of + * k_work_flush(), k_work_cancel_sync() and sibling flush and cancel APIs. A + * referenced object must persist until the call returns, and be accessible + * from both the caller thread and the work queue thread. 
+ *
+ * @note If CONFIG_KERNEL_COHERENCE is enabled the object must be allocated in
+ * coherent memory; see arch_mem_coherent(). The stack on these architectures
+ * is generally not coherent, so the object cannot be stack-allocated.
+ * Violations are detected by runtime assertion.
+ */
+struct k_work_sync {
+	union {
+		struct z_work_flusher flusher;
+		struct z_work_canceller canceller;
+	};
+};
+
+/** @brief A structure holding optional configuration items for a work
+ * queue.
+ *
+ * This structure, and values it references, are not retained by
+ * k_work_queue_start().
+ */
+struct k_work_queue_config {
+	/** The name to be given to the work queue thread.
+	 *
+	 * If left null the thread will not have a name.
+	 */
+	const char *name;
+
+	/** Control whether the work queue thread should yield between
+	 * items.
+	 *
+	 * Yielding between items helps guarantee the work queue
+	 * thread does not starve other threads, including cooperative
+	 * ones released by a work item. This is the default behavior.
+	 *
+	 * Set this to @c true to prevent the work queue thread from
+	 * yielding between items. This may be appropriate when a
+	 * sequence of items should complete without yielding
+	 * control.
+	 */
+	bool no_yield;
+};
+
+/** @brief A structure used to hold work until it can be processed. */
+struct k_work_q {
+	/* The thread that animates the work. */
+	struct k_thread thread;
+
+	/* All the following fields must be accessed only while the
+	 * work module spinlock is held.
+	 */
+
+	/* List of k_work items to be worked. */
+	sys_slist_t pending;
+
+	/* Wait queue for idle work thread. */
+	_wait_q_t notifyq;
+
+	/* Wait queue for threads waiting for the queue to drain. */
+	_wait_q_t drainq;
+
+	/* Flags describing queue state. */
+	uint32_t flags;
+};
+
+/* Provide the implementation for inline functions declared above */
+
+static inline bool k_work_is_pending(const struct k_work *work)
+{
+	return k_work_busy_get(work) != 0;
+}
+
+static inline struct k_work_delayable *
+k_work_delayable_from_work(struct k_work *work)
+{
+	return CONTAINER_OF(work, struct k_work_delayable, work);
+}
+
+static inline bool k_work_delayable_is_pending(
+	const struct k_work_delayable *dwork)
+{
+	return k_work_delayable_busy_get(dwork) != 0;
+}
+
+static inline k_ticks_t k_work_delayable_expires_get(
+	const struct k_work_delayable *dwork)
+{
+	return z_timeout_expires(&dwork->timeout);
+}
+
+static inline k_ticks_t k_work_delayable_remaining_get(
+	const struct k_work_delayable *dwork)
+{
+	return z_timeout_remaining(&dwork->timeout);
+}
+
+static inline k_tid_t k_work_queue_thread_get(struct k_work_q *queue)
+{
+	return &queue->thread;
+}
+
+/* Legacy wrappers */
+
+/* to be deprecated */
+static inline bool k_work_pending(const struct k_work *work)
+{
+	return k_work_is_pending(work);
+}
+
+/* to be deprecated */
+static inline void k_work_q_start(struct k_work_q *work_q,
+				  k_thread_stack_t *stack,
+				  size_t stack_size, int prio)
+{
+	k_work_queue_start(work_q, stack, stack_size, prio, NULL);
+}
+
+/* to be deprecated */
+struct k_delayed_work {
+	struct k_work_delayable work;
+};
+
+/* to be deprecated */
+#define Z_DELAYED_WORK_INITIALIZER(work_handler) { \
+	.work = Z_WORK_DELAYABLE_INITIALIZER(work_handler), \
+}
+
+/* to be deprecated */
+static inline void k_delayed_work_init(struct k_delayed_work *work,
+				       k_work_handler_t handler)
+{
+	k_work_init_delayable(&work->work, handler);
+}
+
+/* to be deprecated */
+static inline int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
+						 struct k_delayed_work *work,
+						 
k_timeout_t delay) +{ + int rc = k_work_reschedule_for_queue(work_q, &work->work, delay); + + /* Legacy API doesn't distinguish success cases. */ + return (rc >= 0) ? 0 : rc; +} + +/* to be deprecated */ +static inline int k_delayed_work_submit(struct k_delayed_work *work, + k_timeout_t delay) +{ + int rc = k_work_reschedule(&work->work, delay); + + /* Legacy API doesn't distinguish success cases. */ + return (rc >= 0) ? 0 : rc; +} + +/* to be deprecated */ +static inline int k_delayed_work_cancel(struct k_delayed_work *work) +{ + bool pending = k_work_delayable_is_pending(&work->work); + int rc = k_work_cancel_delayable(&work->work); + + /* Old return value rules: + * + * 0 if: + * * Work item countdown cancelled before the item was submitted to + * its queue; or + * * Work item was removed from its queue before it was processed. + * + * -EINVAL if: + * * Work item has never been submitted; or + * * Work item has been successfully cancelled; or + * * Timeout handler is in the process of submitting the work item to + * its queue; or + * * Work queue thread has removed the work item from the queue but + * has not called its handler. + * + * -EALREADY if: + * * Work queue thread has removed the work item from the queue and + * cleared its pending flag; or + * * Work queue thread is invoking the item handler; or + * * Work item handler has completed. + * + + * We can't reconstruct those states, so call it successful only when + * a pending item is no longer pending, -EINVAL if it was pending and + * still is, and cancel, and -EALREADY if it wasn't pending (so + * presumably cancellation should have had no effect, assuming we + * didn't hit a race condition). + */ + if (pending) { + return (rc == 0) ? 0 : -EINVAL; + } + + return -EALREADY; +} + +/* to be deprecated */ +static inline bool k_delayed_work_pending(struct k_delayed_work *work) +{ + return k_work_delayable_is_pending(&work->work); +} + +/* to be deprecated */ +static inline int32_t k_delayed_work_remaining_get(struct k_delayed_work *work) +{ + k_ticks_t rem = k_work_delayable_remaining_get(&work->work); + + /* Probably should be ceil32, but was floor32 */ + return k_ticks_to_ms_floor32(rem); +} + +/* to be deprecated, not used in-tree */ +static inline k_ticks_t k_delayed_work_expires_ticks( + struct k_delayed_work *work) +{ + return k_work_delayable_expires_get(&work->work); +} + +/* to be deprecated, not used in-tree */ +static inline k_ticks_t k_delayed_work_remaining_ticks( + struct k_delayed_work *work) +{ + return k_work_delayable_remaining_get(&work->work); +} + +/** @} */ + +#endif /* !CONFIG_KERNEL_WORK1 */ + /** * @cond INTERNAL_HIDDEN */ @@ -3317,6 +4228,34 @@ struct k_work_poll { * @{ */ +/** + * @brief Initialize a statically-defined work item. + * + * This macro can be used to initialize a statically-defined workqueue work + * item, prior to its first use. For example, + * + * @code static K_WORK_DEFINE(, ); @endcode + * + * @param work Symbol name for work item object + * @param work_handler Function to invoke each time work item is processed. + */ +#define K_WORK_DEFINE(work, work_handler) \ + struct k_work work = Z_WORK_INITIALIZER(work_handler) + +/** + * @brief Initialize a statically-defined delayed work item. + * + * This macro can be used to initialize a statically-defined workqueue + * delayed work item, prior to its first use. 
For example, + * + * @code static K_DELAYED_WORK_DEFINE(, ); @endcode + * + * @param work Symbol name for delayed work item object + * @param work_handler Function to invoke each time work item is processed. + */ +#define K_DELAYED_WORK_DEFINE(work, work_handler) \ + struct k_delayed_work work = Z_DELAYED_WORK_INITIALIZER(work_handler) + /** * @brief Initialize a triggered work item. * diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt index e12f66fa5e9..68b41420e8c 100644 --- a/kernel/CMakeLists.txt +++ b/kernel/CMakeLists.txt @@ -45,6 +45,7 @@ set_target_properties( ) target_sources_ifdef(CONFIG_KERNEL_WORK1 kernel PRIVATE work_q.c) +target_sources_ifdef(CONFIG_KERNEL_WORK2 kernel PRIVATE work.c) target_sources_ifdef(CONFIG_STACK_CANARIES kernel PRIVATE compiler_stack_protect.c) target_sources_ifdef(CONFIG_SYS_CLOCK_EXISTS kernel PRIVATE timeout.c timer.c) diff --git a/kernel/Kconfig b/kernel/Kconfig index 230e1d2b9f2..9d3620a5660 100644 --- a/kernel/Kconfig +++ b/kernel/Kconfig @@ -396,6 +396,15 @@ config SYSTEM_WORKQUEUE_PRIORITY priority. This means that any work handler, once started, won't be preempted by any other thread until finished. +config SYSTEM_WORKQUEUE_NO_YIELD + bool "Select whether system work queue yields" + help + By default, the system work queue yields between each work item, to + prevent other threads from being starved. Selecting this removes + this yield, which may be useful if the work queue thread is + cooperative and a sequence of work items is expected to complete + without yielding. + endmenu menu "Atomic Operations" diff --git a/kernel/system_work_q.c b/kernel/system_work_q.c index 46fd1b0b3e2..496fbbe62d9 100644 --- a/kernel/system_work_q.c +++ b/kernel/system_work_q.c @@ -14,19 +14,31 @@ #include #include -K_KERNEL_STACK_DEFINE(sys_work_q_stack, CONFIG_SYSTEM_WORKQUEUE_STACK_SIZE); +static K_KERNEL_STACK_DEFINE(sys_work_q_stack, + CONFIG_SYSTEM_WORKQUEUE_STACK_SIZE); struct k_work_q k_sys_work_q; static int k_sys_work_q_init(const struct device *dev) { ARG_UNUSED(dev); + struct k_work_queue_config cfg = { + .name = "sysworkq", + .no_yield = IS_ENABLED(CONFIG_SYSTEM_WORKQUEUE_NO_YIELD), + }; +#ifdef CONFIG_KERNEL_WORK1 k_work_q_start(&k_sys_work_q, sys_work_q_stack, K_KERNEL_STACK_SIZEOF(sys_work_q_stack), CONFIG_SYSTEM_WORKQUEUE_PRIORITY); k_thread_name_set(&k_sys_work_q.thread, "sysworkq"); +#else /* CONFIG_KERNEL_WORK1 */ + k_work_queue_start(&k_sys_work_q, + sys_work_q_stack, + K_KERNEL_STACK_SIZEOF(sys_work_q_stack), + CONFIG_SYSTEM_WORKQUEUE_PRIORITY, &cfg); +#endif /* CONFIG_KERNEL_WORK1 */ return 0; } diff --git a/kernel/work.c b/kernel/work.c new file mode 100644 index 00000000000..a936009321f --- /dev/null +++ b/kernel/work.c @@ -0,0 +1,1042 @@ +/* + * Copyright (c) 2020 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * + * Second generation work queue implementation + */ + +#include +#include +#include +#include +#include +#include +#include + +static inline void flag_clear(uint32_t *flagp, + uint32_t bit) +{ + *flagp &= ~BIT(bit); +} + +static inline void flag_set(uint32_t *flagp, + uint32_t bit) +{ + *flagp |= BIT(bit); +} + +static inline bool flag_test(const uint32_t *flagp, + uint32_t bit) +{ + return (*flagp & BIT(bit)) != 0U; +} + +static inline bool flag_test_and_clear(uint32_t *flagp, + int bit) +{ + bool ret = flag_test(flagp, bit); + + flag_clear(flagp, bit); + + return ret; +} + +static inline void flags_set(uint32_t *flagp, + uint32_t flags) +{ + *flagp = flags; +} + +static inline 
uint32_t flags_get(const uint32_t *flagp) +{ + return *flagp; +} + +/* Lock to protect the internal state of all work items, work queues, + * and pending_cancels. + */ +static struct k_spinlock lock; + +/* Invoked by work thread */ +static void handle_flush(struct k_work *work) +{ + struct z_work_flusher *flusher + = CONTAINER_OF(work, struct z_work_flusher, work); + + k_sem_give(&flusher->sem); +} + +static inline void init_flusher(struct z_work_flusher *flusher) +{ + k_sem_init(&flusher->sem, 0, 1); + k_work_init(&flusher->work, handle_flush); +} + +/* List of pending cancellations. */ +static sys_slist_t pending_cancels; + +/* Initialize a canceler record and add it to the list of pending + * cancels. + * + * Invoked with work lock held. + * + * @param canceler the structure used to notify a waiting process. + * @param work the work structure that is to be canceled + */ +static inline void init_work_cancel(struct z_work_canceller *canceler, + struct k_work *work) +{ + k_sem_init(&canceler->sem, 0, 1); + canceler->work = work; + sys_slist_append(&pending_cancels, &canceler->node); +} + +/* Complete cancellation of a work item and unlock held lock. + * + * Invoked with work lock held. + * + * Invoked from a work queue thread. + * + * Reschedules. + * + * @param work the work structre that has completed cancellation + */ +static void finalize_cancel_locked(struct k_work *work) +{ + struct z_work_canceller *wc, *tmp; + sys_snode_t *prev = NULL; + + /* Clear this first, so released high-priority threads don't + * see it when doing things. + */ + flag_clear(&work->flags, K_WORK_CANCELING_BIT); + + /* Search for and remove the matching container, and release + * what's waiting for the completion. The same work item can + * appear multiple times in the list if multiple threads + * attempt to cancel it. + */ + SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&pending_cancels, wc, tmp, node) { + if (wc->work == work) { + sys_slist_remove(&pending_cancels, prev, &wc->node); + k_sem_give(&wc->sem); + } else { + prev = &wc->node; + } + } +} + +void k_work_init(struct k_work *work, + k_work_handler_t handler) +{ + __ASSERT_NO_MSG(work != NULL); + __ASSERT_NO_MSG(handler != 0); + + *work = (struct k_work)Z_WORK_INITIALIZER(handler); +} + +static inline int work_busy_get_locked(const struct k_work *work) +{ + return flags_get(&work->flags) & K_WORK_MASK; +} + +int k_work_busy_get(const struct k_work *work) +{ + k_spinlock_key_t key = k_spin_lock(&lock); + int ret = work_busy_get_locked(work); + + k_spin_unlock(&lock, key); + + return ret; +} + +/* Add a flusher work item to the queue. + * + * Invoked with work lock held. + * + * Caller must notify queue of pending work. + * + * @param queue queue on which a work item may appear. + * @param work the work item that is either queued or running on @p + * queue + * @param flusher an uninitialized/unused flusher object + */ +static void queue_flusher_locked(struct k_work_q *queue, + struct k_work *work, + struct z_work_flusher *flusher) +{ + bool in_list = false; + struct k_work *wn; + + /* Determine whether the work item is still queued. */ + SYS_SLIST_FOR_EACH_CONTAINER(&queue->pending, wn, node) { + if (wn == work) { + in_list = true; + break; + } + } + + init_flusher(flusher); + if (in_list) { + sys_slist_insert(&queue->pending, &work->node, + &flusher->work.node); + } else { + sys_slist_prepend(&queue->pending, &flusher->work.node); + } +} + +/* Try to remove a work item from the given queue. + * + * Invoked with work lock held. 
+ * + * @param queue the queue from which the work should be removed + * @param work work that may be on the queue + */ +static inline void queue_remove_locked(struct k_work_q *queue, + struct k_work *work) +{ + if (flag_test_and_clear(&work->flags, K_WORK_QUEUED_BIT)) { + (void)sys_slist_find_and_remove(&queue->pending, &work->node); + } +} + +/* Potentially notify a queue that it needs to look for pending work. + * + * This may make the work queue thread ready, but as the lock is held it + * will not be a reschedule point. Callers should yield after the lock is + * released where appropriate (generally if this returns true). + * + * @param queue to be notified. If this is null no notification is required. + * + * @return true if and only if the queue was notified and woken, i.e. a + * reschedule is pending. + */ +static inline bool notify_queue_locked(struct k_work_q *queue) +{ + bool rv = false; + + if (queue != NULL) { + rv = z_sched_wake(&queue->notifyq, 0, NULL); + } + + return rv; +} + +/* Submit an work item to a queue if queue state allows new work. + * + * Submission is rejected if no queue is provided, or if the queue is + * draining and the work isn't being submitted from the queue's + * thread (chained submission). + * + * Invoked with work lock held. + * Conditionally notifies queue. + * + * @param queue the queue to which work should be submitted. This may + * be null, in which case the submission will fail. + * + * @param work to be submitted + * + * @retval 1 if successfully queued + * @retval -EINVAL if no queue is provided + * @retval -ENODEV if the queue is not started + * @retval -EBUSY if the submission was rejected (draining, plugged) + */ +static inline int queue_submit_locked(struct k_work_q *queue, + struct k_work *work) +{ + if (queue == NULL) { + return -EINVAL; + } + + int ret = -EBUSY; + bool chained = (_current == &queue->thread) && !k_is_in_isr(); + bool draining = flag_test(&queue->flags, K_WORK_QUEUE_DRAIN_BIT); + bool plugged = flag_test(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT); + + /* Test for acceptability, in priority order: + * + * * -ENODEV if the queue isn't running. + * * -EBUSY if draining and not chained + * * -EBUSY if plugged and not draining + * * otherwise OK + */ + if (!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT)) { + ret = -ENODEV; + } else if (draining && !chained) { + ret = -EBUSY; + } else if (plugged && !draining) { + ret = -EBUSY; + } else { + sys_slist_append(&queue->pending, &work->node); + ret = 1; + (void)notify_queue_locked(queue); + } + + return ret; +} + +/* Attempt to submit work to a queue. + * + * The submission can fail if: + * * the work is cancelling, + * * no candidate queue can be identified; + * * the candidate queue rejects the submission. + * + * Invoked with work lock held. + * Conditionally notifies queue. + * + * @param work the work structure to be submitted + + * @param queuep pointer to a queue reference. On input this should + * dereference to the proposed queue (which may be null); after completion it + * will be null if the work was not submitted or if submitted will reference + * the queue it was submitted to. That may or may not be the queue provided + * on input. 
+ * + * @retval 0 if work was already submitted to a queue + * @retval 1 if work was not submitted and has been queued to @p queue + * @retval 2 if work was running and has been queued to the queue that was + * running it + * @retval -EBUSY if canceling or submission was rejected by queue + * @retval -EINVAL if no queue is provided + * @retval -ENODEV if the queue is not started + */ +static int submit_to_queue_locked(struct k_work *work, + struct k_work_q **queuep) +{ + int ret = 0; + + if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) { + /* Disallowed */ + ret = -EBUSY; + } else if (!flag_test(&work->flags, K_WORK_QUEUED_BIT)) { + /* Not currently queued */ + ret = 1; + + /* If no queue specified resubmit to last queue. + */ + if (*queuep == NULL) { + *queuep = work->queue; + } + + /* If the work is currently running we have to use the + * queue it's running on to prevent handler + * re-entrancy. + */ + if (flag_test(&work->flags, K_WORK_RUNNING_BIT)) { + __ASSERT_NO_MSG(work->queue != NULL); + *queuep = work->queue; + ret = 2; + } + + int rc = queue_submit_locked(*queuep, work); + + if (rc < 0) { + ret = rc; + } else { + flag_set(&work->flags, K_WORK_QUEUED_BIT); + work->queue = *queuep; + } + } else { + /* Already queued, do nothing. */ + } + + if (ret <= 0) { + *queuep = NULL; + } + + return ret; +} + +int k_work_submit_to_queue(struct k_work_q *queue, + struct k_work *work) +{ + __ASSERT_NO_MSG(work != NULL); + + k_spinlock_key_t key = k_spin_lock(&lock); + int ret = submit_to_queue_locked(work, &queue); + + k_spin_unlock(&lock, key); + + /* If we changed the queue contents (as indicated by a positive ret) + * the queue thread may now be ready, but we missed the reschedule + * point because the lock was held. If this is being invoked by a + * preemptible thread then yield. + */ + if ((ret > 0) && (k_is_preempt_thread() != 0)) { + k_yield(); + } + + + return ret; +} + +/* Flush the work item if necessary. + * + * Flushing is necessary only if the work is either queued or running. + * + * Invoked with work lock held by key. + * Sleeps. + * + * @param work the work item that is to be flushed + * @param flusher state used to synchronize the flush + * + * @retval true if work is queued or running. If this happens the + * caller must take the flusher semaphore after releasing the lock. + * + * @retval false otherwise. No wait required. + */ +static bool work_flush_locked(struct k_work *work, + struct z_work_flusher *flusher) +{ + bool need_flush = (flags_get(&work->flags) + & (K_WORK_QUEUED | K_WORK_RUNNING)) != 0; + + if (need_flush) { + struct k_work_q *queue = work->queue; + + __ASSERT_NO_MSG(queue != NULL); + + queue_flusher_locked(queue, work, flusher); + notify_queue_locked(queue); + } + + return need_flush; +} + +bool k_work_flush(struct k_work *work, + struct k_work_sync *sync) +{ + __ASSERT_NO_MSG(work != NULL); + __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT)); + __ASSERT_NO_MSG(!k_is_in_isr()); + __ASSERT_NO_MSG(sync != NULL); +#ifdef CONFIG_KERNEL_COHERENCE + __ASSERT_NO_MSG(arch_mem_coherent(sync)); +#endif + + struct z_work_flusher *flusher = &sync->flusher; + k_spinlock_key_t key = k_spin_lock(&lock); + + bool need_flush = work_flush_locked(work, flusher); + + k_spin_unlock(&lock, key); + + /* If necessary wait until the flusher item completes */ + if (need_flush) { + k_sem_take(&flusher->sem, K_FOREVER); + } + + return need_flush; +} + +/* Execute the non-waiting steps necessary to cancel a work item. + * + * Invoked with work lock held. 
+ * + * @param work the work item to be canceled. + * + * @retval true if we need to wait for the work item to finish canceling + * @retval false if the work item is idle + * + * @return k_busy_wait() captured under lock + */ +static int cancel_async_locked(struct k_work *work) +{ + /* If we haven't already started canceling, do it now. */ + if (!flag_test(&work->flags, K_WORK_CANCELING_BIT)) { + /* Remove it from the queue, if it's queued. */ + queue_remove_locked(work->queue, work); + } + + /* If it's still busy after it's been dequeued, then flag it + * as canceling. + */ + int ret = work_busy_get_locked(work); + + if (ret != 0) { + flag_set(&work->flags, K_WORK_CANCELING_BIT); + ret = work_busy_get_locked(work); + } + + return ret; +} + +/* Complete cancellation necessary, release work lock, and wait if + * necessary. + * + * Invoked with work lock held by key. + * Sleeps. + * + * @param work work that is being canceled + * @param canceller state used to synchronize the cancellation + * @param key used by work lock + * + * @retval true if and only if the work was still active on entry. The caller + * must wait on the canceller semaphore after releasing the lock. + * + * @retval false if work was idle on entry. The caller need not wait. + */ +static bool cancel_sync_locked(struct k_work *work, + struct z_work_canceller *canceller) +{ + bool ret = flag_test(&work->flags, K_WORK_CANCELING_BIT); + + /* If something's still running then we have to wait for + * completion, which is indicated when finish_cancel() gets + * invoked. + */ + if (ret) { + init_work_cancel(canceller, work); + } + + return ret; +} + +int k_work_cancel(struct k_work *work) +{ + __ASSERT_NO_MSG(work != NULL); + __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT)); + + k_spinlock_key_t key = k_spin_lock(&lock); + int ret = cancel_async_locked(work); + + k_spin_unlock(&lock, key); + + return ret; +} + +bool k_work_cancel_sync(struct k_work *work, + struct k_work_sync *sync) +{ + __ASSERT_NO_MSG(work != NULL); + __ASSERT_NO_MSG(sync != NULL); + __ASSERT_NO_MSG(!flag_test(&work->flags, K_WORK_DELAYABLE_BIT)); + __ASSERT_NO_MSG(!k_is_in_isr()); +#ifdef CONFIG_KERNEL_COHERENCE + __ASSERT_NO_MSG(arch_mem_coherent(sync)); +#endif + + struct z_work_canceller *canceller = &sync->canceller; + k_spinlock_key_t key = k_spin_lock(&lock); + + (void)cancel_async_locked(work); + + bool need_wait = cancel_sync_locked(work, canceller); + + k_spin_unlock(&lock, key); + + if (need_wait) { + k_sem_take(&canceller->sem, K_FOREVER); + } + + return need_wait; +} + +/* Work has been dequeued and is about to be invoked by the work + * thread. + * + * If the work is being canceled the cancellation will be completed + * here, and the caller told not to use the work item. + * + * Invoked by work queue thread. + * Takes and releases lock. + * Reschedules via finalize_cancel_locked + * + * @param work work that is changing state + * @param queue queue that is running work + * + * @retval true if work is to be run by the work thread + * @retval false if it has been canceled and should not be run + */ +static inline bool work_set_running(struct k_work *work, + struct k_work_q *queue) +{ + bool ret = false; + k_spinlock_key_t key = k_spin_lock(&lock); + + /* Allow the work to be queued again. */ + flag_clear(&work->flags, K_WORK_QUEUED_BIT); + + /* Normally we indicate that the work is being processed by + * setting RUNNING. 
However, something may have initiated + * cancellation between when the work thread pulled this off + * its queue and this claimed the work lock. If that happened + * we complete the cancellation now and tell the work thread + * not to do anything. + */ + ret = !flag_test(&work->flags, K_WORK_CANCELING_BIT); + if (ret) { + /* Not cancelling: mark running and go */ + flag_set(&work->flags, K_WORK_RUNNING_BIT); + } else { + /* Caught the item before being invoked; complete the + * cancellation now. + */ + finalize_cancel_locked(work); + } + + k_spin_unlock(&lock, key); + + return ret; +} + +/* Work handler has been called and is about to go idle. + * + * If the work is being canceled this will notify anything waiting + * for the cancellation. + * + * Invoked by work queue thread. + * Takes and releases lock. + * Reschedules via finalize_cancel_locked + * + * @param work work that is in running state + */ +static inline void work_clear_running(struct k_work *work) +{ + k_spinlock_key_t key = k_spin_lock(&lock); + + /* Clear running */ + flag_clear(&work->flags, K_WORK_RUNNING_BIT); + + if (flag_test(&work->flags, K_WORK_CANCELING_BIT)) { + finalize_cancel_locked(work); + } + + k_spin_unlock(&lock, key); +} + +/* Loop executed by a work queue thread. + * + * @param workq_ptr pointer to the work queue structure + */ +static void work_queue_main(void *workq_ptr, void *p2, void *p3) +{ + struct k_work_q *queue = (struct k_work_q *)workq_ptr; + + while (true) { + sys_snode_t *node; + struct k_work *work = NULL; + k_spinlock_key_t key = k_spin_lock(&lock); + + /* Clear the record of processing any previous work, and check + * for new work. + */ + node = sys_slist_get(&queue->pending); + if (node != NULL) { + /* Mark that there's some work active that's + * not on the pending list. + */ + flag_set(&queue->flags, K_WORK_QUEUE_BUSY_BIT); + work = CONTAINER_OF(node, struct k_work, node); + } else if (flag_test_and_clear(&queue->flags, + K_WORK_QUEUE_DRAIN_BIT)) { + /* Not busy and draining: move threads waiting for + * drain to ready state. The held spinlock inhibits + * immediate reschedule; released threads get their + * chance when this invokes z_sched_wait() below. + * + * We don't touch K_WORK_QUEUE_PLUGGABLE, so getting + * here doesn't mean that the queue will allow new + * submissions. + */ + (void)z_sched_wake_all(&queue->drainq, 1, NULL); + } + + if (work == NULL) { + /* Nothing's had a chance to add work since we took + * the lock, and we didn't find work nor got asked to + * stop. Just go to sleep: when something happens the + * work thread will be woken and we can check again. + */ + + (void)z_sched_wait(&lock, key, &queue->notifyq, + K_FOREVER, NULL); + continue; + } + + k_spin_unlock(&lock, key); + + if (work != NULL) { + bool yield; + k_work_handler_t handler = work->handler; + + __ASSERT_NO_MSG(handler != 0); + + if (work_set_running(work, queue)) { + handler(work); + work_clear_running(work); + } + + /* No longer referencing the work, so we can clear the + * BUSY flag while we yield to prevent starving other + * threads. + */ + key = k_spin_lock(&lock); + flag_clear(&queue->flags, K_WORK_QUEUE_BUSY_BIT); + yield = !flag_test(&queue->flags, K_WORK_QUEUE_NO_YIELD_BIT); + k_spin_unlock(&lock, key); + + /* Optionally yield to prevent the work queue from + * starving other threads. 
+ */ + if (yield) { + k_yield(); + } + } + } +} + +void k_work_queue_start(struct k_work_q *queue, + k_thread_stack_t *stack, + size_t stack_size, + int prio, + const struct k_work_queue_config *cfg) +{ + __ASSERT_NO_MSG(queue); + __ASSERT_NO_MSG(stack); + __ASSERT_NO_MSG(!flag_test(&queue->flags, K_WORK_QUEUE_STARTED_BIT)); + uint32_t flags = K_WORK_QUEUE_STARTED; + + sys_slist_init(&queue->pending); + z_waitq_init(&queue->notifyq); + z_waitq_init(&queue->drainq); + + if ((cfg != NULL) && cfg->no_yield) { + flags |= K_WORK_QUEUE_NO_YIELD; + } + + /* It hasn't actually been started yet, but all the state is in place + * so we can submit things and once the thread gets control it's ready + * to roll. + */ + flags_set(&queue->flags, flags); + + (void)k_thread_create(&queue->thread, stack, stack_size, + work_queue_main, queue, NULL, NULL, + prio, 0, K_FOREVER); + + if ((cfg != NULL) && (cfg->name != NULL)) { + k_thread_name_set(&queue->thread, cfg->name); + } + + k_thread_start(&queue->thread); +} + +int k_work_queue_drain(struct k_work_q *queue, + bool plug) +{ + __ASSERT_NO_MSG(queue); + __ASSERT_NO_MSG(!k_is_in_isr()); + + int ret = 0; + k_spinlock_key_t key = k_spin_lock(&lock); + + if (((flags_get(&queue->flags) + & (K_WORK_QUEUE_BUSY | K_WORK_QUEUE_DRAIN)) != 0U) + || plug + || !sys_slist_is_empty(&queue->pending)) { + flag_set(&queue->flags, K_WORK_QUEUE_DRAIN_BIT); + if (plug) { + flag_set(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT); + } + + notify_queue_locked(queue); + ret = z_sched_wait(&lock, key, &queue->drainq, + K_FOREVER, NULL); + } else { + k_spin_unlock(&lock, key); + } + + return ret; +} + +int k_work_queue_unplug(struct k_work_q *queue) +{ + __ASSERT_NO_MSG(queue); + + int ret = -EALREADY; + k_spinlock_key_t key = k_spin_lock(&lock); + + if (flag_test_and_clear(&queue->flags, K_WORK_QUEUE_PLUGGED_BIT)) { + ret = 0; + } + + k_spin_unlock(&lock, key); + + return ret; +} + +#ifdef CONFIG_SYS_CLOCK_EXISTS + +/* Timeout handler for delayable work. + * + * Invoked by timeout infrastructure. + * Takes and releases work lock. + * Conditionally reschedules. + */ +static void work_timeout(struct _timeout *to) +{ + struct k_work_delayable *dw + = CONTAINER_OF(to, struct k_work_delayable, timeout); + struct k_work *wp = &dw->work; + k_spinlock_key_t key = k_spin_lock(&lock); + struct k_work_q *queue = NULL; + + /* If the work is still marked delayed (should be) then clear that + * state and submit it to the queue. If successful the queue will be + * notified of new work at the next reschedule point. + * + * If not successful there is no notification that the work has been + * abandoned. Sorry. 
+ */
+ if (flag_test_and_clear(&wp->flags, K_WORK_DELAYED_BIT)) {
+ queue = dw->queue;
+ (void)submit_to_queue_locked(wp, &queue);
+ }
+
+ k_spin_unlock(&lock, key);
+}
+
+void k_work_init_delayable(struct k_work_delayable *dwork,
+ k_work_handler_t handler)
+{
+ __ASSERT_NO_MSG(dwork != NULL);
+ __ASSERT_NO_MSG(handler != NULL);
+
+ *dwork = (struct k_work_delayable){
+ .work = {
+ .handler = handler,
+ .flags = K_WORK_DELAYABLE,
+ },
+ };
+ z_init_timeout(&dwork->timeout);
+}
+
+static inline int work_delayable_busy_get_locked(const struct k_work_delayable *dwork)
+{
+ return atomic_get(&dwork->work.flags) & K_WORK_MASK;
+}
+
+int k_work_delayable_busy_get(const struct k_work_delayable *dwork)
+{
+ k_spinlock_key_t key = k_spin_lock(&lock);
+ int ret = work_delayable_busy_get_locked(dwork);
+
+ k_spin_unlock(&lock, key);
+ return ret;
+}
+
+/* Attempt to schedule a work item for future (possibly immediate)
+ * submission.
+ *
+ * Invoked with work lock held.
+ *
+ * See also submit_to_queue_locked(), which implements this for a no-wait
+ * delay.
+ *
+ * @param queuep pointer to a pointer to a queue. On input this
+ * should dereference to the proposed queue (which may be null); after
+ * completion it will be null if the work was not submitted or if
+ * submitted will reference the queue it was submitted to. That may
+ * or may not be the queue provided on input.
+ *
+ * @param dwork the delayed work structure
+ *
+ * @param delay the delay to use before scheduling.
+ *
+ * @return the result of submit_to_queue_locked() if @p delay is
+ * K_NO_WAIT; otherwise 1 to indicate the work was scheduled.
+ */
+static int schedule_for_queue_locked(struct k_work_q **queuep,
+ struct k_work_delayable *dwork,
+ k_timeout_t delay)
+{
+ int ret = 1;
+ struct k_work *work = &dwork->work;
+
+ if (K_TIMEOUT_EQ(delay, K_NO_WAIT)) {
+ return submit_to_queue_locked(work, queuep);
+ }
+
+ flag_set(&work->flags, K_WORK_DELAYED_BIT);
+ dwork->queue = *queuep;
+
+ /* Add timeout */
+ z_add_timeout(&dwork->timeout, work_timeout, delay);
+
+ return ret;
+}
+
+/* Unschedule delayable work.
+ *
+ * If the work is delayed, cancel the timeout and clear the delayed
+ * flag.
+ *
+ * Invoked with work lock held.
+ *
+ * @param dwork pointer to delayable work structure.
+ *
+ * @return true if and only if work had been delayed so the timeout
+ * was cancelled.
+ */
+static inline bool unschedule_locked(struct k_work_delayable *dwork)
+{
+ bool ret = false;
+ struct k_work *work = &dwork->work;
+
+ /* If scheduled, try to cancel. */
+ if (flag_test_and_clear(&work->flags, K_WORK_DELAYED_BIT)) {
+ z_abort_timeout(&dwork->timeout);
+ ret = true;
+ }
+
+ return ret;
+}
+
+/* Full cancellation of a delayable work item.
+ *
+ * Unschedules the delayed part then delegates to standard work
+ * cancellation.
+ *
+ * Invoked with work lock held.
+ *
+ * @param dwork delayable work item
+ *
+ * @return k_work_busy_get() flags
+ */
+static int cancel_delayable_async_locked(struct k_work_delayable *dwork)
+{
+ (void)unschedule_locked(dwork);
+
+ return cancel_async_locked(&dwork->work);
+}
+
+int k_work_schedule_for_queue(struct k_work_q *queue,
+ struct k_work_delayable *dwork,
+ k_timeout_t delay)
+{
+ __ASSERT_NO_MSG(dwork != NULL);
+
+ struct k_work *work = &dwork->work;
+ int ret = 0;
+ k_spinlock_key_t key = k_spin_lock(&lock);
+
+ /* Schedule the work item if it's idle.
*/ + if (work_busy_get_locked(work) == 0U) { + ret = schedule_for_queue_locked(&queue, dwork, delay); + } + + k_spin_unlock(&lock, key); + + return ret; +} + +int k_work_reschedule_for_queue(struct k_work_q *queue, + struct k_work_delayable *dwork, + k_timeout_t delay) +{ + __ASSERT_NO_MSG(dwork != NULL); + + int ret = 0; + k_spinlock_key_t key = k_spin_lock(&lock); + + /* Remove any active scheduling. */ + (void)unschedule_locked(dwork); + + /* Schedule the work item with the new parameters. */ + ret = schedule_for_queue_locked(&queue, dwork, delay); + + k_spin_unlock(&lock, key); + + return ret; +} + +int k_work_cancel_delayable(struct k_work_delayable *dwork) +{ + __ASSERT_NO_MSG(dwork != NULL); + + k_spinlock_key_t key = k_spin_lock(&lock); + int ret = cancel_delayable_async_locked(dwork); + + k_spin_unlock(&lock, key); + return ret; +} + +bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork, + struct k_work_sync *sync) +{ + __ASSERT_NO_MSG(dwork != NULL); + __ASSERT_NO_MSG(sync != NULL); + __ASSERT_NO_MSG(!k_is_in_isr()); +#ifdef CONFIG_KERNEL_COHERENCE + __ASSERT_NO_MSG(arch_mem_coherent(sync)); +#endif + + struct z_work_canceller *canceller = &sync->canceller; + k_spinlock_key_t key = k_spin_lock(&lock); + + (void)cancel_delayable_async_locked(dwork); + + bool need_wait = cancel_sync_locked(&dwork->work, canceller); + + k_spin_unlock(&lock, key); + + if (need_wait) { + k_sem_take(&canceller->sem, K_FOREVER); + } + + return need_wait; +} + +bool k_work_flush_delayable(struct k_work_delayable *dwork, + struct k_work_sync *sync) +{ + __ASSERT_NO_MSG(dwork != NULL); + __ASSERT_NO_MSG(sync != NULL); + __ASSERT_NO_MSG(!k_is_in_isr()); +#ifdef CONFIG_KERNEL_COHERENCE + __ASSERT_NO_MSG(arch_mem_coherent(sync)); +#endif + + struct k_work *work = &dwork->work; + struct z_work_flusher *flusher = &sync->flusher; + k_spinlock_key_t key = k_spin_lock(&lock); + + /* If it's idle release the lock and return immediately. */ + if (work_busy_get_locked(work) == 0U) { + k_spin_unlock(&lock, key); + return false; + } + + /* If unscheduling did something then submit it. Ignore a + * failed submission (e.g. when cancelling). + */ + if (unschedule_locked(dwork)) { + struct k_work_q *queue = dwork->queue; + + (void)submit_to_queue_locked(work, &queue); + } + + /* Wait for it to finish */ + bool need_flush = work_flush_locked(work, flusher); + + k_spin_unlock(&lock, key); + + /* If necessary wait until the flusher item completes */ + if (need_flush) { + k_sem_take(&flusher->sem, K_FOREVER); + } + + return need_flush; +} + +#endif /* CONFIG_SYS_CLOCK_EXISTS */
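
For orientation, a minimal usage sketch of the new API follows. It is illustrative only and not part of the diff; the queue name, stack size, priority, handler, and delay values are hypothetical placeholders, not anything defined by this patch.

/* Illustrative consumer code; names, sizes, priority, and delay are placeholders. */
#include <kernel.h>

#define MY_WORKQ_STACK_SIZE 1024
#define MY_WORKQ_PRIORITY 5

K_THREAD_STACK_DEFINE(my_workq_stack, MY_WORKQ_STACK_SIZE);

static struct k_work_q my_workq;
static struct k_work_delayable my_dwork;

static void my_handler(struct k_work *work)
{
	/* Recover the containing delayable item if its state is needed. */
	struct k_work_delayable *dwork =
		CONTAINER_OF(work, struct k_work_delayable, work);

	ARG_UNUSED(dwork);
	/* ... deferred processing goes here ... */
}

static void my_init(void)
{
	struct k_work_queue_config cfg = {
		.name = "my_workq",
		.no_yield = false,
	};

	/* Start a dedicated queue thread with its own stack and priority. */
	k_work_queue_start(&my_workq, my_workq_stack,
			   K_THREAD_STACK_SIZEOF(my_workq_stack),
			   MY_WORKQ_PRIORITY, &cfg);

	k_work_init_delayable(&my_dwork, my_handler);

	/* Run my_handler on my_workq roughly 100 ms from now. */
	(void)k_work_schedule_for_queue(&my_workq, &my_dwork, K_MSEC(100));
}

static void my_shutdown(void)
{
	struct k_work_sync sync;

	/* Cancel the item and wait for any in-flight handler to finish. */
	(void)k_work_cancel_delayable_sync(&my_dwork, &sync);

	/* Drain the queue and plug it so no new items can be submitted. */
	(void)k_work_queue_drain(&my_workq, true);
}

Submission and scheduling are safe from ISRs; the _sync and drain calls above must be made from thread context, consistent with the !k_is_in_isr() assertions in the implementation.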