From a37a981b21d5e436e92c6160b5ad70e2ffd956ba Mon Sep 17 00:00:00 2001
From: Andy Ross <andrew.j.ross@intel.com>
Date: Tue, 24 Jul 2018 11:26:43 -0700
Subject: [PATCH] kernel/work_q: Spinlockify

Each work_q object gets a separate spinlock to synchronize access
instead of the global lock.  Note that there was a recursive lock
condition in k_delayed_work_cancel(), so that's been split out into an
internal unlocked version and the API entry point that wraps it with a
lock.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
---
 include/kernel.h          |  1 +
 include/kernel_includes.h |  1 +
 kernel/work_q.c           | 53 +++++++++++++++++++++++----------------
 3 files changed, 33 insertions(+), 22 deletions(-)

diff --git a/include/kernel.h b/include/kernel.h
index 2bda8e5042f..b1ba5e84355 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -2526,6 +2526,7 @@ typedef void (*k_work_handler_t)(struct k_work *work);
 struct k_work_q {
 	struct k_queue queue;
 	struct k_thread thread;
+	struct k_spinlock lock;
 };
 
 enum {
diff --git a/include/kernel_includes.h b/include/kernel_includes.h
index 283a9f19412..3240d390166 100644
--- a/include/kernel_includes.h
+++ b/include/kernel_includes.h
@@ -34,5 +34,6 @@
 #include <kernel_version.h>
 #include <random/rand32.h>
 #include <syscall.h>
+#include <spinlock.h>
 
 #endif /* ZEPHYR_INCLUDE_KERNEL_INCLUDES_H_ */
diff --git a/kernel/work_q.c b/kernel/work_q.c
index 8f11ebc8af0..8c6335ab5d1 100644
--- a/kernel/work_q.c
+++ b/kernel/work_q.c
@@ -13,6 +13,7 @@
 
 #include <kernel_structs.h>
 #include <wait_q.h>
+#include <spinlock.h>
 #include <errno.h>
 #include <ksched.h>
@@ -47,11 +48,32 @@ void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler)
 	work->work_q = NULL;
 }
 
+static int work_cancel(struct k_delayed_work *work)
+{
+	__ASSERT(work->work_q != NULL, "");
+
+	if (k_work_pending(&work->work)) {
+		/* Remove from the queue if already submitted */
+		if (!k_queue_remove(&work->work_q->queue, &work->work)) {
+			return -EINVAL;
+		}
+	} else {
+		(void)_abort_timeout(&work->timeout);
+	}
+
+	/* Detach from workqueue */
+	work->work_q = NULL;
+
+	atomic_clear_bit(work->work.flags, K_WORK_STATE_PENDING);
+
+	return 0;
+}
+
 int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
 				   struct k_delayed_work *work,
 				   s32_t delay)
 {
-	unsigned int key = irq_lock();
+	k_spinlock_key_t key = k_spin_lock(&work_q->lock);
 	int err;
 
 	/* Work cannot be active in multiple queues */
@@ -62,7 +84,7 @@ int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
 
 	/* Cancel if work has been submitted */
 	if (work->work_q == work_q) {
-		err = k_delayed_work_cancel(work);
+		err = work_cancel(work);
 		if (err < 0) {
 			goto done;
 		}
@@ -83,36 +105,23 @@ int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
 	err = 0;
 
 done:
-	irq_unlock(key);
+	k_spin_unlock(&work_q->lock, key);
 
 	return err;
 }
 
 int k_delayed_work_cancel(struct k_delayed_work *work)
 {
-	unsigned int key = irq_lock();
-
 	if (!work->work_q) {
-		irq_unlock(key);
 		return -EINVAL;
 	}
 
-	if (k_work_pending(&work->work)) {
-		/* Remove from the queue if already submitted */
-		if (!k_queue_remove(&work->work_q->queue, &work->work)) {
-			irq_unlock(key);
-			return -EINVAL;
-		}
-	} else {
-		(void)_abort_timeout(&work->timeout);
-	}
+	struct k_spinlock *lock = &work->work_q->lock;
+	k_spinlock_key_t key = k_spin_lock(lock);
+	int ret = work_cancel(work);
 
-	/* Detach from workqueue */
-	work->work_q = NULL;
-
-	atomic_clear_bit(work->work.flags, K_WORK_STATE_PENDING);
-	irq_unlock(key);
-
-	return 0;
+	k_spin_unlock(lock, key);
+	return ret;
 }
+
 #endif /* CONFIG_SYS_CLOCK_EXISTS */