kernel/work_q: Spinlockify

Each work_q object gets its own spinlock to synchronize access, replacing
the global lock.  Note that there was a recursive locking condition in
k_delayed_work_cancel(): k_delayed_work_submit_to_queue() invoked it while
already holding the queue's lock, which was harmless with the nestable
irq_lock() but would deadlock with a non-recursive spinlock.  Cancellation
has therefore been split into an internal unlocked helper and the API
entry point that wraps it with the lock.
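
The split is the usual unlocked-helper pattern.  Below is a minimal
self-contained sketch of the shape, assuming only the k_spinlock API;
the struct obj and obj_* names are illustrative stand-ins, not part of
this patch (the real pair is work_cancel() / k_delayed_work_cancel()
in the diff):

    #include <spinlock.h>
    #include <errno.h>

    /* Hypothetical object that owns its own lock, like struct k_work_q */
    struct obj {
        struct k_spinlock lock;
        int pending;
    };

    /* Internal version: caller must already hold o->lock */
    static int obj_cancel_locked(struct obj *o)
    {
        if (!o->pending) {
            return -EINVAL;
        }
        o->pending = 0;
        return 0;
    }

    /* API entry point: wraps the unlocked helper with the lock */
    int obj_cancel(struct obj *o)
    {
        k_spinlock_key_t key = k_spin_lock(&o->lock);
        int ret = obj_cancel_locked(o);

        k_spin_unlock(&o->lock, key);
        return ret;
    }

    /* A second API that must cancel while already holding the lock.
     * It calls the helper directly: re-entering obj_cancel() would
     * re-take the same spinlock, which (unlike irq_lock()) does not
     * nest.
     */
    int obj_submit(struct obj *o)
    {
        k_spinlock_key_t key = k_spin_lock(&o->lock);
        int ret = 0;

        if (o->pending) {
            ret = obj_cancel_locked(o);   /* NOT obj_cancel() */
        }
        if (ret == 0) {
            o->pending = 1;
        }
        k_spin_unlock(&o->lock, key);
        return ret;
    }

Any path that already holds the lock calls the helper directly; only
the public entry point takes it.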

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
commit a37a981b21
Author: Andy Ross, 2018-07-24 11:26:43 -07:00; committed by Anas Nashif
3 changed files with 33 additions and 22 deletions

diff --git a/include/kernel.h b/include/kernel.h

@@ -2526,6 +2526,7 @@ typedef void (*k_work_handler_t)(struct k_work *work);
 struct k_work_q {
     struct k_queue queue;
     struct k_thread thread;
+    struct k_spinlock lock;
 };
 
 enum {

diff --git a/include/kernel_includes.h b/include/kernel_includes.h

@@ -34,5 +34,6 @@
 #include <arch/cpu.h>
 #include <misc/rb.h>
 #include <sys_clock.h>
+#include <spinlock.h>
 
 #endif /* ZEPHYR_INCLUDE_KERNEL_INCLUDES_H_ */

diff --git a/kernel/work_q.c b/kernel/work_q.c

@@ -13,6 +13,7 @@
 #include <kernel_structs.h>
 #include <wait_q.h>
+#include <spinlock.h>
 #include <errno.h>
 #include <stdbool.h>
@@ -47,11 +48,32 @@ void k_delayed_work_init(struct k_delayed_work *work, k_work_handler_t handler)
     work->work_q = NULL;
 }
 
+static int work_cancel(struct k_delayed_work *work)
+{
+    __ASSERT(work->work_q != NULL, "");
+
+    if (k_work_pending(&work->work)) {
+        /* Remove from the queue if already submitted */
+        if (!k_queue_remove(&work->work_q->queue, &work->work)) {
+            return -EINVAL;
+        }
+    } else {
+        (void)_abort_timeout(&work->timeout);
+    }
+
+    /* Detach from workqueue */
+    work->work_q = NULL;
+
+    atomic_clear_bit(work->work.flags, K_WORK_STATE_PENDING);
+
+    return 0;
+}
+
 int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
                                    struct k_delayed_work *work,
                                    s32_t delay)
 {
-    unsigned int key = irq_lock();
+    k_spinlock_key_t key = k_spin_lock(&work_q->lock);
     int err;
 
     /* Work cannot be active in multiple queues */
@@ -62,7 +84,7 @@ int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
 
     /* Cancel if work has been submitted */
     if (work->work_q == work_q) {
-        err = k_delayed_work_cancel(work);
+        err = work_cancel(work);
         if (err < 0) {
             goto done;
         }
@@ -83,36 +105,23 @@ int k_delayed_work_submit_to_queue(struct k_work_q *work_q,
     err = 0;
 
 done:
-    irq_unlock(key);
+    k_spin_unlock(&work_q->lock, key);
 
     return err;
 }
 
 int k_delayed_work_cancel(struct k_delayed_work *work)
 {
-    unsigned int key = irq_lock();
-
-    if (!work->work_q) {
-        irq_unlock(key);
-        return -EINVAL;
-    }
-
-    if (k_work_pending(&work->work)) {
-        /* Remove from the queue if already submitted */
-        if (!k_queue_remove(&work->work_q->queue, &work->work)) {
-            irq_unlock(key);
-            return -EINVAL;
-        }
-    } else {
-        (void)_abort_timeout(&work->timeout);
-    }
+    struct k_spinlock *lock = &work->work_q->lock;
+    k_spinlock_key_t key = k_spin_lock(lock);
+    int ret = work_cancel(work);
 
-    /* Detach from workqueue */
-    work->work_q = NULL;
-
-    atomic_clear_bit(work->work.flags, K_WORK_STATE_PENDING);
-    irq_unlock(key);
-
-    return 0;
+    k_spin_unlock(lock, key);
 
+    return ret;
 }
 
 #endif /* CONFIG_SYS_CLOCK_EXISTS */