diff --git a/include/kernel.h b/include/kernel.h
index 835a99fe1db..b9d09d96b80 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -3020,8 +3020,9 @@ int k_work_cancel(struct k_work *work);
  * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
  * must be allocated in coherent memory.
  *
- * @retval true if work was not idle (call had to wait for cancellation to
- * complete);
+ * @retval true if work was pending (call had to wait for cancellation of a
+ * running handler to complete, or scheduled or submitted operations were
+ * cancelled);
  * @retval false otherwise
  */
 bool k_work_cancel_sync(struct k_work *work, struct k_work_sync *sync);
@@ -3357,8 +3358,9 @@ int k_work_cancel_delayable(struct k_work_delayable *dwork);
  * one completes. On architectures with CONFIG_KERNEL_COHERENCE the object
  * must be allocated in coherent memory.
  *
- * @retval true if work was not idle (call had to wait for cancellation to
- * complete);
+ * @retval true if work was pending (call had to wait for cancellation of a
+ * running handler to complete, or scheduled or submitted operations were
+ * cancelled);
  * @retval false otherwise
  */
 bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
diff --git a/kernel/work.c b/kernel/work.c
index 1e31a9e7772..cc94560c291 100644
--- a/kernel/work.c
+++ b/kernel/work.c
@@ -524,10 +524,13 @@ bool k_work_cancel_sync(struct k_work *work,
 
 	struct z_work_canceller *canceller = &sync->canceller;
 	k_spinlock_key_t key = k_spin_lock(&lock);
+	bool pending = (work_busy_get_locked(work) != 0U);
+	bool need_wait = false;
 
-	(void)cancel_async_locked(work);
-
-	bool need_wait = cancel_sync_locked(work, canceller);
+	if (pending) {
+		(void)cancel_async_locked(work);
+		need_wait = cancel_sync_locked(work, canceller);
+	}
 
 	k_spin_unlock(&lock, key);
 
@@ -535,7 +538,7 @@ bool k_work_cancel_sync(struct k_work *work,
 		k_sem_take(&canceller->sem, K_FOREVER);
 	}
 
-	return need_wait;
+	return pending;
 }
 
 /* Work has been dequeued and is about to be invoked by the work
@@ -983,10 +986,13 @@ bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
 
 	struct z_work_canceller *canceller = &sync->canceller;
 	k_spinlock_key_t key = k_spin_lock(&lock);
+	bool pending = (work_delayable_busy_get_locked(dwork) != 0U);
+	bool need_wait = false;
 
-	(void)cancel_delayable_async_locked(dwork);
-
-	bool need_wait = cancel_sync_locked(&dwork->work, canceller);
+	if (pending) {
+		(void)cancel_delayable_async_locked(dwork);
+		need_wait = cancel_sync_locked(&dwork->work, canceller);
+	}
 
 	k_spin_unlock(&lock, key);
 
@@ -994,7 +1000,7 @@ bool k_work_cancel_delayable_sync(struct k_work_delayable *dwork,
 		k_sem_take(&canceller->sem, K_FOREVER);
 	}
 
-	return need_wait;
+	return pending;
 }
 
 bool k_work_flush_delayable(struct k_work_delayable *dwork,
diff --git a/tests/kernel/workq/work/src/main.c b/tests/kernel/workq/work/src/main.c
index 1f92414e22b..342fd59a05b 100644
--- a/tests/kernel/workq/work/src/main.c
+++ b/tests/kernel/workq/work/src/main.c
@@ -537,8 +537,10 @@ static void test_1cpu_queued_cancel_sync(void)
 	zassert_equal(rc, 1, NULL);
 	zassert_equal(coophi_counter(), 0, NULL);
 
-	/* Cancellation should complete immediately. */
-	zassert_false(k_work_cancel_sync(&work, &work_sync), NULL);
+	/* Cancellation should complete immediately, indicating that
+	 * work was pending.
+	 */
+	zassert_true(k_work_cancel_sync(&work, &work_sync), NULL);
 
 	/* Shouldn't have run. */
 	zassert_equal(coophi_counter(), 0, NULL);
@@ -582,8 +584,10 @@ static void test_1cpu_delayed_cancel_sync(void)
 	zassert_equal(rc, 1, NULL);
 	zassert_equal(coophi_counter(), 0, NULL);
 
-	/* Cancellation should complete immediately. */
-	zassert_false(k_work_cancel_delayable_sync(&dwork, &work_sync), NULL);
+	/* Cancellation should complete immediately, indicating that
+	 * work was pending.
+	 */
+	zassert_true(k_work_cancel_delayable_sync(&dwork, &work_sync), NULL);
 
 	/* Shouldn't have run. */
 	zassert_equal(coophi_counter(), 0, NULL);
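A minimal caller-side sketch of the revised contract (not part of the patch;
sample_handler, sample_work, and sample_teardown are hypothetical names, and
only k_work_cancel_sync(), K_WORK_DEFINE(), and struct k_work_sync are taken
from the kernel API touched above):

    #include <kernel.h>

    static void sample_handler(struct k_work *work)
    {
        /* Hypothetical handler body. */
    }

    /* File-scope work item, statically initialized. */
    K_WORK_DEFINE(sample_work, sample_handler);

    void sample_teardown(void)
    {
        /* Per the doc comment above, on CONFIG_KERNEL_COHERENCE
         * architectures this object must be in coherent memory.
         */
        struct k_work_sync sync;

        if (k_work_cancel_sync(&sample_work, &sync)) {
            /* With this patch: the item was queued, scheduled, or
             * running at the time of the call, and has now been
             * cancelled. Before the patch, true was returned only
             * when the call had to wait on a running handler, so a
             * queued but not-yet-running item reported false.
             */
        } else {
            /* The item was already idle; nothing to cancel. */
        }
    }

In both branches the work item is idle on return; the boolean now
distinguishes "something was cancelled" from "there was nothing to cancel",
which is the behavior the updated tests assert.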