kernel: sem: handle resets with outstanding waiting threads

Previously, a k_sem_reset with outstanding waiting threads would leave
the semaphore in an inconsistent state, with more threads waiting in
the wait_q than the count would indicate.

Explicitly return -EAGAIN to any outstanding waiters upon k_sem_reset
to ensure the wait_q and the count remain consistent.

Signed-off-by: James Harris <james.harris@intel.com>
James Harris 2021-03-04 15:47:27 -08:00 committed by Anas Nashif
commit 53b8179371
2 changed files with 24 additions and 10 deletions
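
With this change, a thread blocked in k_sem_take() is woken with -EAGAIN when
another thread calls k_sem_reset(). A minimal caller-side sketch of handling
that return value (the my_sem semaphore and the consumer() entry point are
illustrative names, not part of the commit; the include path follows current
Zephyr, older trees of this vintage use <kernel.h>):

#include <zephyr/kernel.h>

K_SEM_DEFINE(my_sem, 0, 1);

void consumer(void *p1, void *p2, void *p3)
{
	while (true) {
		int ret = k_sem_take(&my_sem, K_FOREVER);

		if (ret == -EAGAIN) {
			/* The semaphore was reset while this thread was
			 * pended; before this commit the thread would have
			 * stayed stuck on the wait_q.
			 */
			break;
		}
		/* ret == 0: semaphore taken, process the event. */
	}
}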


@@ -2769,7 +2769,8 @@ __syscall int k_sem_init(struct k_sem *sem, unsigned int initial_count,
  *
  * @retval 0 Semaphore taken.
  * @retval -EBUSY Returned without waiting.
- * @retval -EAGAIN Waiting period timed out.
+ * @retval -EAGAIN Waiting period timed out,
+ *			or the semaphore was reset during the waiting period.
  */
 __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
@@ -2788,9 +2789,11 @@ __syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);
 __syscall void k_sem_give(struct k_sem *sem);
 /**
- * @brief Reset a semaphore's count to zero.
+ * @brief Resets a semaphore's count to zero.
  *
  * This routine sets the count of @a sem to zero.
+ * Any outstanding semaphore takes will be aborted
+ * with -EAGAIN.
  *
  * @param sem Address of the semaphore.
  *
@@ -2798,14 +2801,6 @@ __syscall void k_sem_give(struct k_sem *sem);
  */
 __syscall void k_sem_reset(struct k_sem *sem);
-/**
- * @internal
- */
-static inline void z_impl_k_sem_reset(struct k_sem *sem)
-{
-	sem->count = 0U;
-}
 /**
  * @brief Get a semaphore's count.
  *


@@ -163,6 +163,25 @@ out:
 	return ret;
 }
+void z_impl_k_sem_reset(struct k_sem *sem)
+{
+	struct k_thread *thread;
+	k_spinlock_key_t key = k_spin_lock(&lock);
+
+	while (true) {
+		thread = z_unpend_first_thread(&sem->wait_q);
+		if (thread == NULL) {
+			break;
+		}
+		arch_thread_return_value_set(thread, -EAGAIN);
+		z_ready_thread(thread);
+	}
+	sem->count = 0;
+	handle_poll_events(sem);
+	z_reschedule(&lock, key);
+}
 #ifdef CONFIG_USERSPACE
 static inline int z_vrfy_k_sem_take(struct k_sem *sem, k_timeout_t timeout)
 {
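
The new z_impl_k_sem_reset() unpends every waiter, sets -EAGAIN as its
k_sem_take() return value, readies it, zeroes the count, signals poll events,
and reschedules. A rough sketch of exercising that behavior from application
code (thread name, stack size, priority, and the use of int main() are
illustrative only; trees of this vintage use void main(void)):

#include <zephyr/kernel.h>

K_SEM_DEFINE(reset_sem, 0, 1);

static void waiter(void *p1, void *p2, void *p3)
{
	/* Blocks until the semaphore is given or reset. */
	int ret = k_sem_take(&reset_sem, K_FOREVER);

	/* With this commit, ret is -EAGAIN once the semaphore is reset;
	 * before it, this thread would have blocked forever.
	 */
	printk("k_sem_take returned %d\n", ret);
}

K_THREAD_DEFINE(waiter_tid, 1024, waiter, NULL, NULL, NULL, 5, 0, 0);

int main(void)
{
	k_sleep(K_MSEC(100));    /* let the waiter pend on the semaphore */
	k_sem_reset(&reset_sem); /* wakes the waiter with -EAGAIN */
	return 0;
}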