From 86bb2d06d74c31724fdb06f5f071faf1fcbe487f Mon Sep 17 00:00:00 2001 From: Anas Nashif Date: Sat, 4 May 2019 10:18:13 -0400 Subject: [PATCH] kernel: mutex: add error checking k_mutex_unlock will now perform error checking and return on failures. If the current thread does not own the mutex, we will now return -EPERM. In the unlikely situation where we own a lock and the lock count is zero, we assert. This is considered an undefined behaviour and should not happen. Signed-off-by: Anas Nashif --- include/kernel.h | 13 +++++++++---- kernel/mutex.c | 43 +++++++++++++++++++++++++++++++++---------- 2 files changed, 42 insertions(+), 14 deletions(-) diff --git a/include/kernel.h b/include/kernel.h index c16d583d975..1587b784ca3 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -3346,10 +3346,12 @@ struct k_mutex { * * @param mutex Address of the mutex. * - * @return N/A + * @retval 0 Mutex object created + * * @req K-MUTEX-002 */ -__syscall void k_mutex_init(struct k_mutex *mutex); +__syscall int k_mutex_init(struct k_mutex *mutex); + /** * @brief Lock a mutex. @@ -3385,10 +3387,13 @@ __syscall int k_mutex_lock(struct k_mutex *mutex, s32_t timeout); * * @param mutex Address of the mutex. * - * @return N/A + * @retval 0 Mutex unlocked. 
+ * @retval -EPERM The current thread does not own the mutex + * @retval -EINVAL The mutex is not locked + * * @req K-MUTEX-002 */ -__syscall void k_mutex_unlock(struct k_mutex *mutex); +__syscall int k_mutex_unlock(struct k_mutex *mutex); /** * @} diff --git a/kernel/mutex.c b/kernel/mutex.c index 60338b6eb3d..ecf9e135f66 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -38,6 +38,7 @@ #include #include #include +#include /* We use a global spinlock here because some of the synchronization * is protecting things like owner thread priorities which aren't @@ -67,7 +68,7 @@ SYS_INIT(init_mutex_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS); #endif /* CONFIG_OBJECT_TRACING */ -void z_impl_k_mutex_init(struct k_mutex *mutex) +int z_impl_k_mutex_init(struct k_mutex *mutex) { mutex->owner = NULL; mutex->lock_count = 0U; @@ -79,13 +80,15 @@ void z_impl_k_mutex_init(struct k_mutex *mutex) SYS_TRACING_OBJ_INIT(k_mutex, mutex); z_object_init(mutex); sys_trace_end_call(SYS_TRACE_ID_MUTEX_INIT); + + return 0; } #ifdef CONFIG_USERSPACE -static inline void z_vrfy_k_mutex_init(struct k_mutex *mutex) +static inline int z_vrfy_k_mutex_init(struct k_mutex *mutex) { Z_OOPS(Z_SYSCALL_OBJ_INIT(mutex, K_OBJ_MUTEX)); - z_impl_k_mutex_init(mutex); + return z_impl_k_mutex_init(mutex); } #include #endif @@ -203,18 +206,37 @@ static inline int z_vrfy_k_mutex_lock(struct k_mutex *mutex, s32_t timeout) #include #endif -void z_impl_k_mutex_unlock(struct k_mutex *mutex) +int z_impl_k_mutex_unlock(struct k_mutex *mutex) { struct k_thread *new_owner; - __ASSERT(mutex->lock_count > 0U, ""); - __ASSERT(mutex->owner == _current, ""); + CHECKIF(mutex->owner == NULL) { + return -EINVAL; + } + /* + * The current thread does not own the mutex. + */ + CHECKIF(mutex->owner != _current) { + return -EPERM; + } + + /* + * Attempt to unlock a mutex which is unlocked. mutex->lock_count + * cannot be zero if the current thread is equal to mutex->owner, + * therefore no underflow check is required. 
Use assert to catch + * undefined behavior. + */ + __ASSERT_NO_MSG(mutex->lock_count > 0U); sys_trace_void(SYS_TRACE_ID_MUTEX_UNLOCK); z_sched_lock(); K_DEBUG("mutex %p lock_count: %d\n", mutex, mutex->lock_count); + /* + * If we are the owner and count is greater than 1, then decrement + * the count and return and keep current thread as the owner. + */ if (mutex->lock_count - 1U != 0U) { mutex->lock_count--; goto k_mutex_unlock_return; @@ -224,6 +246,7 @@ void z_impl_k_mutex_unlock(struct k_mutex *mutex) adjust_owner_prio(mutex, mutex->owner_orig_prio); + /* Get the new owner, if any */ new_owner = z_unpend_first_thread(&mutex->wait_q); mutex->owner = new_owner; @@ -250,15 +273,15 @@ void z_impl_k_mutex_unlock(struct k_mutex *mutex) k_mutex_unlock_return: k_sched_unlock(); sys_trace_end_call(SYS_TRACE_ID_MUTEX_UNLOCK); + + return 0; } #ifdef CONFIG_USERSPACE -static inline void z_vrfy_k_mutex_unlock(struct k_mutex *mutex) +static inline int z_vrfy_k_mutex_unlock(struct k_mutex *mutex) { Z_OOPS(Z_SYSCALL_OBJ(mutex, K_OBJ_MUTEX)); - Z_OOPS(Z_SYSCALL_VERIFY(mutex->lock_count > 0)); - Z_OOPS(Z_SYSCALL_VERIFY(mutex->owner == _current)); - z_impl_k_mutex_unlock(mutex); + return z_impl_k_mutex_unlock(mutex); } #include #endif