kernel: mutex: add error checking

k_mutex_unlock() now performs error checking and returns an error code
on failure.

If the current thread does not own the mutex, we now return -EPERM. If
the mutex is not locked at all, we now return -EINVAL. In the unlikely
situation where we own the lock but the lock count is zero, we assert:
this is considered undefined behaviour and should not happen.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
commit 86bb2d06d7
Authored by Anas Nashif on 2019-05-04 10:18:13 -04:00
2 changed files with 42 additions and 14 deletions
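
For callers, the visible effect is that k_mutex_unlock() can now be
checked instead of trusted blindly. A minimal usage sketch (the worker()
function and my_mutex are illustrative, not part of this commit;
K_MUTEX_DEFINE and k_mutex_lock are the existing Zephyr APIs):

    #include <zephyr.h>
    #include <errno.h>

    K_MUTEX_DEFINE(my_mutex);   /* statically defined and initialized */

    void worker(void)
    {
        k_mutex_lock(&my_mutex, K_FOREVER);

        /* ... critical section ... */

        int ret = k_mutex_unlock(&my_mutex);
        if (ret == -EPERM) {
            /* this thread does not own the mutex */
        } else if (ret == -EINVAL) {
            /* the mutex was not locked at all */
        }
    }

In this correct lock/unlock sequence the unlock returns 0; the two
branches show the new failure paths that previously ended in an
assertion.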

include/kernel.h

@@ -3346,10 +3346,12 @@ struct k_mutex {
  *
  * @param mutex Address of the mutex.
  *
- * @return N/A
+ * @retval 0 Mutex object created
+ *
  * @req K-MUTEX-002
  */
-__syscall void k_mutex_init(struct k_mutex *mutex);
+__syscall int k_mutex_init(struct k_mutex *mutex);
 
 /**
  * @brief Lock a mutex.
@@ -3385,10 +3387,13 @@ __syscall int k_mutex_lock(struct k_mutex *mutex, s32_t timeout);
  *
  * @param mutex Address of the mutex.
  *
- * @return N/A
+ * @retval 0 Mutex unlocked.
+ * @retval -EPERM The current thread does not own the mutex
+ * @retval -EINVAL The mutex is not locked
+ *
  * @req K-MUTEX-002
  */
-__syscall void k_mutex_unlock(struct k_mutex *mutex);
+__syscall int k_mutex_unlock(struct k_mutex *mutex);
 
 /**
  * @}
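
With k_mutex_init() now returning an int, a runtime-initialized mutex
can propagate its result like any other init call; in this commit the
function always returns 0, so checking it is future-proofing. A short
sketch (my_module, mod, and my_module_init are hypothetical names, not
part of the commit):

    #include <kernel.h>

    struct my_module {          /* hypothetical module state */
        struct k_mutex lock;
    };

    static struct my_module mod;

    int my_module_init(void)
    {
        /* forward the (currently always-0) result of k_mutex_init() */
        return k_mutex_init(&mod.lock);
    }

Returning int also keeps the init/lock/unlock triple consistent and
leaves room for future failure modes.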

kernel/mutex.c

@@ -38,6 +38,7 @@
 #include <init.h>
 #include <syscall_handler.h>
 #include <debug/tracing.h>
+#include <sys/check.h>
 
 /* We use a global spinlock here because some of the synchronization
  * is protecting things like owner thread priorities which aren't
@@ -67,7 +68,7 @@ SYS_INIT(init_mutex_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
 #endif /* CONFIG_OBJECT_TRACING */
 
-void z_impl_k_mutex_init(struct k_mutex *mutex)
+int z_impl_k_mutex_init(struct k_mutex *mutex)
 {
         mutex->owner = NULL;
         mutex->lock_count = 0U;
@@ -79,13 +80,15 @@ void z_impl_k_mutex_init(struct k_mutex *mutex)
         SYS_TRACING_OBJ_INIT(k_mutex, mutex);
         z_object_init(mutex);
         sys_trace_end_call(SYS_TRACE_ID_MUTEX_INIT);
+
+        return 0;
 }
 
 #ifdef CONFIG_USERSPACE
-static inline void z_vrfy_k_mutex_init(struct k_mutex *mutex)
+static inline int z_vrfy_k_mutex_init(struct k_mutex *mutex)
 {
         Z_OOPS(Z_SYSCALL_OBJ_INIT(mutex, K_OBJ_MUTEX));
-        z_impl_k_mutex_init(mutex);
+        return z_impl_k_mutex_init(mutex);
 }
 #include <syscalls/k_mutex_init_mrsh.c>
 #endif
@@ -203,18 +206,37 @@ static inline int z_vrfy_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 #include <syscalls/k_mutex_lock_mrsh.c>
 #endif
 
-void z_impl_k_mutex_unlock(struct k_mutex *mutex)
+int z_impl_k_mutex_unlock(struct k_mutex *mutex)
 {
         struct k_thread *new_owner;
 
-        __ASSERT(mutex->lock_count > 0U, "");
-        __ASSERT(mutex->owner == _current, "");
+        CHECKIF(mutex->owner == NULL) {
+                return -EINVAL;
+        }
+        /*
+         * The current thread does not own the mutex.
+         */
+        CHECKIF(mutex->owner != _current) {
+                return -EPERM;
+        }
+
+        /*
+         * Attempt to unlock a mutex which is unlocked. mutex->lock_count
+         * cannot be zero if the current thread is equal to mutex->owner,
+         * therefore no underflow check is required. Use assert to catch
+         * undefined behavior.
+         */
+        __ASSERT_NO_MSG(mutex->lock_count > 0U);
 
         sys_trace_void(SYS_TRACE_ID_MUTEX_UNLOCK);
         z_sched_lock();
 
         K_DEBUG("mutex %p lock_count: %d\n", mutex, mutex->lock_count);
 
+        /*
+         * If we are the owner and count is greater than 1, then decrement
+         * the count and return and keep current thread as the owner.
+         */
         if (mutex->lock_count - 1U != 0U) {
                 mutex->lock_count--;
                 goto k_mutex_unlock_return;
@@ -224,6 +246,7 @@ void z_impl_k_mutex_unlock(struct k_mutex *mutex)
 
         adjust_owner_prio(mutex, mutex->owner_orig_prio);
 
+        /* Get the new owner, if any */
         new_owner = z_unpend_first_thread(&mutex->wait_q);
 
         mutex->owner = new_owner;
@@ -250,15 +273,15 @@ void z_impl_k_mutex_unlock(struct k_mutex *mutex)
 k_mutex_unlock_return:
         k_sched_unlock();
         sys_trace_end_call(SYS_TRACE_ID_MUTEX_UNLOCK);
+
+        return 0;
 }
 
 #ifdef CONFIG_USERSPACE
-static inline void z_vrfy_k_mutex_unlock(struct k_mutex *mutex)
+static inline int z_vrfy_k_mutex_unlock(struct k_mutex *mutex)
 {
         Z_OOPS(Z_SYSCALL_OBJ(mutex, K_OBJ_MUTEX));
-        Z_OOPS(Z_SYSCALL_VERIFY(mutex->lock_count > 0));
-        Z_OOPS(Z_SYSCALL_VERIFY(mutex->owner == _current));
-        z_impl_k_mutex_unlock(mutex);
+        return z_impl_k_mutex_unlock(mutex);
 }
 #include <syscalls/k_mutex_unlock_mrsh.c>
 #endif
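
The checks above rely on the CHECKIF() macro that this commit pulls in
from <sys/check.h>. The idea, in a simplified sketch (not a verbatim
copy of the header), is that a failed check can assert, be compiled out,
or fall through into the error-return block, depending on Kconfig:

    /* Simplified sketch of the CHECKIF() pattern from <sys/check.h> */
    #if defined(CONFIG_ASSERT_ON_ERRORS)
    #define CHECKIF(expr) \
            __ASSERT_NO_MSG(!(expr)); \
            if (0)              /* error block becomes dead code */
    #elif defined(CONFIG_NO_RUNTIME_CHECKS)
    #define CHECKIF(...) \
            if (0)              /* check compiled out entirely */
    #else
    #define CHECKIF(expr) \
            if (expr)           /* plain runtime check */
    #endif

So by default CHECKIF(mutex->owner != _current) { return -EPERM; }
behaves like a plain if; builds that prefer hard failures get an
assertion instead, and size-constrained builds can drop the checks and
their error returns altogether.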