kernel/mutex: Fix locking to be SMP-safe

The mutex locking was written to use k_sched_lock(), which doesn't
work as a synchronization primitive if there is another CPU running
(it prevents the current CPU from preempting the thread, but says
nothing about what the other CPUs are doing).
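
For illustration only (this snippet is not from the patch): with just the
scheduler lock held, two CPUs can run the uncontended acquisition path at
the same time, because each CPU's sched lock pins only its own current
thread:

    /* Illustrative race, not code from this commit: k_sched_lock()
     * only disables preemption on the calling CPU, so on SMP two
     * CPUs can both pass this ownership test and claim the mutex.
     */
    k_sched_lock();
    if (mutex->lock_count == 0U) {   /* CPU 0 and CPU 1 both read 0 */
            mutex->lock_count = 1U;
            mutex->owner = _current; /* ...and both become "owner" */
    }
    k_sched_unlock();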

Use the pre-existing spinlock for all synchronization.  One wrinkle is
that the priority code needed to call z_thread_priority_set(), which
is a rescheduling call that cannot be made with a lock held.  So that
got split out into a low-level utility that updates the scheduler
state but lets the caller defer yielding until later.
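
A minimal sketch of the resulting pattern, using the helpers this patch
introduces (the wrapper function itself is made up for illustration):

    /* Sketch only: z_set_prio() updates run queue state and reports
     * whether a reschedule is needed; the caller stays inside its
     * spinlock and yields, at most once, on the way out.
     */
    static void example_prio_change_locked(struct k_thread *thread, int prio)
    {
            k_spinlock_key_t key = k_spin_lock(&lock);
            bool resched = z_set_prio(thread, prio); /* no yield here */

            /* ... other state updates under the same lock ... */

            if (resched) {
                    z_reschedule(&lock, key); /* drops lock, may switch */
            } else {
                    k_spin_unlock(&lock, key);
            }
    }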

Fixes #17584

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Andy Ross, 2019-08-20 11:21:28 -07:00 (committed by Anas Nashif)
commit 6f13980fc7
3 changed files with 28 additions and 14 deletions


@@ -51,6 +51,7 @@ struct k_thread *z_unpend_first_thread(_wait_q_t *wait_q);
 void z_unpend_thread(struct k_thread *thread);
 int z_unpend_all(_wait_q_t *wait_q);
 void z_thread_priority_set(struct k_thread *thread, int prio);
+bool z_set_prio(struct k_thread *thread, int prio);
 void *z_get_next_switch_handle(void *interrupted);
 struct k_thread *z_find_first_thread_to_unpend(_wait_q_t *wait_q,
                                                struct k_thread *from);
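
Paraphrased contracts for the two calls declared side by side above,
inferred from this patch rather than quoted from the tree:

    /* void z_thread_priority_set(thread, prio):
     *     changes priority and may reschedule immediately; must not
     *     be called with a spinlock held.
     *
     * bool z_set_prio(thread, prio):
     *     changes priority and run queue state only; returns true if
     *     the caller should reschedule once it drops its lock.
     */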


@@ -99,7 +99,7 @@ static s32_t new_prio_for_inheritance(s32_t target, s32_t limit)
         return new_prio;
 }
 
-static void adjust_owner_prio(struct k_mutex *mutex, s32_t new_prio)
+static bool adjust_owner_prio(struct k_mutex *mutex, s32_t new_prio)
 {
         if (mutex->owner->base.prio != new_prio) {
@@ -108,17 +108,19 @@ static void adjust_owner_prio(struct k_mutex *mutex, s32_t new_prio)
                         'y' : 'n',
                         new_prio, mutex->owner->base.prio);
 
-                z_thread_priority_set(mutex->owner, new_prio);
+                return z_set_prio(mutex->owner, new_prio);
         }
+        return false;
 }
 
 int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
 {
         int new_prio;
         k_spinlock_key_t key;
+        bool resched = false;
 
         sys_trace_void(SYS_TRACE_ID_MUTEX_LOCK);
 
-        z_sched_lock();
+        key = k_spin_lock(&lock);
 
         if (likely((mutex->lock_count == 0U) || (mutex->owner == _current))) {
@@ -133,14 +135,14 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
                         _current, mutex, mutex->lock_count,
                         mutex->owner_orig_prio);
 
-                k_sched_unlock();
+                k_spin_unlock(&lock, key);
 
                 sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
                 return 0;
         }
 
         if (unlikely(timeout == (s32_t)K_NO_WAIT)) {
-                k_sched_unlock();
+                k_spin_unlock(&lock, key);
                 sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
                 return -EBUSY;
         }
@@ -148,12 +150,10 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
         new_prio = new_prio_for_inheritance(_current->base.prio,
                                             mutex->owner->base.prio);
 
-        key = k_spin_lock(&lock);
-
         K_DEBUG("adjusting prio up on mutex %p\n", mutex);
 
         if (z_is_prio_higher(new_prio, mutex->owner->base.prio)) {
-                adjust_owner_prio(mutex, new_prio);
+                resched = adjust_owner_prio(mutex, new_prio);
         }
 
         int got_mutex = z_pend_curr(&lock, key, &mutex->wait_q, timeout);
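
One subtlety in the hunk above: the deleted k_spin_lock() is not replaced,
because the key taken at the top of the function is still live here and
z_pend_curr() consumes it. A hedged reading of that call, inferred from
how it is used in this diff rather than from its definition:

    /* z_pend_curr(&lock, key, &mutex->wait_q, timeout), as used here:
     * pends _current on the mutex wait queue, releasing the spinlock
     * (taken with "key") as part of the switch; returns 0 if woken as
     * the new owner, nonzero if the wait timed out.
     */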
@@ -164,7 +164,6 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
                 got_mutex ? 'y' : 'n');
 
         if (got_mutex == 0) {
-                k_sched_unlock();
                 sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
                 return 0;
         }
@@ -173,6 +172,8 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
         K_DEBUG("%p timeout on mutex %p\n", _current, mutex);
 
+        key = k_spin_lock(&lock);
+
         struct k_thread *waiter = z_waitq_head(&mutex->wait_q);
 
         new_prio = mutex->owner_orig_prio;
@@ -182,11 +183,13 @@ int z_impl_k_mutex_lock(struct k_mutex *mutex, s32_t timeout)
         K_DEBUG("adjusting prio down on mutex %p\n", mutex);
 
-        key = k_spin_lock(&lock);
-        adjust_owner_prio(mutex, new_prio);
-        k_spin_unlock(&lock, key);
+        resched = adjust_owner_prio(mutex, new_prio) || resched;
 
-        k_sched_unlock();
+        if (resched) {
+                z_reschedule(&lock, key);
+        } else {
+                k_spin_unlock(&lock, key);
+        }
 
         sys_trace_end_call(SYS_TRACE_ID_MUTEX_LOCK);
         return -EAGAIN;
 }


@@ -477,7 +477,10 @@ void z_unpend_thread(struct k_thread *thread)
         (void)z_abort_thread_timeout(thread);
 }
 
-void z_thread_priority_set(struct k_thread *thread, int prio)
+/* Priority set utility that does no rescheduling, it just changes the
+ * run queue state, returning true if a reschedule is needed later.
+ */
+bool z_set_prio(struct k_thread *thread, int prio)
 {
         bool need_sched = 0;
 
@@ -500,6 +503,13 @@ void z_thread_priority_set(struct k_thread *thread, int prio)
         }
 
         sys_trace_thread_priority_set(thread);
+
+        return need_sched;
+}
+
+void z_thread_priority_set(struct k_thread *thread, int prio)
+{
+        bool need_sched = z_set_prio(thread, prio);
 
         if (IS_ENABLED(CONFIG_SMP) &&
             !IS_ENABLED(CONFIG_SCHED_IPI_SUPPORTED)) {
                 z_sched_ipi();