kernel: need to release spinlock before busy_wait

The spinlock needs to be released before calling busy_wait;
otherwise, other cores cannot acquire the spinlock while the
holder is busy waiting.

Signed-off-by: Wayne Ren <wei.ren@synopsys.com>
commit b1fbe85156
Author:    Wayne Ren <wei.ren@synopsys.com>
Committer: Anas Nashif
Date:      2019-10-14 22:14:28 +08:00

@@ -1047,6 +1047,8 @@ void z_sched_ipi(void)
 
 void z_sched_abort(struct k_thread *thread)
 {
+	k_spinlock_key_t key;
+
 	if (thread == _current) {
 		z_remove_thread_from_ready_q(thread);
 		return;
@@ -1065,17 +1067,19 @@ void z_sched_abort(struct k_thread *thread)
 	 * running on or because we caught it idle in the queue
 	 */
 	while ((thread->base.thread_state & _THREAD_DEAD) == 0U) {
-		LOCKED(&sched_spinlock) {
-			if (z_is_thread_prevented_from_running(thread)) {
-				__ASSERT(!z_is_thread_queued(thread), "");
-				thread->base.thread_state |= _THREAD_DEAD;
-			} else if (z_is_thread_queued(thread)) {
-				_priq_run_remove(&_kernel.ready_q.runq, thread);
-				z_mark_thread_as_not_queued(thread);
-				thread->base.thread_state |= _THREAD_DEAD;
-			} else {
-				k_busy_wait(100);
-			}
+		key = k_spin_lock(&sched_spinlock);
+		if (z_is_thread_prevented_from_running(thread)) {
+			__ASSERT(!z_is_thread_queued(thread), "");
+			thread->base.thread_state |= _THREAD_DEAD;
+			k_spin_unlock(&sched_spinlock, key);
+		} else if (z_is_thread_queued(thread)) {
+			_priq_run_remove(&_kernel.ready_q.runq, thread);
+			z_mark_thread_as_not_queued(thread);
+			thread->base.thread_state |= _THREAD_DEAD;
+			k_spin_unlock(&sched_spinlock, key);
+		} else {
+			k_spin_unlock(&sched_spinlock, key);
+			k_busy_wait(100);
 		}
 	}
 }
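
For reference, the same unlock-before-busy_wait pattern in isolation. This is
only an illustrative sketch, not code from the commit: poll_lock, signalled and
wait_for_signal are made-up names, while struct k_spinlock, k_spin_lock(),
k_spin_unlock() and k_busy_wait() are the Zephyr APIs used in the diff above.

#include <stdbool.h>
#include <kernel.h>	/* <zephyr/kernel.h> in newer Zephyr trees */

/* Hypothetical lock and flag, only for illustration. */
static struct k_spinlock poll_lock;
static volatile bool signalled;

void wait_for_signal(void)
{
	while (true) {
		k_spinlock_key_t key = k_spin_lock(&poll_lock);

		if (signalled) {
			/* Condition met: leave with the lock released. */
			k_spin_unlock(&poll_lock, key);
			return;
		}

		/*
		 * Release the lock *before* spinning, so the CPU that
		 * must take poll_lock to set 'signalled' can acquire it;
		 * busy-waiting with the lock held would stall that core.
		 */
		k_spin_unlock(&poll_lock, key);
		k_busy_wait(100);
	}
}

The scheduler fix above follows the same shape: every path out of the locked
region calls k_spin_unlock() first, and only the "thread is still running, try
again" path then calls k_busy_wait(100).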