kernel: need to release spinlock before busy_wait

Release the spinlock before calling k_busy_wait(); otherwise, other cores cannot
acquire the spinlock while the holder is busy waiting.
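To illustrate the intended pattern, here is a minimal standalone sketch (not the kernel code itself): take the lock, release it on every branch, and only call k_busy_wait() once the lock has been dropped, so the core that has to change the awaited state can still acquire it. The lock and the `done` flag below are placeholders invented for the example; k_spin_lock(), k_spin_unlock() and k_busy_wait() are the real Zephyr APIs used by this patch.

#include <stdbool.h>
#include <kernel.h>

static struct k_spinlock my_lock;	/* placeholder lock, not sched_spinlock */
static volatile bool done;		/* placeholder for the awaited state */

static void wait_for_done(void)
{
	while (!done) {
		k_spinlock_key_t key = k_spin_lock(&my_lock);

		if (done) {
			/* State settled while we were acquiring the lock. */
			k_spin_unlock(&my_lock, key);
		} else {
			/* Drop the lock BEFORE spinning; busy-waiting with it
			 * held would starve the core that must set `done`.
			 */
			k_spin_unlock(&my_lock, key);
			k_busy_wait(100);	/* spin for 100 microseconds */
		}
	}
}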

Signed-off-by: Wayne Ren <wei.ren@synopsys.com>
Wayne Ren, 2019-10-14 22:14:28 +08:00; committed by Anas Nashif
commit b1fbe85156

@@ -1047,6 +1047,8 @@ void z_sched_ipi(void)
 
 void z_sched_abort(struct k_thread *thread)
 {
+	k_spinlock_key_t key;
+
 	if (thread == _current) {
 		z_remove_thread_from_ready_q(thread);
 		return;
@@ -1065,20 +1067,22 @@ void z_sched_abort(struct k_thread *thread)
 	 * running on or because we caught it idle in the queue
 	 */
 	while ((thread->base.thread_state & _THREAD_DEAD) == 0U) {
-		LOCKED(&sched_spinlock) {
-			if (z_is_thread_prevented_from_running(thread)) {
-				__ASSERT(!z_is_thread_queued(thread), "");
-				thread->base.thread_state |= _THREAD_DEAD;
-			} else if (z_is_thread_queued(thread)) {
-				_priq_run_remove(&_kernel.ready_q.runq, thread);
-				z_mark_thread_as_not_queued(thread);
-				thread->base.thread_state |= _THREAD_DEAD;
-			} else {
-				k_busy_wait(100);
-			}
+		key = k_spin_lock(&sched_spinlock);
+		if (z_is_thread_prevented_from_running(thread)) {
+			__ASSERT(!z_is_thread_queued(thread), "");
+			thread->base.thread_state |= _THREAD_DEAD;
+			k_spin_unlock(&sched_spinlock, key);
+		} else if (z_is_thread_queued(thread)) {
+			_priq_run_remove(&_kernel.ready_q.runq, thread);
+			z_mark_thread_as_not_queued(thread);
+			thread->base.thread_state |= _THREAD_DEAD;
+			k_spin_unlock(&sched_spinlock, key);
+		} else {
+			k_spin_unlock(&sched_spinlock, key);
+			k_busy_wait(100);
 		}
 	}
 }
 #endif
 
 #ifdef CONFIG_USERSPACE
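For context, the LOCKED() block that this patch removes from the abort loop is, roughly, a for-loop wrapper over the spinlock (a sketch of its shape, not a verbatim copy of the helper in kernel/sched.c): it takes the lock when the block is entered and releases it only when control leaves the block, which is why the k_busy_wait() call could not be taken out of the critical section without switching to explicit k_spin_lock()/k_spin_unlock() calls as done above.

/* Approximate shape of the LOCKED() helper (sketch, not verbatim):
 * the spinlock is released only when control leaves the block, so
 * anything inside it, including k_busy_wait(), runs with the lock held.
 */
#define LOCKED(lck) \
	for (k_spinlock_key_t __i = {}, __key = k_spin_lock(lck); \
	     __i.key == 0; \
	     k_spin_unlock(lck, __key), __i.key = 1)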