kernel/sched: Don't preempt cooperative threads

The scheduler rewrite added a regression in uniprocessor mode where
cooperative threads would be unexpectedly preempted, because nothing
was checking the preemption status of _current at the point where the
next-thread cache pointer was being updated.

Note that update_cache() needs a little more context: spots like
k_yield() that leave _current runnable need to be able to tell it that
"yes, preemption is OK here even though the thread is cooperative".
So it has a "preempt_ok" argument now.
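
To spell that contract out in isolation, here is a standalone sketch of
the decision update_cache() now makes (pick_cached(), struct thread and
the "coop" flag are illustrative stand-ins for the scheduler internals,
not kernel APIs):

/* Sketch only: mirrors the cache-update decision, nothing more */
struct thread {
	int coop;	/* runs at a cooperative priority */
};

static struct thread *pick_cached(struct thread *current,
				  struct thread *best_ready,
				  int preempt_ok)
{
	/* Keep running a cooperative current thread unless the caller
	 * (e.g. k_yield()) explicitly allowed preemption.
	 */
	if (current != NULL && current->coop && !preempt_ok) {
		return current;
	}
	return best_ready;
}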

Interestingly, this didn't get caught because we don't test for it.  We
have lots and lots of tests of the converse cases (i.e. making sure
that threads get preempted when we expect them to), but nothing that
explicitly tries to jump in front of a cooperative thread.
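
For reference, a minimal sketch of the kind of test that would have
caught it, assuming ztest on a uniprocessor build (the names
test_no_coop_preempt, hi_thread, hi_stack and hi_ran are made up for
this sketch, not taken from the tree):

#include <zephyr.h>
#include <ztest.h>

#define STACKSZ 1024

K_THREAD_STACK_DEFINE(hi_stack, STACKSZ);
static struct k_thread hi_thread;
static volatile int hi_ran;

static void hi_entry(void *p1, void *p2, void *p3)
{
	hi_ran = 1;
}

static void test_no_coop_preempt(void)
{
	/* Assumes uniprocessor (no SMP), matching the regression above */

	/* Run this test thread at a cooperative priority */
	k_thread_priority_set(k_current_get(), K_PRIO_COOP(2));

	/* Make a higher-priority thread ready right away */
	k_thread_create(&hi_thread, hi_stack, K_THREAD_STACK_SIZEOF(hi_stack),
			hi_entry, NULL, NULL, NULL,
			K_PRIO_COOP(1), 0, K_NO_WAIT);

	/* A cooperative thread must not be preempted... */
	zassert_false(hi_ran, "cooperative thread was preempted");

	/* ...until it explicitly gives up the CPU */
	k_yield();
	zassert_true(hi_ran, "higher-priority thread never ran");
}

void test_main(void)
{
	ztest_test_suite(coop_no_preempt,
			 ztest_unit_test(test_no_coop_preempt));
	ztest_run_test_suite(coop_no_preempt);
}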

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
commit 1856e2206d
Author: Andy Ross, 2018-05-21 11:48:35 -07:00 (committed by Anas Nashif)

@@ -126,10 +126,21 @@ static struct k_thread *next_up(void)
 #endif
 }
 
-static void update_cache(void)
+static void update_cache(int preempt_ok)
 {
 #ifndef CONFIG_SMP
-	_kernel.ready_q.cache = next_up();
+	struct k_thread *th = next_up();
+
+	if (_current && !_is_idle(_current) && !_is_thread_dummy(_current)) {
+		/* Don't preempt cooperative threads unless the caller allows
+		 * it (i.e. k_yield())
+		 */
+		if (!preempt_ok && !_is_preempt(_current)) {
+			th = _current;
+		}
+	}
+
+	_kernel.ready_q.cache = th;
 #endif
 }
 
@@ -138,7 +149,7 @@ void _add_thread_to_ready_q(struct k_thread *thread)
 	LOCKED(&sched_lock) {
 		_priq_run_add(&_kernel.ready_q.runq, thread);
 		_mark_thread_as_queued(thread);
-		update_cache();
+		update_cache(0);
 	}
 }
 
@@ -148,7 +159,7 @@ void _move_thread_to_end_of_prio_q(struct k_thread *thread)
 		_priq_run_remove(&_kernel.ready_q.runq, thread);
 		_priq_run_add(&_kernel.ready_q.runq, thread);
 		_mark_thread_as_queued(thread);
-		update_cache();
+		update_cache(0);
 	}
 }
 
@@ -158,7 +169,7 @@ void _remove_thread_from_ready_q(struct k_thread *thread)
 		if (_is_thread_queued(thread)) {
 			_priq_run_remove(&_kernel.ready_q.runq, thread);
 			_mark_thread_as_not_queued(thread);
-			update_cache();
+			update_cache(thread == _current);
 		}
 	}
 }
@@ -277,7 +288,7 @@ void _thread_priority_set(struct k_thread *thread, int prio)
 			_priq_run_remove(&_kernel.ready_q.runq, thread);
 			thread->base.prio = prio;
 			_priq_run_add(&_kernel.ready_q.runq, thread);
-			update_cache();
+			update_cache(1);
 		} else {
 			thread->base.prio = prio;
 		}
@@ -302,7 +313,9 @@ int _reschedule(int key)
 
 void k_sched_lock(void)
 {
-	_sched_lock();
+	LOCKED(&sched_lock) {
+		_sched_lock();
+	}
 }
 
 void k_sched_unlock(void)
@@ -311,16 +324,15 @@ void k_sched_unlock(void)
 	__ASSERT(_current->base.sched_locked != 0, "");
 	__ASSERT(!_is_in_isr(), "");
 
-	int key = irq_lock();
-
-	/* compiler_barrier() not needed, comes from irq_lock() */
-
-	++_current->base.sched_locked;
+	LOCKED(&sched_lock) {
+		++_current->base.sched_locked;
+		update_cache(1);
+	}
 
 	K_DEBUG("scheduler unlocked (%p:%d)\n",
 		_current, _current->base.sched_locked);
 
-	_reschedule(key);
+	_reschedule(irq_lock());
 #endif
 }
 
@@ -588,7 +600,11 @@ void _impl_k_yield(void)
 	__ASSERT(!_is_in_isr(), "");
 
 	if (!_is_idle(_current)) {
-		_move_thread_to_end_of_prio_q(_current);
+		LOCKED(&sched_lock) {
+			_priq_run_remove(&_kernel.ready_q.runq, _current);
+			_priq_run_add(&_kernel.ready_q.runq, _current);
+			update_cache(1);
+		}
 	}
 
 	if (_get_next_ready_thread() != _current) {