kernel/sched: Put k_thread_start() under a single lock

Similar to the suspend refactoring earlier, this really needs to be
done in an atomic block.  There were two confirmable races here,
though it's not completely clear either was being hit in practice:

1. The bit operations in z_mark_thread_as_started() aren't atomic, so
   they need to be protected (see the sketch after this list).

2. The intermediate state in z_ready_thread() could result in a dead
   or suspended thread being added to the ready queue if another
   context tried a simultaneous abort or suspend.
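
For illustration only (toy types and assumed bit names, not code from the
tree): race #1 is a lost update on the shared state bitfield, and race #2
is the window between "mark started" and "add to ready queue" in which
another CPU can abort or suspend the thread.

/* Illustration only -- toy types, not Zephyr code.  "state" stands in for
 * thread->base.thread_state; PRESTART/QUEUED are assumed bit names.
 */
#include <stdint.h>

#define PRESTART 0x01u   /* assumed: set until the thread has been started */
#define QUEUED   0x02u   /* assumed: set while the thread is on the ready queue */

struct toy_thread {
	uint8_t state;   /* shared scheduler bitfield, no atomicity guarantees */
};

/* Race #1: clearing PRESTART is a read-modify-write.  A concurrent abort or
 * suspend rewriting the same byte can lose this update, or have its own
 * update lost. */
static void mark_started(struct toy_thread *t)
{
	t->state &= (uint8_t)~PRESTART;
}

/* Race #2: between mark_started() and the queue insertion, another CPU can
 * abort or suspend the thread, so the insertion below may queue a dead or
 * suspended thread.  The fix is to do the "already started?" check, the bit
 * update and the insertion inside one sched_spinlock section, as
 * z_sched_start() does in the diff below. */
static void start_thread_racy(struct toy_thread *t)
{
	if (!(t->state & PRESTART)) {
		return;                 /* already started */
	}
	mark_started(t);
	/* <-- window: a simultaneous abort/suspend can land here */
	t->state |= QUEUED;
}

int main(void)
{
	struct toy_thread t = { .state = PRESTART };

	start_thread_racy(&t);
	return (t.state == QUEUED) ? 0 : 1;
}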

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Author:    Andy Ross <andrew.j.ross@intel.com>  2020-01-23 13:28:30 -08:00
Committer: Anas Nashif
Commit:    96ccc46e03
3 changed files with 27 additions and 21 deletions

kernel/include/ksched.h

@@ -37,7 +37,6 @@ BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
 #endif
 void z_sched_init(void);
-void z_add_thread_to_ready_q(struct k_thread *thread);
 void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
 void z_remove_thread_from_ready_q(struct k_thread *thread);
 int z_is_thread_time_slicing(struct k_thread *thread);
@@ -61,6 +60,8 @@ void z_time_slice(int ticks);
 void z_reset_time_slice(void);
 void z_sched_abort(struct k_thread *thread);
 void z_sched_ipi(void);
+void z_sched_start(struct k_thread *thread);
+void z_ready_thread(struct k_thread *thread);
 static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, s32_t timeout)
 {
@@ -246,14 +247,6 @@ static inline bool _is_valid_prio(int prio, void *entry_point)
 	return true;
 }
-static ALWAYS_INLINE void z_ready_thread(struct k_thread *thread)
-{
-	if (z_is_thread_ready(thread)) {
-		z_add_thread_to_ready_q(thread);
-		sys_trace_thread_ready(thread);
-	}
-}
 static inline void _ready_one_thread(_wait_q_t *wq)
 {
 	struct k_thread *thread = z_unpend_first_thread(wq);

kernel/sched.c

@@ -376,9 +376,10 @@ static void update_cache(int preempt_ok)
 #endif
 }
-void z_add_thread_to_ready_q(struct k_thread *thread)
+static void ready_thread(struct k_thread *thread)
 {
-	LOCKED(&sched_spinlock) {
+	if (z_is_thread_ready(thread)) {
+		sys_trace_thread_ready(thread);
 		_priq_run_add(&_kernel.ready_q.runq, thread);
 		z_mark_thread_as_queued(thread);
 		update_cache(0);
@@ -388,6 +389,13 @@ void z_add_thread_to_ready_q(struct k_thread *thread)
 	}
 }
+void z_ready_thread(struct k_thread *thread)
+{
+	LOCKED(&sched_spinlock) {
+		ready_thread(thread);
+	}
+}
 void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
 {
 	LOCKED(&sched_spinlock) {
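
The LOCKED() blocks above are sched.c's scoped-lock helper; ready_thread()
deliberately takes no lock itself so that it can be called from
z_ready_thread() here and from z_sched_start() below, both of which already
hold sched_spinlock.  A simplified stand-in for that idiom (assumed shape,
not the exact Zephyr macro; it relies on k_spinlock_key_t having a .key
member):

/* Simplified stand-in for the LOCKED() idiom (assumed shape, not the real
 * macro): execute the braced body exactly once with the spinlock held and
 * release it when the body finishes. */
#define WITH_LOCK(lck)							\
	for (k_spinlock_key_t __key = k_spin_lock(lck), __done = { 0 };	\
	     !__done.key;						\
	     k_spin_unlock(lck, __key), __done.key = 1)

/* usage: WITH_LOCK(&sched_spinlock) { ready_thread(thread); } */
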
@@ -400,6 +408,20 @@ void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
 	}
 }
+void z_sched_start(struct k_thread *thread)
+{
+	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
+	if (z_has_thread_started(thread)) {
+		k_spin_unlock(&sched_spinlock, key);
+		return;
+	}
+	z_mark_thread_as_started(thread);
+	ready_thread(thread);
+	z_reschedule(&sched_spinlock, key);
+}
 void z_thread_single_suspend(struct k_thread *thread)
 {
 	(void)z_abort_thread_timeout(thread);
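
One note on z_sched_start() above: it ends with z_reschedule() rather than a
plain k_spin_unlock(), because the newly readied thread may need to preempt
the caller.  A rough sketch of the contract being relied on (hypothetical
helper names, not the real implementation in kernel/sched.c):

/* Rough sketch, hypothetical helpers: z_reschedule() releases the scheduler
 * lock and, if a higher-priority thread became ready, switches to it before
 * returning to the caller. */
void z_reschedule_sketch(struct k_spinlock *lock, k_spinlock_key_t key)
{
	if (swap_needed()) {              /* hypothetical: new queue head != current thread */
		swap_to_next(lock, key);  /* hypothetical: drops the lock as part of the swap */
	} else {
		k_spin_unlock(lock, key);
	}
}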

kernel/thread.c

@@ -369,16 +369,7 @@ void z_check_stack_sentinel(void)
 #ifdef CONFIG_MULTITHREADING
 void z_impl_k_thread_start(struct k_thread *thread)
 {
-	k_spinlock_key_t key = k_spin_lock(&lock); /* protect kernel queues */
-	if (z_has_thread_started(thread)) {
-		k_spin_unlock(&lock, key);
-		return;
-	}
-	z_mark_thread_as_started(thread);
-	z_ready_thread(thread);
-	z_reschedule(&lock, key);
+	z_sched_start(thread);
 }
 #ifdef CONFIG_USERSPACE
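
For context, the user-facing pattern that funnels into this path
(illustrative only; the include path and the exact type of the delay
argument vary across Zephyr versions) is a thread created with K_FOREVER and
started later, possibly from another CPU, which is exactly the window the
single-lock z_sched_start() closes:

/* Illustrative usage, Zephyr 2.x-era API assumed. */
#include <zephyr.h>

#define MY_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);
static struct k_thread my_thread;

static void my_entry(void *p1, void *p2, void *p3)
{
	/* thread body */
}

void create_then_start(void)
{
	/* K_FOREVER leaves the thread un-started (prestart state). */
	k_tid_t tid = k_thread_create(&my_thread, my_stack,
				      K_THREAD_STACK_SIZEOF(my_stack),
				      my_entry, NULL, NULL, NULL,
				      K_PRIO_PREEMPT(7), 0, K_FOREVER);

	/* Any context may now start (or abort/suspend) the thread; the new
	 * locking makes those operations safe against each other. */
	k_thread_start(tid);
}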