diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 4b2c5446051..5d707056ccd 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -37,7 +37,6 @@ BUILD_ASSERT(K_LOWEST_APPLICATION_THREAD_PRIO
 #endif
 
 void z_sched_init(void);
-void z_add_thread_to_ready_q(struct k_thread *thread);
 void z_move_thread_to_end_of_prio_q(struct k_thread *thread);
 void z_remove_thread_from_ready_q(struct k_thread *thread);
 int z_is_thread_time_slicing(struct k_thread *thread);
@@ -61,6 +60,8 @@ void z_time_slice(int ticks);
 void z_reset_time_slice(void);
 void z_sched_abort(struct k_thread *thread);
 void z_sched_ipi(void);
+void z_sched_start(struct k_thread *thread);
+void z_ready_thread(struct k_thread *thread);
 
 static inline void z_pend_curr_unlocked(_wait_q_t *wait_q, s32_t timeout)
 {
@@ -246,14 +247,6 @@ static inline bool _is_valid_prio(int prio, void *entry_point)
 	return true;
 }
 
-static ALWAYS_INLINE void z_ready_thread(struct k_thread *thread)
-{
-	if (z_is_thread_ready(thread)) {
-		z_add_thread_to_ready_q(thread);
-		sys_trace_thread_ready(thread);
-	}
-}
-
 static inline void _ready_one_thread(_wait_q_t *wq)
 {
 	struct k_thread *thread = z_unpend_first_thread(wq);
diff --git a/kernel/sched.c b/kernel/sched.c
index 0067005b2bb..4e0bc32f32a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -376,9 +376,10 @@ static void update_cache(int preempt_ok)
 #endif
 }
 
-void z_add_thread_to_ready_q(struct k_thread *thread)
+static void ready_thread(struct k_thread *thread)
 {
-	LOCKED(&sched_spinlock) {
+	if (z_is_thread_ready(thread)) {
+		sys_trace_thread_ready(thread);
 		_priq_run_add(&_kernel.ready_q.runq, thread);
 		z_mark_thread_as_queued(thread);
 		update_cache(0);
@@ -388,6 +389,13 @@ void z_add_thread_to_ready_q(struct k_thread *thread)
 	}
 }
 
+void z_ready_thread(struct k_thread *thread)
+{
+	LOCKED(&sched_spinlock) {
+		ready_thread(thread);
+	}
+}
+
 void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
 {
 	LOCKED(&sched_spinlock) {
@@ -400,6 +408,20 @@ void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
 	}
 }
 
+void z_sched_start(struct k_thread *thread)
+{
+	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
+
+	if (z_has_thread_started(thread)) {
+		k_spin_unlock(&sched_spinlock, key);
+		return;
+	}
+
+	z_mark_thread_as_started(thread);
+	ready_thread(thread);
+	z_reschedule(&sched_spinlock, key);
+}
+
 void z_thread_single_suspend(struct k_thread *thread)
 {
 	(void)z_abort_thread_timeout(thread);
diff --git a/kernel/thread.c b/kernel/thread.c
index 734879c0f2c..54f932c7a91 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -369,16 +369,7 @@ void z_check_stack_sentinel(void)
 #ifdef CONFIG_MULTITHREADING
 void z_impl_k_thread_start(struct k_thread *thread)
 {
-	k_spinlock_key_t key = k_spin_lock(&lock); /* protect kernel queues */
-
-	if (z_has_thread_started(thread)) {
-		k_spin_unlock(&lock, key);
-		return;
-	}
-
-	z_mark_thread_as_started(thread);
-	z_ready_thread(thread);
-	z_reschedule(&lock, key);
+	z_sched_start(thread);
 }
 
 #ifdef CONFIG_USERSPACE
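
The shape of the refactor: the queue manipulation moves into a static helper, ready_thread(), that assumes the scheduler lock is already held, while the exported entry points z_ready_thread() and z_sched_start() acquire sched_spinlock around it. The start path (started check, mark, queue insert, reschedule) now happens under a single acquisition of the scheduler's own lock, rather than under thread.c's separate `lock` with sched_spinlock taken a second time inside z_add_thread_to_ready_q(). Below is a minimal, self-contained sketch of that locked-wrapper/unlocked-helper pattern; it uses a POSIX mutex in place of sched_spinlock and invented stand-in types, so none of these names are Zephyr APIs.

/* Sketch of the locking pattern adopted by the patch; all names
 * here are hypothetical stand-ins, only the structure mirrors sched.c.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct thread {
	bool started;
	bool queued;
};

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

/* Internal helper: caller must already hold sched_lock. */
static void ready_thread(struct thread *t)
{
	if (!t->queued) {
		t->queued = true;	/* stand-in for _priq_run_add() etc. */
		printf("thread queued\n");
	}
}

/* Exported wrapper: takes the lock, then delegates. */
void ready_thread_locked(struct thread *t)
{
	pthread_mutex_lock(&sched_lock);
	ready_thread(t);
	pthread_mutex_unlock(&sched_lock);
}

/* Start path: the started check, the mark, and the queue insert all
 * happen under one lock acquisition, mirroring z_sched_start().
 */
void sched_start(struct thread *t)
{
	pthread_mutex_lock(&sched_lock);
	if (t->started) {
		pthread_mutex_unlock(&sched_lock);
		return;			/* already started: nothing to do */
	}
	t->started = true;
	ready_thread(t);		/* still under the same lock */
	pthread_mutex_unlock(&sched_lock);
}

int main(void)
{
	struct thread t = { .started = false, .queued = false };

	sched_start(&t);		/* queues the thread */
	sched_start(&t);		/* no-op: already started */
	return 0;
}

The design point the sketch illustrates: with one unlocked helper shared by both wrappers, a compound operation like sched_start() can compose the check-and-mark with the queue insert atomically, instead of taking the lock twice or reaching into queue internals from another file, which is exactly what the patch removes from thread.c.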