diff --git a/arch/arm/include/nano_private.h b/arch/arm/include/nano_private.h
index 758b7ca0301..edafbb7e754 100644
--- a/arch/arm/include/nano_private.h
+++ b/arch/arm/include/nano_private.h
@@ -226,6 +226,7 @@ struct tcs {
 
 #ifdef CONFIG_KERNEL_V2
 struct ready_q {
+	struct k_thread *cache;
 	uint32_t prio_bmap[1];
 	sys_dlist_t q[K_NUM_PRIORITIES];
 };
diff --git a/arch/x86/include/nano_private.h b/arch/x86/include/nano_private.h
index 5de5585515c..77c38075198 100644
--- a/arch/x86/include/nano_private.h
+++ b/arch/x86/include/nano_private.h
@@ -745,6 +745,7 @@ struct tcs {
 
 #ifdef CONFIG_KERNEL_V2
 struct ready_q {
+	struct k_thread *cache;
 	uint32_t prio_bmap[1];
 	sys_dlist_t q[K_NUM_PRIORITIES];
 };
diff --git a/kernel/unified/include/sched.h b/kernel/unified/include/sched.h
index b84f8e9c1a6..a2fec7a3f20 100644
--- a/kernel/unified/include/sched.h
+++ b/kernel/unified/include/sched.h
@@ -32,6 +32,7 @@ extern void k_sched_unlock(void);
 extern void _pend_thread(struct tcs *thread,
			 _wait_q_t *wait_q, int32_t timeout);
 extern void _pend_current_thread(_wait_q_t *wait_q, int32_t timeout);
+extern void _move_thread_to_end_of_prio_q(struct k_thread *thread);
 extern struct tcs *_get_next_ready_thread(void);
 extern int __must_switch_threads(void);
 extern void k_thread_priority_set(struct tcs *thread, int32_t priority);
diff --git a/kernel/unified/sched.c b/kernel/unified/sched.c
index 59828078cfa..9fdb632165b 100644
--- a/kernel/unified/sched.c
+++ b/kernel/unified/sched.c
@@ -41,7 +41,13 @@ static void _clear_ready_q_prio_bit(int prio)
 /*
  * Add thread to the ready queue, in the slot for its priority; the thread
  * must not be on a wait queue.
+ *
+ * This function and _move_thread_to_end_of_prio_q() are the _only_
+ * places where a thread is put on the ready queue.
+ *
+ * Interrupts must be locked when calling this function.
  */
+
 void _add_thread_to_ready_q(struct tcs *thread)
 {
 	int q_index = _get_ready_q_q_index(thread->prio);
@@ -49,9 +55,20 @@ void _add_thread_to_ready_q(struct tcs *thread)
 
 	_set_ready_q_prio_bit(thread->prio);
 	sys_dlist_append(q, &thread->k_q_node);
+
+	struct k_thread **cache = &_nanokernel.ready_q.cache;
+
+	*cache = *cache && _is_prio_higher(thread->prio, (*cache)->prio) ?
+		 thread : *cache;
 }
 
-/* remove thread from the ready queue */
+/*
+ * This function and _move_thread_to_end_of_prio_q() are the _only_
+ * places where a thread is taken off the ready queue.
+ *
+ * Interrupts must be locked when calling this function.
+ */
+
 void _remove_thread_from_ready_q(struct tcs *thread)
 {
 	int q_index = _get_ready_q_q_index(thread->prio);
@@ -61,6 +78,10 @@ void _remove_thread_from_ready_q(struct tcs *thread)
 	if (sys_dlist_is_empty(q)) {
 		_clear_ready_q_prio_bit(thread->prio);
 	}
+
+	struct k_thread **cache = &_nanokernel.ready_q.cache;
+
+	*cache = *cache == thread ? NULL : *cache;
 }
 
 /* reschedule threads if the scheduler is not locked */
@@ -142,9 +163,11 @@ void _pend_current_thread(_wait_q_t *wait_q, int32_t timeout)
 	_pend_thread(_current, wait_q, timeout);
 }
 
-/* find which one is the next thread to run */
-/* must be called with interrupts locked */
-struct tcs *_get_next_ready_thread(void)
+/*
+ * Find the next thread to run when there is no thread in the cache and update
+ * the cache.
+ */
+static struct k_thread *__get_next_ready_thread(void)
 {
 	int prio = _get_highest_ready_prio();
 	int q_index = _get_ready_q_q_index(prio);
@@ -157,9 +180,20 @@ struct tcs *_get_next_ready_thread(void)
 	struct k_thread *thread =
 		(struct k_thread *)sys_dlist_peek_head_not_empty(list);
 
+	_nanokernel.ready_q.cache = thread;
+
 	return thread;
 }
 
+/* find which one is the next thread to run */
+/* must be called with interrupts locked */
+struct k_thread *_get_next_ready_thread(void)
+{
+	struct k_thread *cache = _nanokernel.ready_q.cache;
+
+	return cache ? cache : __get_next_ready_thread();
+}
+
 /*
  * Check if there is a thread of higher prio than the current one. Should only
  * be called if we already know that the current thread is preemptible.
@@ -197,6 +231,30 @@ int k_current_priority_get(void)
 	return k_thread_priority_get(_current);
 }
 
+/*
+ * Interrupts must be locked when calling this function.
+ *
+ * This function, _add_thread_to_ready_q() and
+ * _remove_thread_from_ready_q() are the _only_ places where a thread is
+ * taken off or put on the ready queue.
+ */
+void _move_thread_to_end_of_prio_q(struct k_thread *thread)
+{
+	int q_index = _get_ready_q_q_index(thread->prio);
+	sys_dlist_t *q = &_nanokernel.ready_q.q[q_index];
+
+	if (sys_dlist_is_tail(q, &thread->k_q_node)) {
+		return;
+	}
+
+	sys_dlist_remove(&thread->k_q_node);
+	sys_dlist_append(q, &thread->k_q_node);
+
+	struct k_thread **cache = &_nanokernel.ready_q.cache;
+
+	*cache = *cache == thread ? NULL : *cache;
+}
+
 /*
  * application API: the current thread yields control to threads of higher or
  * equal priorities. This is done by remove the thread from the ready queue,
@@ -209,8 +267,7 @@ void k_yield(void)
 
 	int key = irq_lock();
 
-	_remove_thread_from_ready_q(_current);
-	_add_thread_to_ready_q(_current);
+	_move_thread_to_end_of_prio_q(_current);
 
 	if (_current == _get_next_ready_thread()) {
 		irq_unlock(key);
diff --git a/kernel/unified/sys_clock.c b/kernel/unified/sys_clock.c
index f70a169b644..dbd824fc2ab 100644
--- a/kernel/unified/sys_clock.c
+++ b/kernel/unified/sys_clock.c
@@ -211,8 +211,7 @@ static void handle_time_slicing(int32_t ticks)
 	_time_slice_elapsed += _ticks_to_ms(ticks);
 	if (_time_slice_elapsed >= _time_slice_duration) {
 		_time_slice_elapsed = 0;
-		_remove_thread_from_ready_q(_current);
-		_add_thread_to_ready_q(_current);
+		_move_thread_to_end_of_prio_q(_current);
 	}
 }
 #else
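For readers who want the caching idea in isolation, below is a minimal standalone sketch in plain C. It is not part of the patch and not Zephyr code: the names (toy_thread, ready_head, next_cache, scan_ready_list, and so on) are invented for illustration, and a naive singly linked list stands in for Zephyr's per-priority sys_dlist_t queues. The invariant is the same one the patch relies on: a NULL cache means "recompute on the next lookup", and every add or remove either keeps the cache correct or clears it.

/*
 * Illustrative sketch only: a "next ready thread" cache over a toy ready
 * list. Adding a thread replaces the cache only when the newcomer outranks
 * it, removing the cached thread invalidates the cache, and a lookup rescans
 * the list only when the cache is empty.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_thread {
	const char *name;
	int prio;                /* lower value = higher priority */
	struct toy_thread *next;
};

static struct toy_thread *ready_head;  /* toy ready list */
static struct toy_thread *next_cache;  /* plays the role of ready_q.cache */

/* full scan of the ready list; refills the cache as a side effect */
static struct toy_thread *scan_ready_list(void)
{
	struct toy_thread *best = ready_head;

	for (struct toy_thread *t = ready_head; t != NULL; t = t->next) {
		if (t->prio < best->prio) {
			best = t;
		}
	}
	next_cache = best;
	return best;
}

static void toy_add_to_ready(struct toy_thread *t)
{
	t->next = ready_head;
	ready_head = t;
	/* only overwrite a valid cache when the new thread outranks it */
	if (next_cache != NULL && t->prio < next_cache->prio) {
		next_cache = t;
	}
}

static void toy_remove_from_ready(struct toy_thread *t)
{
	struct toy_thread **link = &ready_head;

	while (*link != NULL && *link != t) {
		link = &(*link)->next;
	}
	if (*link == t) {
		*link = t->next;
	}
	/* removing the cached thread forces the next lookup to rescan */
	if (next_cache == t) {
		next_cache = NULL;
	}
}

static struct toy_thread *toy_next_ready(void)
{
	return next_cache != NULL ? next_cache : scan_ready_list();
}

int main(void)
{
	struct toy_thread a = { "a", 5, NULL };
	struct toy_thread b = { "b", 3, NULL };

	toy_add_to_ready(&a);
	toy_add_to_ready(&b);
	printf("next: %s\n", toy_next_ready()->name); /* "b": scan fills cache */
	printf("next: %s\n", toy_next_ready()->name); /* "b": served from cache */
	toy_remove_from_ready(&b);
	printf("next: %s\n", toy_next_ready()->name); /* "a": cache cleared, rescan */
	return 0;
}

The point of the pattern, in the patch as in this sketch, is that the common case (repeated calls to _get_next_ready_thread() with no intervening queue change) becomes a single pointer read, while correctness is preserved by clearing the cache whenever the cached thread leaves or moves within its queue.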