kernel: sched: inline update_cache

This improves context switching by about 7%, as measured with the
thread_metric preemptive scheduling benchmark.

Before:
**** Thread-Metric Preemptive Scheduling Test ****
Relative Time: 120
Time Period Total:  5451879

After:
**** Thread-Metric Preemptive Scheduling Test ****
Relative Time: 30
Time Period Total:  5853535

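The saving comes from removing a function call on the scheduler hot
path: update_cache() runs on every reschedule, so forcing it inline
eliminates the call/return overhead at each call site. For reference,
ALWAYS_INLINE on GCC-compatible toolchains is conventionally defined
along these lines (a sketch; Zephyr's actual macro lives in its
toolchain headers and may differ):

    /* Sketch of a conventional definition; Zephyr's real macro is in
     * its toolchain headers (e.g. include/zephyr/toolchain/gcc.h).
     */
    #define ALWAYS_INLINE inline __attribute__((always_inline))

Unlike plain inline, the always_inline attribute makes the compiler
expand the function body even when its size heuristics would otherwise
decline to inline it.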
Signed-off-by: Anas Nashif <anas.nashif@intel.com>

@@ -35,7 +35,7 @@ struct k_spinlock _sched_spinlock;
  */
 __incoherent struct k_thread _thread_dummy;
-static void update_cache(int preempt_ok);
+static ALWAYS_INLINE void update_cache(int preempt_ok);
 static void halt_thread(struct k_thread *thread, uint8_t new_state);
 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
@@ -320,7 +320,7 @@ static void update_metairq_preempt(struct k_thread *thread)
  */
 }
-static void update_cache(int preempt_ok)
+static ALWAYS_INLINE void update_cache(int preempt_ok)
 {
 #ifndef CONFIG_SMP
 	struct k_thread *thread = next_up();
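
For illustration, a minimal self-contained example (hypothetical names,
not Zephyr code) of the same pattern the diff applies: the forward
declaration carries the inline attribute, the definition follows later
in the same translation unit, and call sites in between still get the
body expanded:

    #include <stdio.h>

    /* Assumed definition, mirroring GCC-style toolchains. */
    #define ALWAYS_INLINE inline __attribute__((always_inline))

    /* Forward declaration with the attribute, as in the diff above. */
    static ALWAYS_INLINE int hot_helper(int preempt_ok);

    static int reschedule(int preempt_ok)
    {
        /* Compiled as the expanded body of hot_helper, not a call. */
        return hot_helper(preempt_ok) + 1;
    }

    /* Definition later in the same file; GCC still inlines it because
     * the whole translation unit is parsed before code generation. */
    static ALWAYS_INLINE int hot_helper(int preempt_ok)
    {
        return preempt_ok ? 41 : 0;
    }

    int main(void)
    {
        printf("%d\n", reschedule(1)); /* prints 42 */
        return 0;
    }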