From b2791b0ac8122e712e87b5c39e25bcd0c02483c3 Mon Sep 17 00:00:00 2001
From: Andy Ross
Date: Mon, 28 Jan 2019 09:36:36 -0800
Subject: [PATCH] kernel/sched: Force inlining of some routines within the
 scheduler guts

GCC 6.2.0 is making frustratingly poor inlining decisions with some of
these routines, resulting in an awful lot of runtime calls for code
that is only ever expanded once or twice within the file.

Treat with targeted ALWAYS_INLINE annotations to force the issue.  The
scheduler code is a hot path.

Signed-off-by: Andy Ross
---
 kernel/include/ksched.h |  2 +-
 kernel/sched.c          | 16 ++++++++--------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index f44df94894d..bad96aaefe9 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -215,7 +215,7 @@ static inline bool _is_valid_prio(int prio, void *entry_point)
 	return true;
 }
 
-static inline void _ready_thread(struct k_thread *thread)
+static ALWAYS_INLINE void _ready_thread(struct k_thread *thread)
 {
 	if (_is_thread_ready(thread)) {
 		_add_thread_to_ready_q(thread);
diff --git a/kernel/sched.c b/kernel/sched.c
index 6b959c168ca..f44be6b325b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -111,7 +111,7 @@ bool _is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2)
 	return false;
 }
 
-static bool should_preempt(struct k_thread *th, int preempt_ok)
+static ALWAYS_INLINE bool should_preempt(struct k_thread *th, int preempt_ok)
 {
 	/* Preemption is OK if it's being explicitly allowed by
 	 * software state (e.g. the thread called k_yield())
@@ -153,7 +153,7 @@ static bool should_preempt(struct k_thread *th, int preempt_ok)
 	return false;
 }
 
-static struct k_thread *next_up(void)
+static ALWAYS_INLINE struct k_thread *next_up(void)
 {
 #ifndef CONFIG_SMP
 	/* In uniprocessor mode, we can leave the current thread in
@@ -367,8 +367,8 @@ static _wait_q_t *pended_on(struct k_thread *thread)
 	return thread->base.pended_on;
 }
 
-struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
-					      struct k_thread *from)
+ALWAYS_INLINE struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
+							     struct k_thread *from)
 {
 	ARG_UNUSED(from);
 
@@ -381,7 +381,7 @@ struct k_thread *_find_first_thread_to_unpend(_wait_q_t *wait_q,
 	return ret;
 }
 
-void _unpend_thread_no_timeout(struct k_thread *thread)
+ALWAYS_INLINE void _unpend_thread_no_timeout(struct k_thread *thread)
 {
 	LOCKED(&sched_lock) {
 		_priq_wait_remove(&pended_on(thread)->waitq, thread);
@@ -565,7 +565,7 @@ void *_get_next_switch_handle(void *interrupted)
 }
 #endif
 
-void _priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread)
+ALWAYS_INLINE void _priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread)
 {
 	struct k_thread *t;
 
@@ -667,7 +667,7 @@ struct k_thread *_priq_rb_best(struct _priq_rb *pq)
 # endif
 #endif
 
-void _priq_mq_add(struct _priq_mq *pq, struct k_thread *thread)
+ALWAYS_INLINE void _priq_mq_add(struct _priq_mq *pq, struct k_thread *thread)
 {
 	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
 
@@ -675,7 +675,7 @@ void _priq_mq_add(struct _priq_mq *pq, struct k_thread *thread)
 	pq->bitmask |= (1 << priority_bit);
 }
 
-void _priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread)
+ALWAYS_INLINE void _priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread)
 {
 	int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO;
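
For context: ALWAYS_INLINE is Zephyr's wrapper around the compiler's
always_inline function attribute. Below is a minimal, standalone sketch of
the mechanism the patch relies on, assuming a GCC-compatible toolchain; the
macro definition and the hinted_add/forced_add helpers are illustrative
only (Zephyr's real definition lives in its toolchain headers), not code
from this patch.

/* sketch.c -- build with: gcc -O0 sketch.c && objdump -d a.out */
#include <stdio.h>

/* Illustrative stand-in for Zephyr's macro: "inline" alone is only a
 * hint, while the always_inline attribute removes the compiler's
 * discretion entirely.
 */
#define ALWAYS_INLINE inline __attribute__((always_inline))

/* Plain "static inline": whether this is expanded at the call site is
 * up to the optimizer's heuristics.  At -O0, or under the poor
 * heuristics the commit message describes in GCC 6.2.0, it can be
 * emitted out of line and reached via a runtime call.
 */
static inline int hinted_add(int a, int b)
{
	return a + b;
}

/* always_inline: GCC expands the body at every call site regardless of
 * optimization level, so no standalone function or call instruction is
 * generated for it.
 */
static ALWAYS_INLINE int forced_add(int a, int b)
{
	return a + b;
}

int main(void)
{
	/* Both calls compute the same value; the difference shows up
	 * only in the generated code (compare the disassembly).
	 */
	printf("%d %d\n", hinted_add(2, 3), forced_add(2, 3));
	return 0;
}

Disassembling the build should show hinted_add as a separate symbol with a
call into it (at -O0, and potentially under GCC 6.2.0's heuristics even
with optimization on), while forced_add's body appears only inline in
main. That call-elimination on every scheduler invocation is the point of
the patch; the trade-off is code size, since each call site gets its own
copy of the body, which is acceptable here because these routines are
expanded only once or twice within the file.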