From 4ff2dfce0953453bdae6084a6739ac974487d2d2 Mon Sep 17 00:00:00 2001
From: Andy Ross
Date: Mon, 28 Jan 2019 09:35:37 -0800
Subject: [PATCH] kernel/spinlock: Force inlining

Something is going wrong with code generation here, potentially the
inline assembly generated by _arch_irq_un/lock(), and these calls are
not being inlined by gcc.  So what should be a ~3 instruction sequence
on most uniprocessor architectures is turning into 8-20 cycles worth
of work to implement the API as written.

Use an ALWAYS_INLINE, which is sort of ugly semantically but produces
much better code.

Signed-off-by: Andy Ross
---
 include/spinlock.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/spinlock.h b/include/spinlock.h
index 30ef43f754f..85a70b5a839 100644
--- a/include/spinlock.h
+++ b/include/spinlock.h
@@ -32,8 +32,7 @@ struct k_spinlock {
 #endif
 };
 
-
-static inline k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
+static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 {
 	k_spinlock_key_t k;
 
@@ -59,7 +58,8 @@ static inline k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	return k;
 }
 
-static inline void k_spin_unlock(struct k_spinlock *l, k_spinlock_key_t key)
+static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
+					k_spinlock_key_t key)
 {
 #ifdef SPIN_VALIDATE
 	__ASSERT(l->thread_cpu == (_current_cpu->id | (u32_t)_current),
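
For illustration, a minimal stand-alone sketch of the distinction the
patch relies on: plain "static inline" is only a hint that gcc is free
to ignore, while an always_inline attribute forces the body into every
call site.  Zephyr's ALWAYS_INLINE macro wraps a gcc attribute of this
general shape; MY_ALWAYS_INLINE below is a hypothetical stand-in, not
the kernel's actual definition.

#include <stdio.h>

/* Hypothetical stand-in for Zephyr's ALWAYS_INLINE (assumption: the
 * real macro lives in the toolchain headers and wraps a gcc attribute
 * along these lines).
 */
#define MY_ALWAYS_INLINE inline __attribute__((always_inline))

static inline int hint_only(int x)
{
	/* "inline" is only a request; gcc may still emit an
	 * out-of-line copy and a real call here.
	 */
	return x + 1;
}

static MY_ALWAYS_INLINE int forced(int x)
{
	/* always_inline makes gcc expand this body at the call site,
	 * even at -O0, avoiding call/return overhead entirely.
	 */
	return x + 1;
}

int main(void)
{
	/* Both calls add 1; inspecting the -O0 disassembly typically
	 * shows hint_only() called out of line while forced() is
	 * expanded in place.
	 */
	printf("%d %d\n", hint_only(41), forced(41));
	return 0;
}

The same tradeoff motivates the patch: a spin lock/unlock pair is only
a few instructions on most uniprocessor targets, so any call overhead
the compiler leaves behind dominates the cost of the operation itself.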