kernel/spinlock: Force inlining
Something is going wrong with code generation here, potentially the inline assembly generated by _arch_irq_un/lock(), and these calls are not being inlined by gcc. So what should be a ~3 instruction sequence on most uniprocessor architectures is turning into 8-20 cycles worth of work to implement the API as written.

Use an ALWAYS_INLINE, which is sort of ugly semantically but produces much better code.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
commit 4ff2dfce09
parent eda4c027da
1 changed file with 3 additions and 3 deletions
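For background on the fix: ALWAYS_INLINE is Zephyr's wrapper around the compiler's always_inline attribute. With gcc it expands to roughly the following (a sketch of the toolchain definition, not part of this diff):

    /* Roughly how the gcc toolchain header defines it */
    #define ALWAYS_INLINE inline __attribute__((always_inline))

Unlike plain "inline", which is only a hint that gcc's inlining heuristics are free to ignore, the always_inline attribute forces the call to be inlined, which is what keeps the lock/unlock fast path down to a few instructions.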
include/spinlock.h
@@ -32,8 +32,7 @@ struct k_spinlock {
 #endif
 };
 
-static inline k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
-
+static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 {
 	k_spinlock_key_t k;
 
@@ -59,7 +58,8 @@ static inline k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	return k;
 }
 
-static inline void k_spin_unlock(struct k_spinlock *l, k_spinlock_key_t key)
+static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
+					k_spinlock_key_t key)
 {
 #ifdef SPIN_VALIDATE
 	__ASSERT(l->thread_cpu == (_current_cpu->id | (u32_t)_current),
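The change does not alter the API; callers are unaffected. A minimal usage sketch of the two affected calls (the lock and the counter here are hypothetical names, not from this commit):

    #include <spinlock.h>

    static struct k_spinlock my_lock;  /* hypothetical lock protecting `data` */
    static int data;

    void increment(void)
    {
        /* k_spin_lock() masks interrupts (and spins on SMP), returning
         * a key that records the previous interrupt state.
         */
        k_spinlock_key_t key = k_spin_lock(&my_lock);

        data++;

        /* Must be passed the key from the matching k_spin_lock(). */
        k_spin_unlock(&my_lock, key);
    }

On uniprocessor builds this pair reduces to the _arch_irq_lock()/_arch_irq_unlock() sequence, which is why the missed inlining was so costly relative to the ~3 instructions it should take.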