kernel: show what spinlock was used incorrectly

Also helps identify corruption cases where the spinlock pointer
used wasn't actually a spinlock.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit (c1fdf98ba5) was authored by Andrew Boie on 2020-01-10 17:17:05 -08:00 and committed by Anas Nashif.

View file

@@ -71,7 +71,7 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	k.key = arch_irq_lock();
 #ifdef CONFIG_SPIN_VALIDATE
-	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock");
+	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock %p", l);
 #endif
 #ifdef CONFIG_SMP
@@ -90,7 +90,7 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 {
 	ARG_UNUSED(l);
 #ifdef CONFIG_SPIN_VALIDATE
-	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
+	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
 #endif
 #ifdef CONFIG_SMP
@@ -113,7 +113,7 @@ static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
 {
 	ARG_UNUSED(l);
 #ifdef CONFIG_SPIN_VALIDATE
-	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
+	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
 #endif
 #ifdef CONFIG_SMP
 	atomic_clear(&l->locked);