kernel/spinlock: Move validation out of header inlines
The validation checks recently added to spinlocks are useful, but they require kernel internals like _current and _current_cpu in a header context that tends to be needed before those are declared (or where we don't want them declared), and that is causing big header dependency headaches. Move the checks into C code; this is just a validation tool, not a performance path.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent aa6e21c24c
commit 5aa7460e5c
2 changed files with 34 additions and 10 deletions
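For readers unfamiliar with the API being validated: a k_spinlock is taken with k_spin_lock(), which masks interrupts and returns a key, and released with k_spin_unlock(), which takes the same key back. A minimal usage sketch follows; the lock and function names are illustrative, not part of this commit.

#include <spinlock.h>

static struct k_spinlock my_lock;   /* hypothetical lock protecting some state */
static int shared_counter;

void bump_counter(void)
{
	/* Lock, remembering the IRQ key so it can be restored on unlock. */
	k_spinlock_key_t key = k_spin_lock(&my_lock);

	shared_counter++;               /* critical section */

	/* With SPIN_VALIDATE enabled, unlocking a lock this thread does
	 * not hold would trip the "Not my spinlock!" assertion.
	 */
	k_spin_unlock(&my_lock, key);
}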
include/spinlock.h
@@ -9,7 +9,10 @@
 #include <atomic.h>
 
 #if defined(CONFIG_ASSERT) && (CONFIG_MP_NUM_CPUS < 4)
-#include <kernel_structs.h>
 #include <misc/__assert.h>
+struct k_spinlock;
+int z_spin_lock_valid(struct k_spinlock *l);
+int z_spin_unlock_valid(struct k_spinlock *l);
 #define SPIN_VALIDATE
 #endif
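For context, the validation only compiles when CONFIG_ASSERT is enabled and CONFIG_MP_NUM_CPUS is below four, because the owner bookkeeping packs the CPU id into the low bits of the owning thread pointer (which is at least 4-byte aligned). A rough sketch of the k_spinlock structure this implies; the field names come from the code in this diff, but the exact layout in the kernel headers may differ:

struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;     /* assumed lock word used by the SMP spin loop */
#endif
#ifdef SPIN_VALIDATE
	/* Owner bookkeeping: the owning thread pointer OR'd with the
	 * current CPU id in the low two bits, which is why the checks
	 * use "& 3" and why the guard requires CONFIG_MP_NUM_CPUS < 4.
	 */
	u32_t thread_cpu;
#endif
};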
@@ -43,11 +46,7 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	k.key = _arch_irq_lock();
 
 #ifdef SPIN_VALIDATE
-	if (l->thread_cpu) {
-		__ASSERT((l->thread_cpu & 3) != _current_cpu->id,
-			 "Recursive spinlock");
-	}
-	l->thread_cpu = _current_cpu->id | (u32_t)_current;
+	__ASSERT(z_spin_lock_valid(l), "Recursive spinlock");
 #endif
 
 #ifdef CONFIG_SMP
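The lock-side check itself now lives in a kernel C file (the second file in this commit, not shown here). Based on the inline logic removed above, a plausible sketch of that implementation is given below, with the caveat that the real function may differ in detail:

/* Sketch only: mirrors the inline validation removed from k_spin_lock().
 * Returns false if the lock is already held on the current CPU
 * (recursive locking), then records the new owner as
 * "thread pointer | CPU id".
 */
int z_spin_lock_valid(struct k_spinlock *l)
{
	if (l->thread_cpu != 0U) {
		if ((l->thread_cpu & 3U) == _current_cpu->id) {
			return 0;
		}
	}
	l->thread_cpu = _current_cpu->id | (u32_t)_current;
	return 1;
}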
@@ -61,13 +60,11 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 					k_spinlock_key_t key)
 {
 	ARG_UNUSED(l);
 #ifdef SPIN_VALIDATE
-	__ASSERT(l->thread_cpu == (_current_cpu->id | (u32_t)_current),
-		 "Not my spinlock!");
-	l->thread_cpu = 0;
+	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
 #endif
 
 #ifdef CONFIG_SMP
 	/* Strictly we don't need atomic_clear() here (which is an
 	 * exchange operation that returns the old value). We are always
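Likewise, the unlock-side check moves out of line. Again inferring from the removed inline code, the C implementation would look something like the following sketch, not necessarily the verbatim kernel code:

/* Sketch only: mirrors the inline validation removed from k_spin_unlock().
 * Returns false unless the lock is held by the current thread on the
 * current CPU, and clears the owner bookkeeping on success.
 */
int z_spin_unlock_valid(struct k_spinlock *l)
{
	if (l->thread_cpu != (_current_cpu->id | (u32_t)_current)) {
		return 0;
	}
	l->thread_cpu = 0U;
	return 1;
}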
@@ -86,6 +83,7 @@ static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
  */
 static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
 {
 	ARG_UNUSED(l);
 #ifdef SPIN_VALIDATE
+	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock!");
 #endif