kernel: Simple spinlock API
Minimal spinlock API based on the existing atomic.h layer. Usage works just like irq_lock(), but takes an argument pointing to a specific struct k_spinlock to lock/unlock. No attempt at implementing fairness or backoff semantics. No attempt made at architecture-specific assembly. When CONFIG_SMP is not enabled, this code falls back to a zero-size struct and becomes functionally identical to irq_lock/unlock(). Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
parent
d3376f2781
commit
7a023cfb89
1 changed file with 69 additions and 0 deletions
69
include/spinlock.h
Normal file
69
include/spinlock.h
Normal file
|
@ -0,0 +1,69 @@
|
|||
/*
|
||||
* Copyright (c) 2018 Intel Corporation.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
#ifndef _SPINLOCK_H
|
||||
#define _SPINLOCK_H
|
||||
|
||||
#include <atomic.h>
|
||||
|
||||
/* Opaque token returned by k_spin_lock() and required by the matching
 * k_spin_unlock() call.  Wraps the architecture IRQ-lock key returned
 * by _arch_irq_lock() so interrupt state can be restored on unlock.
 */
struct k_spinlock_key {
	int key;
};

typedef struct k_spinlock_key k_spinlock_key_t;
|
||||
|
||||
/* Lightweight spinlock object.  On SMP builds it holds the atomic
 * lock word (plus, under CONFIG_DEBUG, the IRQ key recorded by
 * k_spin_lock() for lock/unlock mismatch detection).  On
 * uniprocessor builds the struct is empty and the lock/unlock calls
 * reduce to bare interrupt masking.
 */
struct k_spinlock {
#ifdef CONFIG_SMP
	atomic_t locked;	/* 0 = free, 1 = held */
#ifdef CONFIG_DEBUG
	int saved_key;		/* IRQ key recorded at lock time */
#endif
#endif
};
|
||||
|
||||
static inline k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
|
||||
{
|
||||
k_spinlock_key_t k;
|
||||
|
||||
/* Note that we need to use the underlying arch-specific lock
|
||||
* implementation. The "irq_lock()" API in SMP context is
|
||||
* actually a wrapper for a global spinlock!
|
||||
*/
|
||||
k.key = _arch_irq_lock();
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
# ifdef CONFIG_DEBUG
|
||||
l->saved_key = k.key;
|
||||
# endif
|
||||
while (!atomic_cas(&l->locked, 0, 1)) {
|
||||
}
|
||||
#endif
|
||||
|
||||
return k;
|
||||
}
|
||||
|
||||
/**
 * @brief Release a spinlock and restore local interrupt state.
 *
 * @param l Spinlock previously taken via k_spin_lock()
 * @param key Key returned by the matching k_spin_lock() call
 */
static inline void k_spin_unlock(struct k_spinlock *l, k_spinlock_key_t key)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_DEBUG
	/* Best-effort sanity check only: it flags cases where the
	 * saved arch IRQ key differs from the one handed back, not
	 * every conceivable misuse.  A nesting counter would catch
	 * more.
	 */
	__ASSERT(l->saved_key == key.key, "Mismatched spin lock/unlock");
# endif
	/* Logically a plain store of zero would do here: we hold the
	 * lock, so no other CPU can race on the value.  But
	 * atomic_clear() (an exchange returning the old value) also
	 * provides the release memory barrier some architectures
	 * need, and Zephyr has no standalone barrier framework yet.
	 */
	atomic_clear(&l->locked);
#endif
	_arch_irq_unlock(key.key);
}
|
||||
|
||||
#endif /* _SPINLOCK_H */
|
Loading…
Add table
Add a link
Reference in a new issue