diff --git a/include/spinlock.h b/include/spinlock.h
index 8bc2bb5b663..64a91ba15e0 100644
--- a/include/spinlock.h
+++ b/include/spinlock.h
@@ -27,8 +27,56 @@ struct k_spinlock_key {
 	int key;
 };
 
+/**
+ * @brief Kernel Spin Lock
+ *
+ * This struct defines a spin lock record on which CPUs can wait with
+ * k_spin_lock(). Any number of spinlocks may be defined in
+ * application code.
+ */
+struct k_spinlock;
+
+/**
+ * @brief Spinlock key type
+ *
+ * This type defines a "key" value used by a spinlock implementation
+ * to store the system interrupt state at the time of a call to
+ * k_spin_lock(). It is expected to be passed to a matching
+ * k_spin_unlock().
+ *
+ * This type is opaque and should not be inspected by application
+ * code.
+ */
 typedef struct k_spinlock_key k_spinlock_key_t;
 
+/**
+ * @brief Lock a spinlock
+ *
+ * This routine locks the specified spinlock, returning a key handle
+ * representing interrupt state needed at unlock time. Upon
+ * returning, the calling thread is guaranteed not to be suspended or
+ * interrupted on its current CPU until it calls k_spin_unlock(). The
+ * implementation guarantees mutual exclusion: exactly one thread on
+ * one CPU will return from k_spin_lock() at a time. Other CPUs
+ * trying to acquire a lock already held by another CPU will enter an
+ * implementation-defined busy loop ("spinning") until the lock is
+ * released.
+ *
+ * Separate spin locks may be nested. It is legal to lock an
+ * (unlocked) spin lock while holding a different lock. Spin locks
+ * are not recursive, however: an attempt to acquire a spin lock that
+ * the CPU already holds will deadlock.
+ *
+ * In circumstances where only one CPU exists, the behavior of
+ * k_spin_lock() remains as specified above, though obviously no
+ * spinning will take place. Implementations may be free to optimize
+ * in uniprocessor contexts such that the locking reduces to an
+ * interrupt mask operation.
+ *
+ * @param l A pointer to the spinlock to lock
+ * @return A key value that must be passed to k_spin_unlock() when the
+ *         lock is released.
+ */
 static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 {
 	ARG_UNUSED(l);
@@ -55,6 +103,27 @@ static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 	return k;
 }
 
+/**
+ * @brief Unlock a spin lock
+ *
+ * This releases a lock acquired by k_spin_lock(). After this
+ * function is called, any CPU will be able to acquire the lock. If
+ * other CPUs are currently spinning inside k_spin_lock() waiting for
+ * this lock, exactly one of them will return synchronously with the
+ * lock held.
+ *
+ * Spin locks must be properly nested. A call to k_spin_unlock() must
+ * be made on the lock object most recently locked using
+ * k_spin_lock(), using the key value that it returned. Attempts to
+ * unlock mis-nested locks, or to unlock locks that are not held, or
+ * to pass a key parameter other than the one returned from
+ * k_spin_lock(), are illegal. When CONFIG_SPIN_VALIDATE is set, some
+ * of these errors can be detected by the framework.
+ *
+ * @param l A pointer to the spinlock to release
+ * @param key The value returned from k_spin_lock() when this lock was
+ *            acquired
+ */
 static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
 					k_spinlock_key_t key)
 {
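
For reference, a minimal usage sketch of the lock/key discipline documented above. The `counter_lock` and `counter` names are hypothetical, introduced only for illustration; the types and functions are the ones declared in this header:

#include <spinlock.h>

static struct k_spinlock counter_lock;	/* hypothetical lock protecting 'counter' */
static int counter;			/* hypothetical shared data */

void counter_increment(void)
{
	/* Acquire the lock: interrupts are masked on this CPU and the
	 * returned key captures the prior interrupt state.
	 */
	k_spinlock_key_t key = k_spin_lock(&counter_lock);

	counter++;	/* critical section: exactly one CPU at a time */

	/* Release with the key from the matching k_spin_lock() call;
	 * unlocks must nest properly with locks.
	 */
	k_spin_unlock(&counter_lock, key);
}

Note that the key is kept in a local variable and passed straight back to k_spin_unlock(); holding it anywhere longer-lived invites the mis-nesting errors the documentation warns about.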