/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Public interface for spinlocks
 */

#ifndef ZEPHYR_INCLUDE_SPINLOCK_H_
#define ZEPHYR_INCLUDE_SPINLOCK_H_

#include <errno.h>
#include <stdbool.h>

#include <zephyr/arch/cpu.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/time_units.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @brief Spinlock APIs
 * @defgroup spinlock_apis Spinlock APIs
 * @ingroup kernel_apis
 * @{
 */

struct z_spinlock_key {
	int key;
};

/**
 * @brief Kernel Spin Lock
 *
 * This struct defines a spin lock record on which CPUs can wait with
 * k_spin_lock(). Any number of spinlocks may be defined in
 * application code.
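 *
 * For example, an application-level lock might be defined as in the
 * following sketch (the name is illustrative only, not part of this API):
 *
 * @code{.c}
 * static struct k_spinlock my_lock;
 * @endcode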
 */
struct k_spinlock {
/**
 * @cond INTERNAL_HIDDEN
 */
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	/*
	 * Ticket spinlocks are conceptually two atomic variables,
	 * one indicating the current FIFO head (spinlock owner),
	 * and the other indicating the current FIFO tail.
	 * The spinlock is acquired in the following manner:
	 * - the current FIFO tail value is atomically incremented while its
	 *   original value is saved as a "ticket"
	 * - we spin until the FIFO head becomes equal to the ticket value
	 *
	 * The spinlock is released by an atomic increment of the FIFO head
	 */
	atomic_t owner;
	atomic_t tail;
#else
	atomic_t locked;
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */

#ifdef CONFIG_SPIN_VALIDATE
	/* Stores the thread that holds the lock with the locking CPU
	 * ID in the bottom two bits.
	 */
	uintptr_t thread_cpu;
#ifdef CONFIG_SPIN_LOCK_TIME_LIMIT
	/* Stores the time (in cycles) when the lock was taken. */
	uint32_t lock_time;
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#if defined(CONFIG_CPP) && !defined(CONFIG_SMP) && \
	!defined(CONFIG_SPIN_VALIDATE)
	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
	 * the k_spinlock struct will have no members. The result
	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
	 *
	 * This size difference causes problems when the k_spinlock
	 * is embedded into another struct like k_msgq, because C and
	 * C++ will have different ideas on the offsets of the members
	 * that come after the k_spinlock member.
	 *
	 * To prevent this we add a 1 byte dummy member to k_spinlock
	 * when the user selects C++ support and k_spinlock would
	 * otherwise be empty.
	 */
	char dummy;
#endif
/**
 * INTERNAL_HIDDEN @endcond
 */
};

/* There's a spinlock validation framework available when asserts are
 * enabled. It adds a relatively hefty overhead (about 3k or so) to
 * kernel code size, so don't use it on platforms known to be small.
 */
#ifdef CONFIG_SPIN_VALIDATE
bool z_spin_lock_valid(struct k_spinlock *l);
bool z_spin_unlock_valid(struct k_spinlock *l);
void z_spin_lock_set_owner(struct k_spinlock *l);
BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS <= 4, "Too many CPUs for mask");

# ifdef CONFIG_KERNEL_COHERENCE
bool z_spin_lock_mem_coherent(struct k_spinlock *l);
# endif /* CONFIG_KERNEL_COHERENCE */

#endif /* CONFIG_SPIN_VALIDATE */

/**
 * @brief Spinlock key type
 *
 * This type defines a "key" value used by a spinlock implementation
 * to store the system interrupt state at the time of a call to
 * k_spin_lock(). It is expected to be passed to a matching
 * k_spin_unlock().
 *
 * This type is opaque and should not be inspected by application
 * code.
 */
typedef struct z_spinlock_key k_spinlock_key_t;

static ALWAYS_INLINE void z_spinlock_validate_pre(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_lock_valid(l), "Invalid spinlock %p", l);
#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(z_spin_lock_mem_coherent(l));
#endif
#endif
}

static ALWAYS_INLINE void z_spinlock_validate_post(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	z_spin_lock_set_owner(l);
#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	l->lock_time = sys_clock_cycle_get_32();
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */
}

/**
 * @brief Lock a spinlock
 *
 * This routine locks the specified spinlock, returning a key handle
 * representing interrupt state needed at unlock time. Upon
 * returning, the calling thread is guaranteed not to be suspended or
 * interrupted on its current CPU until it calls k_spin_unlock(). The
 * implementation guarantees mutual exclusion: exactly one thread on
 * one CPU will return from k_spin_lock() at a time. Other CPUs
 * trying to acquire a lock already held by another CPU will enter an
 * implementation-defined busy loop ("spinning") until the lock is
 * released.
 *
 * Separate spin locks may be nested. It is legal to lock an
 * (unlocked) spin lock while holding a different lock. Spin locks
 * are not recursive, however: an attempt to acquire a spin lock that
 * the CPU already holds will deadlock.
 *
 * In circumstances where only one CPU exists, the behavior of
 * k_spin_lock() remains as specified above, though obviously no
 * spinning will take place. Implementations are free to optimize
 * in uniprocessor contexts such that the locking reduces to an
 * interrupt mask operation.
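 *
 * A typical critical section looks like the following sketch (the
 * @c my_lock and @c my_counter names are illustrative, not part of
 * this API):
 *
 * @code{.c}
 * static struct k_spinlock my_lock;
 * static unsigned int my_counter;
 *
 * void my_increment(void)
 * {
 *	k_spinlock_key_t key = k_spin_lock(&my_lock);
 *
 *	my_counter++;
 *	k_spin_unlock(&my_lock, key);
 * }
 * @endcode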
 *
 * @param l A pointer to the spinlock to lock
 * @return A key value that must be passed to k_spin_unlock() when the
 *         lock is released.
 */
static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
{
	ARG_UNUSED(l);
	k_spinlock_key_t k;

	/* Note that we need to use the underlying arch-specific lock
	 * implementation. The "irq_lock()" API in SMP context is
	 * actually a wrapper for a global spinlock!
	 */
	k.key = arch_irq_lock();

	z_spinlock_validate_pre(l);
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	/*
	 * Enqueue ourselves at the end of the spinlock waiters queue,
	 * receiving a ticket
	 */
	atomic_val_t ticket = atomic_inc(&l->tail);

	/* Spin until our ticket is served */
	while (atomic_get(&l->owner) != ticket) {
		arch_spin_relax();
	}
#else
	while (!atomic_cas(&l->locked, 0, 1)) {
		arch_spin_relax();
	}
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
	z_spinlock_validate_post(l);

	return k;
}

/**
 * @brief Attempt to lock a spinlock
 *
 * This routine makes one attempt to lock @p l. If it is successful, then
 * it will store the key into @p k.
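 *
 * A sketch of the non-blocking pattern (the @c my_lock name is
 * illustrative only):
 *
 * @code{.c}
 * k_spinlock_key_t key;
 *
 * if (k_spin_trylock(&my_lock, &key) == 0) {
 *	...critical section, lock is held...
 *	k_spin_unlock(&my_lock, key);
 * } else {
 *	...lock was busy, do something else...
 * }
 * @endcode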
 *
 * @param[in] l A pointer to the spinlock to lock
 * @param[out] k A pointer to the spinlock key
 * @retval 0 on success
 * @retval -EBUSY if another thread holds the lock
 *
 * @see k_spin_lock
 * @see k_spin_unlock
 */
static ALWAYS_INLINE int k_spin_trylock(struct k_spinlock *l, k_spinlock_key_t *k)
{
	int key = arch_irq_lock();

	z_spinlock_validate_pre(l);
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	/*
	 * The atomic_get and atomic_cas operations below are not executed
	 * as a single atomic step, so in theory k_spin_trylock could lock an
	 * already locked spinlock. To reproduce this, the following
	 * conditions would have to be met after we executed atomic_get and
	 * before we executed atomic_cas:
	 *
	 * - the spinlock is taken 0xffff_..._ffff + 1 times
	 *   (which requires 0xffff_..._ffff CPUs, as the k_spin_lock call
	 *   is blocking), or
	 * - the spinlock is taken and released 0xffff_..._ffff times and
	 *   then taken again
	 *
	 * In real-life systems this is considered non-reproducible, given
	 * that the required actions would need to happen during this tiny
	 * window of several CPU instructions (which execute with interrupts
	 * locked, so no preemption can happen here).
	 */
	atomic_val_t ticket_val = atomic_get(&l->owner);

	if (!atomic_cas(&l->tail, ticket_val, ticket_val + 1)) {
		goto busy;
	}
#else
	if (!atomic_cas(&l->locked, 0, 1)) {
		goto busy;
	}
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
	z_spinlock_validate_post(l);

	k->key = key;

	return 0;

#ifdef CONFIG_SMP
busy:
	arch_irq_unlock(key);
	return -EBUSY;
#endif /* CONFIG_SMP */
}

/**
 * @brief Unlock a spin lock
 *
 * This releases a lock acquired by k_spin_lock(). After this
 * function is called, any CPU will be able to acquire the lock. If
 * other CPUs are currently spinning inside k_spin_lock() waiting for
 * this lock, exactly one of them will return synchronously with the
 * lock held.
 *
 * Spin locks must be properly nested. A call to k_spin_unlock() must
 * be made on the lock object most recently locked using
 * k_spin_lock(), using the key value that it returned. Attempts to
 * unlock mis-nested locks, or to unlock locks that are not held, or
 * to pass a key parameter other than the one returned from
 * k_spin_lock(), are illegal. When CONFIG_SPIN_VALIDATE is set, some
 * of these errors can be detected by the framework.
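 *
 * Nested locks must therefore be released in the reverse order in
 * which they were acquired, as in this sketch (the @c lock_a and
 * @c lock_b names are illustrative only):
 *
 * @code{.c}
 * k_spinlock_key_t key_a = k_spin_lock(&lock_a);
 * k_spinlock_key_t key_b = k_spin_lock(&lock_b);
 *
 * ...critical section protected by both locks...
 *
 * k_spin_unlock(&lock_b, key_b);
 * k_spin_unlock(&lock_a, key_a);
 * @endcode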
 *
 * @param l A pointer to the spinlock to release
 * @param key The value returned from k_spin_lock() when this lock was
 *            acquired
 */
static ALWAYS_INLINE void k_spin_unlock(struct k_spinlock *l,
					k_spinlock_key_t key)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);

#if defined(CONFIG_SPIN_LOCK_TIME_LIMIT) && (CONFIG_SPIN_LOCK_TIME_LIMIT != 0)
	uint32_t delta = sys_clock_cycle_get_32() - l->lock_time;

	__ASSERT(delta < CONFIG_SPIN_LOCK_TIME_LIMIT,
		 "Spin lock %p held %u cycles, longer than limit of %u cycles",
		 l, delta, CONFIG_SPIN_LOCK_TIME_LIMIT);
#endif /* CONFIG_SPIN_LOCK_TIME_LIMIT */
#endif /* CONFIG_SPIN_VALIDATE */

#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	/* Give the spinlock to the next CPU in the FIFO */
	(void)atomic_inc(&l->owner);
#else
	/* Strictly we don't need atomic_clear() here (which is an
	 * exchange operation that returns the old value). We are always
	 * setting a zero and (because we hold the lock) know the existing
	 * state won't change due to a race. But some architectures need
	 * a memory barrier when used like this, and we don't have a
	 * Zephyr framework for that.
	 */
	(void)atomic_clear(&l->locked);
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
	arch_irq_unlock(key.key);
}

/**
 * @cond INTERNAL_HIDDEN
 */

#if defined(CONFIG_SMP) && defined(CONFIG_TEST)
/*
 * @brief Checks if the spinlock is held by some CPU, including the local CPU.
 *        This API shouldn't be used outside the spinlock tests.
 *
 * @param l A pointer to the spinlock
 * @retval true - if the spinlock is held by some CPU; false - otherwise
 */
static ALWAYS_INLINE bool z_spin_is_locked(struct k_spinlock *l)
{
#ifdef CONFIG_TICKET_SPINLOCKS
	atomic_val_t ticket_val = atomic_get(&l->owner);

	return !atomic_cas(&l->tail, ticket_val, ticket_val);
#else
	return l->locked;
#endif /* CONFIG_TICKET_SPINLOCKS */
}
#endif /* defined(CONFIG_SMP) && defined(CONFIG_TEST) */

/* Internal function: releases the lock, but leaves local interrupts disabled */
static ALWAYS_INLINE void k_spin_release(struct k_spinlock *l)
{
	ARG_UNUSED(l);
#ifdef CONFIG_SPIN_VALIDATE
	__ASSERT(z_spin_unlock_valid(l), "Not my spinlock %p", l);
#endif
#ifdef CONFIG_SMP
#ifdef CONFIG_TICKET_SPINLOCKS
	(void)atomic_inc(&l->owner);
#else
	(void)atomic_clear(&l->locked);
#endif /* CONFIG_TICKET_SPINLOCKS */
#endif /* CONFIG_SMP */
}

#if defined(CONFIG_SPIN_VALIDATE) && defined(__GNUC__)
static ALWAYS_INLINE void z_spin_onexit(__maybe_unused k_spinlock_key_t *k)
{
	__ASSERT(k->key, "K_SPINLOCK exited with goto, break or return, "
			 "use K_SPINLOCK_BREAK instead.");
}
#define K_SPINLOCK_ONEXIT __attribute__((__cleanup__(z_spin_onexit)))
#else
#define K_SPINLOCK_ONEXIT
#endif

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @brief Leaves a code block guarded with @ref K_SPINLOCK after releasing the
 * lock.
 *
 * See @ref K_SPINLOCK for details.
 */
#define K_SPINLOCK_BREAK continue

/**
 * @brief Guards a code block with the given spinlock, automatically acquiring
 * the lock before executing the code block. The lock will be released either
 * when reaching the end of the code block or when leaving the block with
 * @ref K_SPINLOCK_BREAK.
 *
 * @details Example usage:
 *
 * @code{.c}
 * K_SPINLOCK(&mylock) {
 *
 *	...execute statements with the lock held...
 *
 *	if (some_condition) {
 *		...release the lock and leave the guarded section prematurely:
 *		K_SPINLOCK_BREAK;
 *	}
 *
 *	...execute statements with the lock held...
 * }
 * @endcode
 *
 * Behind the scenes this pattern expands to a for-loop whose body is executed
 * exactly once:
 *
 * @code{.c}
 * for (k_spinlock_key_t key = k_spin_lock(&mylock); ...; k_spin_unlock(&mylock, key)) {
 *	...
 * }
 * @endcode
 *
 * @warning The code block must execute to its end or be left by calling
 * @ref K_SPINLOCK_BREAK. Otherwise, e.g. if exiting the block with a break,
 * goto or return statement, the spinlock will not be released on exit.
 *
 * @note In user mode the spinlock must be placed in memory accessible to the
 * application, see @ref K_APP_DMEM and @ref K_APP_BMEM macros for details.
 *
 * @param lck Spinlock used to guard the enclosed code block.
 */
#define K_SPINLOCK(lck) \
	for (k_spinlock_key_t __i K_SPINLOCK_ONEXIT = {}, __key = k_spin_lock(lck); !__i.key; \
	     k_spin_unlock((lck), __key), __i.key = 1)

/** @} */

#ifdef __cplusplus
}
#endif

#endif /* ZEPHYR_INCLUDE_SPINLOCK_H_ */