arch/xtensa: Inline atomics

The xtensa atomics layer was written with hand-coded assembly that had
to be called as functions.  That's needlessly slow, given that the low
level primitives are a two-instruction sequence.  Ideally the compiler
should see this as an inline to permit it to better optimize around
the needed barriers.
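
For context, the primitive in question is Xtensa's SCOMPARE1-based
conditional store.  A minimal sketch of the inlined form (essentially the
pattern the new xtensa_cas() helper below is built on; the function name
here is illustrative only):

    static inline int xtensa_cas_sketch(int *addr, int oldval, int newval)
    {
        /* s32c1i writes newval to *addr only if *addr == SCOMPARE1, and
         * in all cases loads the value that was in memory back into the
         * register that held newval.
         */
        __asm__ volatile("wsr %1, SCOMPARE1; s32c1i %0, %2, 0"
                         : "+r"(newval), "+r"(oldval)
                         : "r"(addr) : "memory");
        return newval; /* previous contents of *addr */
    }

With the asm visible inline, the compiler can register-allocate and schedule
the retry loops built on top of it, instead of paying a call (and, for the
windowed ABI, a register window) for every operation.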

There was also a bug with the atomic_cas function, which had a loop
internally instead of returning the old value synchronously on a
failed swap.  That's benign right now because our existing spin lock
does nothing but retry it in a tight loop anyway, but it's incorrect
per spec and would have caused a contention hang with more elaborate
algorithms (for example a spinlock with backoff semantics).
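
As an illustration of why the return-on-failure semantic matters (a hedged
sketch, not code from this patch), consider a test-and-set lock with
exponential backoff:

    /* Illustrative only.  This relies on atomic_cas() reporting failure
     * synchronously so the caller decides how to retry; an implementation
     * that retries internally takes that decision away and can defeat the
     * backoff entirely.
     */
    void backoff_lock(atomic_t *lock)
    {
        unsigned int delay = 1;

        while (!atomic_cas(lock, 0, 1)) {
            for (volatile unsigned int i = 0; i < delay; i++) {
                /* spin */
            }
            if (delay < 1024U) {
                delay *= 2U;
            }
        }
    }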

Remove the old implementation and replace with a much smaller inline C
one based on just two assembly primitives.

This patch also contains a little bit of refactoring: each atomics
implementation scheme has been split out into a separate header, and the
ATOMIC_OPERATIONS_CUSTOM kconfig has been renamed to
ATOMIC_OPERATIONS_ARCH to better capture what it means.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Authored by Andy Ross on 2021-02-14 11:51:46 -08:00; committed by Anas Nashif
commit 820c94e5dd
9 changed files with 551 additions and 830 deletions

@@ -91,6 +91,7 @@ config XTENSA
select HAS_DTS
select USE_SWITCH
select USE_SWITCH_SUPPORTED
select ATOMIC_OPERATIONS_ARCH
help
Xtensa architecture

@@ -13,7 +13,6 @@ zephyr_library_sources(
irq_manage.c
)
zephyr_library_sources_ifndef(CONFIG_ATOMIC_OPERATIONS_C atomic.S)
zephyr_library_sources_ifdef(CONFIG_XTENSA_USE_CORE_CRT1 crt1.S)
zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)
zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE tls.c)

@@ -1,456 +0,0 @@
/*
* Copyright (c) 2016 Cadence Design Systems, Inc.
* SPDX-License-Identifier: Apache-2.0
*/
/*
* MACROS TO HANDLE ABI SPECIFICS OF FUNCTION ENTRY AND RETURN
*
* Convenient where the frame size requirements are the same for both ABIs.
* ENTRY(sz), RET(sz) are for framed functions (have locals or make calls).
* ENTRY0, RET0 are for frameless functions (no locals, no calls).
*
* where size = size of stack frame in bytes (must be >0 and aligned to 16).
* For framed functions the frame is created and the return address saved at
* base of frame (Call0 ABI) or as determined by hardware (Windowed ABI). For
* frameless functions, there is no frame and return address remains in
* a0.
*
* Note: Because CPP macros expand to a single line, macros requiring
* multi-line expansions are implemented as assembler macros.
*/
#ifdef __XTENSA_CALL0_ABI__
/* Call0 */
#define ENTRY(sz) entry1 sz
.macro entry1 size=0x10
addi sp, sp, -\size
s32i a0, sp, 0
.endm
#define ENTRY0
#define RET(sz) ret1 sz
.macro ret1 size=0x10
l32i a0, sp, 0
addi sp, sp, \size
ret
.endm
#define RET0 ret
#else
/* Windowed */
#define ENTRY(sz) entry sp, sz
#define ENTRY0 entry sp, 0x10
#define RET(sz) retw
#define RET0 retw
#endif /* __XTENSA_CALL0_ABI__ */
/**
*
* @brief Atomically clear a memory location
*
* This routine atomically clears the contents of <target> and returns the old
* value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* atomic_val_t atomic_clear
* (
* atomic_t *target /@ memory location to clear @/
* )
*/
.global atomic_clear
.type atomic_clear,@function
.global atomic_ptr_clear
.type atomic_ptr_clear,@function
.align 4
atomic_clear:
atomic_ptr_clear:
ENTRY(48)
movi a4, 0
.L_LoopClear:
l32ai a3, a2, 0
wsr a3, scompare1
s32c1i a4, a2, 0
bne a3, a4, .L_LoopClear
mov a2, a3
RET(48)
/**
*
* @brief Atomically set a memory location
*
* This routine atomically sets the contents of <target> to <value> and returns
* the old value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* atomic_val_t atomic_set
* (
* atomic_t *target, /@ memory location to set @/
* atomic_val_t value /@ set with this value @/
* )
*
*/
.global atomic_set
.type atomic_set,@function
.global atomic_ptr_set
.type atomic_ptr_set,@function
.align 4
atomic_set:
atomic_ptr_set:
ENTRY(48)
.L_LoopSet:
l32ai a4, a2, 0
wsr a4, scompare1
s32c1i a3, a2, 0
bne a3, a4, .L_LoopSet
mov a2, a3
RET(48)
/**
*
* @brief Get the value of a shared memory atomically
*
* This routine atomically retrieves the value in *target
*
* long atomic_get
* (
* atomic_t * target /@ address of atom to be retrieved @/
* )
*
* @return value read from address target.
*
*/
.global atomic_get
.type atomic_get,@function
.global atomic_ptr_get
.type atomic_ptr_get,@function
.align 4
atomic_get:
atomic_ptr_get:
ENTRY(48)
l32ai a2, a2, 0
RET(48)
/**
*
* @brief Atomically increment a memory location
*
* This routine atomically increments the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may
* impose restrictions with regards to the alignment and cache attributes of
* the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* atomic_val_t atomic_inc
* (
* atomic_t *target, /@ memory location to increment @/
* )
*
*/
.global atomic_inc
.type atomic_inc,@function
.align 4
atomic_inc:
ENTRY(48)
.L_LoopInc:
l32ai a3, a2, 0
wsr a3, scompare1
addi a4, a3, 1
s32c1i a4, a2, 0
bne a3, a4, .L_LoopInc
mov a2, a3
RET(48)
/**
*
* @brief Atomically add a value to a memory location
*
* This routine atomically adds the contents of <target> and <value>, placing
* the result in <target>. The operation is done using signed integer
* arithmetic. Various CPU architectures may impose restrictions with regards
* to the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* atomic_val_t atomic_add
* (
* atomic_t *target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/
* )
*/
.global atomic_add
.type atomic_add,@function
.align 4
atomic_add:
ENTRY(48)
.L_LoopAdd:
l32ai a4, a2, 0
wsr a4, scompare1
add a5, a3, a4
s32c1i a5, a2, 0
bne a5, a4, .L_LoopAdd
mov a2, a5
RET(48)
/**
*
* @brief Atomically decrement a memory location
*
* This routine atomically decrements the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* atomic_val_t atomic_dec
* (
* atomic_t *target, /@ memory location to decrement @/
* )
*
*/
.global atomic_dec
.type atomic_dec,@function
.align 4
atomic_dec:
ENTRY(48)
.L_LoopDec:
l32ai a3, a2, 0
wsr a3, scompare1
addi a4, a3, -1
s32c1i a4, a2, 0
bne a3, a4, .L_LoopDec
mov a2, a3
RET(48)
/**
*
* @brief Atomically subtract a value from a memory location
*
* This routine atomically subtracts <value> from the contents of <target>,
* placing the result in <target>. The operation is done using signed integer
* arithmetic. Various CPU architectures may impose restrictions with regards to
* the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* atomic_val_t atomic_sub
* (
* atomic_t *target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/
* )
*
*/
.global atomic_sub
.type atomic_sub,@function
.align 4
atomic_sub:
ENTRY(48)
.L_LoopSub:
l32ai a4, a2, 0
wsr a4, scompare1
sub a5, a4, a3
s32c1i a5, a2, 0
bne a5, a4, .L_LoopSub
mov a2, a5
RET(48)
/**
*
* @brief Atomically perform a bitwise NAND on a memory location
*
* This routine atomically performs a bitwise NAND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* atomic_val_t atomic_nand
* (
* atomic_t *target, /@ memory location to NAND @/
* atomic_val_t value /@ NAND with this value @/
* )
*
*/
.global atomic_nand
.type atomic_nand,@function
.align 4
atomic_nand:
ENTRY(48)
.L_LoopNand:
l32ai a4, a2, 0
wsr a4, scompare1
and a5, a3, a4
neg a5, a5
addi a5, a5, -1
s32c1i a5, a2, 0
bne a5, a4, .L_LoopNand
mov a2, a4
RET(48)
/**
*
* @brief Atomically perform a bitwise AND on a memory location
*
* This routine atomically performs a bitwise AND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* atomic_val_t atomic_and
* (
* atomic_t *target, /@ memory location to AND @/
* atomic_val_t value /@ AND with this value @/
* )
*
*/
.global atomic_and
.type atomic_and,@function
.align 4
atomic_and:
ENTRY(48)
.L_LoopAnd:
l32ai a4, a2, 0
wsr a4, scompare1
and a5, a3, a4
s32c1i a5, a2, 0
bne a5, a4, .L_LoopAnd
mov a2, a4
RET(48)
/**
*
* @brief Atomically perform a bitwise OR on memory location
*
* This routine atomically performs a bitwise OR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* atomic_val_t atomic_or
* (
* atomic_t *target, /@ memory location to OR @/
* atomic_val_t value /@ OR with this value @/
* )
*
*/
.global atomic_or
.type atomic_or,@function
.align 4
atomic_or:
ENTRY(48)
.L_LoopOr:
l32ai a4, a2, 0
wsr a4, scompare1
or a5, a3, a4
s32c1i a5, a2, 0
bne a4, a5, .L_LoopOr
mov a2, a4
RET(48)
/**
*
* @brief Atomically perform a bitwise XOR on a memory location
*
* This routine atomically performs a bitwise XOR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* atomic_val_t atomic_xor
* (
* atomic_t *target, /@ memory location to XOR @/
* atomic_val_t value /@ XOR with this value @/
* )
*
*/
.global atomic_xor
.type atomic_xor,@function
.align 4
atomic_xor:
ENTRY(48)
.L_LoopXor:
l32ai a4, a2, 0
wsr a4, scompare1
xor a5, a3, a4
s32c1i a5, a2, 0
bne a5, a4, .L_LoopXor
mov a2, a4
RET(48)
/**
*
* @brief Atomically compare-and-swap the contents of a memory location
*
* This routine performs an atomic compare-and-swap, testing that the contents
* of <target> contains <oldValue>, and if it does, setting the value of
* <target> to <newValue>. Various CPU architectures may impose restrictions
* with regards to the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return 1 if the swap is actually executed, 0 otherwise.
*
* int atomic_cas
* (
* atomic_t *target, /@ memory location to compare-and-swap @/
* atomic_val_t oldValue, /@ compare to this value @/
* atomic_val_t newValue, /@ swap with this value @/
* )
*
*/
.global atomic_cas
.type atomic_cas,@function
.global atomic_ptr_cas
.type atomic_ptr_cas,@function
.align 4
atomic_cas:
atomic_ptr_cas:
ENTRY(48)
l32ai a5, a2, 0
beq a5, a3, 2f
1:
movi a2, 0
j 3f
2:
wsr a5, scompare1
s32c1i a4, a2, 0
bne a4, a5, 1b
movi a2, 1
3:
RET(48)

@@ -107,7 +107,7 @@ Configuration Options
Related configuration options:
* :option:`CONFIG_ATOMIC_OPERATIONS_BUILTIN`
* :option:`CONFIG_ATOMIC_OPERATIONS_CUSTOM`
* :option:`CONFIG_ATOMIC_OPERATIONS_ARCH`
* :option:`CONFIG_ATOMIC_OPERATIONS_C`
API Reference

@@ -0,0 +1,143 @@
/**
* Copyright (c) 2021 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_
#define ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_
/* Included from <sys/atomic.h> */
/* Recent GCC versions actually do have working atomics support on
* Xtensa (and so should work with CONFIG_ATOMIC_OPERATIONS_BUILTIN),
* but existing versions of Xtensa's XCC do not. So we define an
* inline implementation here that is more or less identical.
*/
static ALWAYS_INLINE atomic_val_t atomic_get(const atomic_t *target)
{
atomic_val_t ret;
/* Actual Xtensa hardware seems to have only in-order
* pipelines, but the architecture does define a barrier load,
* so use it. There is a matching s32ri instruction, but
* nothing in the Zephyr API requires a barrier store (all the
* atomic write ops have exchange semantics).
*/
__asm__ volatile("l32ai %0, %1, 0"
: "=r"(ret) : "r"(target) : "memory");
return ret;
}
static ALWAYS_INLINE
atomic_val_t xtensa_cas(atomic_t *addr, atomic_val_t oldval,
atomic_val_t newval)
{
__asm__ volatile("wsr %1, SCOMPARE1; s32c1i %0, %2, 0"
: "+r"(newval), "+r"(oldval) : "r"(addr) : "memory");
return newval; /* got swapped with the old memory by s32c1i */
}
static ALWAYS_INLINE
bool atomic_cas(atomic_t *target, atomic_val_t oldval, atomic_val_t newval)
{
return oldval == xtensa_cas(target, oldval, newval);
}
static ALWAYS_INLINE
bool atomic_ptr_cas(atomic_ptr_t *target, void *oldval, void *newval)
{
return (atomic_val_t) oldval
== xtensa_cas((atomic_t *) target, (atomic_val_t) oldval,
(atomic_val_t) newval);
}
/* Generates an atomic exchange sequence that swaps the value at
* address "target", whose old value is read to be "cur", with the
* specified expression. Evaluates to the old value which was
* atomically replaced.
*/
#define Z__GEN_ATOMXCHG(expr) ({ \
atomic_val_t res, cur; \
do { \
cur = *target; \
res = xtensa_cas(target, cur, (expr)); \
} while (res != cur); \
res; })
static ALWAYS_INLINE
atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
return Z__GEN_ATOMXCHG(value);
}
static ALWAYS_INLINE
atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
return Z__GEN_ATOMXCHG(cur + value);
}
static ALWAYS_INLINE
atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
return Z__GEN_ATOMXCHG(cur - value);
}
static ALWAYS_INLINE
atomic_val_t atomic_inc(atomic_t *target)
{
return Z__GEN_ATOMXCHG(cur + 1);
}
static ALWAYS_INLINE
atomic_val_t atomic_dec(atomic_t *target)
{
return Z__GEN_ATOMXCHG(cur - 1);
}
static ALWAYS_INLINE atomic_val_t atomic_or(atomic_t *target,
atomic_val_t value)
{
return Z__GEN_ATOMXCHG(cur | value);
}
static ALWAYS_INLINE atomic_val_t atomic_xor(atomic_t *target,
atomic_val_t value)
{
return Z__GEN_ATOMXCHG(cur ^ value);
}
static ALWAYS_INLINE atomic_val_t atomic_and(atomic_t *target,
atomic_val_t value)
{
return Z__GEN_ATOMXCHG(cur & value);
}
static ALWAYS_INLINE atomic_val_t atomic_nand(atomic_t *target,
atomic_val_t value)
{
return Z__GEN_ATOMXCHG(~(cur & value));
}
static ALWAYS_INLINE void *atomic_ptr_get(const atomic_ptr_t *target)
{
return (void *) atomic_get((atomic_t *)target);
}
static ALWAYS_INLINE void *atomic_ptr_set(atomic_ptr_t *target, void *value)
{
return (void *) atomic_set((atomic_t *) target, (atomic_val_t) value);
}
static ALWAYS_INLINE atomic_val_t atomic_clear(atomic_t *target)
{
return atomic_set(target, 0);
}
static ALWAYS_INLINE void *atomic_ptr_clear(atomic_ptr_t *target)
{
return (void *) atomic_set((atomic_t *) target, 0);
}
#endif /* ZEPHYR_INCLUDE_ATOMIC_XTENSA_H_ */
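
Since these are now ordinary inline functions, a short usage sketch of the
semantics the Zephyr API guarantees (the values and the demo function are
hypothetical, not part of the patch):

    #include <stdbool.h>
    #include <sys/atomic.h>

    static atomic_t counter = ATOMIC_INIT(3);

    void atomic_demo(void)
    {
        /* Read-modify-write ops return the previous value. */
        atomic_val_t old = atomic_add(&counter, 2);  /* old == 3, counter == 5 */

        /* atomic_cas() writes only when the current value matches, and
         * returns false otherwise, leaving the target untouched.
         */
        bool swapped = atomic_cas(&counter, 4, 9);   /* false, counter still 5 */

        swapped = atomic_cas(&counter, 5, 9);        /* true, counter == 9 */

        (void)old;
        (void)swapped;
    }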

@@ -1,7 +1,6 @@
/* atomic operations */
/*
* Copyright (c) 1997-2015, Wind River Systems, Inc.
* Copyright (c) 2021 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -23,376 +22,30 @@ typedef int atomic_t;
typedef atomic_t atomic_val_t;
typedef void *atomic_ptr_t;
/* Low-level primitives come in several styles: */
#if defined(CONFIG_ATOMIC_OPERATIONS_BUILTIN)
/* Default. See this file for the Doxygen reference: */
#include <sys/atomic_builtin.h>
#elif defined(CONFIG_ATOMIC_OPERATIONS_ARCH)
/* Some architectures need their own implementation */
# ifdef CONFIG_XTENSA
/* Not all Xtensa toolchains support GCC-style atomic intrinsics */
# include <arch/xtensa/atomic_xtensa.h>
# endif
#else
/* Generic-but-slow implementation based on kernel locking and syscalls */
#include <sys/atomic_c.h>
#endif
/* Portable higher-level utilities: */
/**
* @defgroup atomic_apis Atomic Services APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Atomic compare-and-set.
*
* This routine performs an atomic compare-and-set on @a target. If the current
* value of @a target equals @a old_value, @a target is set to @a new_value.
* If the current value of @a target does not equal @a old_value, @a target
* is left unchanged.
*
* @param target Address of atomic variable.
* @param old_value Original value to compare against.
* @param new_value New value to store.
* @return true if @a new_value is written, false otherwise.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline bool atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value)
{
return __atomic_compare_exchange_n(target, &old_value, new_value,
0, __ATOMIC_SEQ_CST,
__ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall bool atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value);
#else
extern bool atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value);
#endif
/**
* @brief Atomic compare-and-set with pointer values
*
* This routine performs an atomic compare-and-set on @a target. If the current
* value of @a target equals @a old_value, @a target is set to @a new_value.
* If the current value of @a target does not equal @a old_value, @a target
* is left unchanged.
*
* @param target Address of atomic variable.
* @param old_value Original value to compare against.
* @param new_value New value to store.
* @return true if @a new_value is written, false otherwise.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline bool atomic_ptr_cas(atomic_ptr_t *target, void *old_value,
void *new_value)
{
return __atomic_compare_exchange_n(target, &old_value, new_value,
0, __ATOMIC_SEQ_CST,
__ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall bool atomic_ptr_cas(atomic_ptr_t *target, void *old_value,
void *new_value);
#else
extern bool atomic_ptr_cas(atomic_ptr_t *target, void *old_value,
void *new_value);
#endif
/**
*
* @brief Atomic addition.
*
* This routine performs an atomic addition on @a target.
*
* @param target Address of atomic variable.
* @param value Value to add.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_add(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic subtraction.
*
* This routine performs an atomic subtraction on @a target.
*
* @param target Address of atomic variable.
* @param value Value to subtract.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_sub(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic increment.
*
* This routine performs an atomic increment by 1 on @a target.
*
* @param target Address of atomic variable.
*
* @return Previous value of @a target.
*/
#if defined(CONFIG_ATOMIC_OPERATIONS_BUILTIN) || defined (CONFIG_ATOMIC_OPERATIONS_C)
static inline atomic_val_t atomic_inc(atomic_t *target)
{
return atomic_add(target, 1);
}
#else
extern atomic_val_t atomic_inc(atomic_t *target);
#endif
/**
*
* @brief Atomic decrement.
*
* This routine performs an atomic decrement by 1 on @a target.
*
* @param target Address of atomic variable.
*
* @return Previous value of @a target.
*/
#if defined(CONFIG_ATOMIC_OPERATIONS_BUILTIN) || defined (CONFIG_ATOMIC_OPERATIONS_C)
static inline atomic_val_t atomic_dec(atomic_t *target)
{
return atomic_sub(target, 1);
}
#else
extern atomic_val_t atomic_dec(atomic_t *target);
#endif
/**
*
* @brief Atomic get.
*
* This routine performs an atomic read on @a target.
*
* @param target Address of atomic variable.
*
* @return Value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_get(const atomic_t *target)
{
return __atomic_load_n(target, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_get(const atomic_t *target);
#endif
/**
*
* @brief Atomic get a pointer value
*
* This routine performs an atomic read on @a target.
*
* @param target Address of pointer variable.
*
* @return Value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline void *atomic_ptr_get(const atomic_ptr_t *target)
{
return __atomic_load_n(target, __ATOMIC_SEQ_CST);
}
#else
extern void *atomic_ptr_get(const atomic_ptr_t *target);
#endif
/**
*
* @brief Atomic get-and-set.
*
* This routine atomically sets @a target to @a value and returns
* the previous value of @a target.
*
* @param target Address of atomic variable.
* @param value Value to write to @a target.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
/* This builtin, as described by Intel, is not a traditional
* test-and-set operation, but rather an atomic exchange operation. It
* writes value into *ptr, and returns the previous contents of *ptr.
*/
return __atomic_exchange_n(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic get-and-set for pointer values
*
* This routine atomically sets @a target to @a value and returns
* the previous value of @a target.
*
* @param target Address of atomic variable.
* @param value Value to write to @a target.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline void *atomic_ptr_set(atomic_ptr_t *target, void *value)
{
return __atomic_exchange_n(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall void *atomic_ptr_set(atomic_ptr_t *target, void *value);
#else
extern void *atomic_ptr_set(atomic_ptr_t *target, void *value);
#endif
/**
*
* @brief Atomic clear.
*
* This routine atomically sets @a target to zero and returns its previous
* value. (Hence, it is equivalent to atomic_set(target, 0).)
*
* @param target Address of atomic variable.
*
* @return Previous value of @a target.
*/
#if defined(CONFIG_ATOMIC_OPERATIONS_BUILTIN) || defined (CONFIG_ATOMIC_OPERATIONS_C)
static inline atomic_val_t atomic_clear(atomic_t *target)
{
return atomic_set(target, 0);
}
#else
extern atomic_val_t atomic_clear(atomic_t *target);
#endif
/**
*
* @brief Atomic clear of a pointer value
*
* This routine atomically sets @a target to zero and returns its previous
* value. (Hence, it is equivalent to atomic_set(target, 0).)
*
* @param target Address of atomic variable.
*
* @return Previous value of @a target.
*/
#if defined(CONFIG_ATOMIC_OPERATIONS_BUILTIN) || \
defined (CONFIG_ATOMIC_OPERATIONS_C)
static inline void *atomic_ptr_clear(atomic_ptr_t *target)
{
return atomic_ptr_set(target, NULL);
}
#else
extern void *atomic_ptr_clear(atomic_ptr_t *target);
#endif
/**
*
* @brief Atomic bitwise inclusive OR.
*
* This routine atomically sets @a target to the bitwise inclusive OR of
* @a target and @a value.
*
* @param target Address of atomic variable.
* @param value Value to OR.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_or(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic bitwise exclusive OR (XOR).
*
* This routine atomically sets @a target to the bitwise exclusive OR (XOR) of
* @a target and @a value.
*
* @param target Address of atomic variable.
* @param value Value to XOR
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_xor(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic bitwise AND.
*
* This routine atomically sets @a target to the bitwise AND of @a target
* and @a value.
*
* @param target Address of atomic variable.
* @param value Value to AND.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_and(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
#endif
/**
*
* @brief Atomic bitwise NAND.
*
* This routine atomically sets @a target to the bitwise NAND of @a target
* and @a value. (This operation is equivalent to target = ~(target & value).)
*
* @param target Address of atomic variable.
* @param value Value to NAND.
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
static inline atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_nand(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value);
#endif
/**
* @brief Initialize an atomic variable.
*
@@ -555,11 +208,7 @@ static inline void atomic_set_bit_to(atomic_t *target, int bit, bool val)
*/
#ifdef __cplusplus
}
#endif
#ifdef CONFIG_ATOMIC_OPERATIONS_C
#include <syscalls/atomic.h>
} /* extern "C" */
#endif
#endif /* ZEPHYR_INCLUDE_SYS_ATOMIC_H_ */

@@ -0,0 +1,307 @@
/* atomic operations */
/*
* Copyright (c) 1997-2015, Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_SYS_ATOMIC_BUILTIN_H_
#define ZEPHYR_INCLUDE_SYS_ATOMIC_BUILTIN_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Included from <atomic.h> */
/**
* @addtogroup atomic_apis Atomic Services APIs
* @ingroup kernel_apis
* @{
*/
/**
* @brief Atomic compare-and-set.
*
* This routine performs an atomic compare-and-set on @a target. If the current
* value of @a target equals @a old_value, @a target is set to @a new_value.
* If the current value of @a target does not equal @a old_value, @a target
* is left unchanged.
*
* @param target Address of atomic variable.
* @param old_value Original value to compare against.
* @param new_value New value to store.
* @return true if @a new_value is written, false otherwise.
*/
static inline bool atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value)
{
return __atomic_compare_exchange_n(target, &old_value, new_value,
0, __ATOMIC_SEQ_CST,
__ATOMIC_SEQ_CST);
}
/**
* @brief Atomic compare-and-set with pointer values
*
* This routine performs an atomic compare-and-set on @a target. If the current
* value of @a target equals @a old_value, @a target is set to @a new_value.
* If the current value of @a target does not equal @a old_value, @a target
* is left unchanged.
*
* @param target Address of atomic variable.
* @param old_value Original value to compare against.
* @param new_value New value to store.
* @return true if @a new_value is written, false otherwise.
*/
static inline bool atomic_ptr_cas(atomic_ptr_t *target, void *old_value,
void *new_value)
{
return __atomic_compare_exchange_n(target, &old_value, new_value,
0, __ATOMIC_SEQ_CST,
__ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic addition.
*
* This routine performs an atomic addition on @a target.
*
* @param target Address of atomic variable.
* @param value Value to add.
*
* @return Previous value of @a target.
*/
static inline atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_add(target, value, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic subtraction.
*
* This routine performs an atomic subtraction on @a target.
*
* @param target Address of atomic variable.
* @param value Value to subtract.
*
* @return Previous value of @a target.
*/
static inline atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_sub(target, value, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic increment.
*
* This routine performs an atomic increment by 1 on @a target.
*
* @param target Address of atomic variable.
*
* @return Previous value of @a target.
*/
static inline atomic_val_t atomic_inc(atomic_t *target)
{
return atomic_add(target, 1);
}
/**
*
* @brief Atomic decrement.
*
* This routine performs an atomic decrement by 1 on @a target.
*
* @param target Address of atomic variable.
*
* @return Previous value of @a target.
*/
static inline atomic_val_t atomic_dec(atomic_t *target)
{
return atomic_sub(target, 1);
}
/**
*
* @brief Atomic get.
*
* This routine performs an atomic read on @a target.
*
* @param target Address of atomic variable.
*
* @return Value of @a target.
*/
static inline atomic_val_t atomic_get(const atomic_t *target)
{
return __atomic_load_n(target, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic get a pointer value
*
* This routine performs an atomic read on @a target.
*
* @param target Address of pointer variable.
*
* @return Value of @a target.
*/
static inline void *atomic_ptr_get(const atomic_ptr_t *target)
{
return __atomic_load_n(target, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic get-and-set.
*
* This routine atomically sets @a target to @a value and returns
* the previous value of @a target.
*
* @param target Address of atomic variable.
* @param value Value to write to @a target.
*
* @return Previous value of @a target.
*/
static inline atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
/* This builtin, as described by Intel, is not a traditional
* test-and-set operation, but rather an atomic exchange operation. It
* writes value into *ptr, and returns the previous contents of *ptr.
*/
return __atomic_exchange_n(target, value, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic get-and-set for pointer values
*
* This routine atomically sets @a target to @a value and returns
* the previous value of @a target.
*
* @param target Address of atomic variable.
* @param value Value to write to @a target.
*
* @return Previous value of @a target.
*/
static inline void *atomic_ptr_set(atomic_ptr_t *target, void *value)
{
return __atomic_exchange_n(target, value, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic clear.
*
* This routine atomically sets @a target to zero and returns its previous
* value. (Hence, it is equivalent to atomic_set(target, 0).)
*
* @param target Address of atomic variable.
*
* @return Previous value of @a target.
*/
static inline atomic_val_t atomic_clear(atomic_t *target)
{
return atomic_set(target, 0);
}
/**
*
* @brief Atomic clear of a pointer value
*
* This routine atomically sets @a target to zero and returns its previous
* value. (Hence, it is equivalent to atomic_set(target, 0).)
*
* @param target Address of atomic variable.
*
* @return Previous value of @a target.
*/
static inline void *atomic_ptr_clear(atomic_ptr_t *target)
{
return atomic_ptr_set(target, NULL);
}
/**
*
* @brief Atomic bitwise inclusive OR.
*
* This routine atomically sets @a target to the bitwise inclusive OR of
* @a target and @a value.
*
* @param target Address of atomic variable.
* @param value Value to OR.
*
* @return Previous value of @a target.
*/
static inline atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_or(target, value, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic bitwise exclusive OR (XOR).
*
* This routine atomically sets @a target to the bitwise exclusive OR (XOR) of
* @a target and @a value.
*
* @param target Address of atomic variable.
* @param value Value to XOR
*
* @return Previous value of @a target.
*/
static inline atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_xor(target, value, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic bitwise AND.
*
* This routine atomically sets @a target to the bitwise AND of @a target
* and @a value.
*
* @param target Address of atomic variable.
* @param value Value to AND.
*
* @return Previous value of @a target.
*/
static inline atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_and(target, value, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic bitwise NAND.
*
* This routine atomically sets @a target to the bitwise NAND of @a target
* and @a value. (This operation is equivalent to target = ~(target & value).)
*
* @param target Address of atomic variable.
* @param value Value to NAND.
*
* @return Previous value of @a target.
*/
static inline atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_nand(target, value, __ATOMIC_SEQ_CST);
}
/** @} */
#ifdef __cplusplus
}
#endif
#ifdef CONFIG_ATOMIC_OPERATIONS_C
#include <syscalls/atomic.h>
#endif
#endif /* ZEPHYR_INCLUDE_SYS_ATOMIC_BUILTIN_H_ */

include/sys/atomic_c.h (new file)

@@ -0,0 +1,78 @@
/*
* Copyright (c) 1997-2015, Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_SYS_ATOMIC_C_H_
#define ZEPHYR_INCLUDE_SYS_ATOMIC_C_H_
/* Included from <atomic.h> */
#ifdef __cplusplus
extern "C" {
#endif
/* Simple and correct (but very slow) implementation of atomic
* primitives that require nothing more than kernel interrupt locking.
*/
__syscall bool atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value);
__syscall bool atomic_ptr_cas(atomic_ptr_t *target, void *old_value,
void *new_value);
__syscall atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
__syscall atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
static inline atomic_val_t atomic_inc(atomic_t *target)
{
return atomic_add(target, 1);
}
static inline atomic_val_t atomic_dec(atomic_t *target)
{
return atomic_sub(target, 1);
}
extern atomic_val_t atomic_get(const atomic_t *target);
extern void *atomic_ptr_get(const atomic_ptr_t *target);
__syscall atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
__syscall void *atomic_ptr_set(atomic_ptr_t *target, void *value);
static inline atomic_val_t atomic_clear(atomic_t *target)
{
return atomic_set(target, 0);
}
static inline void *atomic_ptr_clear(atomic_ptr_t *target)
{
return atomic_ptr_set(target, NULL);
}
__syscall atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
__syscall atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
__syscall atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
__syscall atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value);
#ifdef __cplusplus
}
#endif
#ifdef CONFIG_ATOMIC_OPERATIONS_C
#include <syscalls/atomic_c.h>
#endif
#endif /* ZEPHYR_INCLUDE_SYS_ATOMIC_C_H_ */

@@ -415,7 +415,7 @@ config ATOMIC_OPERATIONS_BUILTIN
the preferred method. However, support for all arches in GCC is
incomplete.
config ATOMIC_OPERATIONS_CUSTOM
config ATOMIC_OPERATIONS_ARCH
bool
help
Use when there isn't support for compiler built-ins, but you have