nanokernel: support GCC compiler atomic builtins

Arches now select whether they want to use the GCC built-ins,
their own assembly implementation, or the generic C code.

At the moment, the SDK compilers only support builtins for ARM
and X86. ZEP-557 has been opened to investigate further.

Change-Id: I53e411b4967d87f737338379bd482bd653f19422
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie 2016-07-15 13:15:00 -07:00
commit 6a1474e75b
9 changed files with 259 additions and 813 deletions
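
Whichever backend an arch selects, callers use the same <atomic.h> API. For
illustration, a minimal usage sketch (hypothetical; the counter and function
names are invented, not part of this change):

#include <atomic.h>

static atomic_t evt_count = ATOMIC_INIT(0);

void record_event(void)
{
	/* atomic_inc() returns the value *before* the increment */
	(void)atomic_inc(&evt_count);
}

atomic_val_t drain_events(void)
{
	/* atomically fetch the current count and reset it to zero */
	return atomic_clear(&evt_count);
}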


@@ -27,13 +27,16 @@ config ARC
config ARM
bool "ARM architecture"
+select ATOMIC_OPERATIONS_BUILTIN
config X86
bool "x86 architecture"
select NANOKERNEL_TICKLESS_IDLE_SUPPORTED
+select ATOMIC_OPERATIONS_BUILTIN
config NIOS2
bool "Nios II Gen 2 architecture"
+select ATOMIC_OPERATIONS_C
endchoice


@@ -14,9 +14,8 @@ obj-y += prep_c.o \
obj-$(CONFIG_IRQ_OFFLOAD) += irq_offload.o
-ifneq ($(CONFIG_ATOMIC_OPERATIONS_C),y)
-obj-y += atomic.o
-endif
+# Some ARC cores like the EM4 lack the atomic LLOCK/SCOND and
+# can't use these.
+obj-$(CONFIG_ATOMIC_OPERATIONS_CUSTOM) += atomic.o
obj-$(CONFIG_IRQ_VECTOR_TABLE_BSP) += irq_vector_table.o
obj-$(CONFIG_SW_ISR_TABLE) += sw_isr_table.o


@@ -3,7 +3,7 @@ ccflags-y += -I$(srctree)/kernel/microkernel/include
asflags-y := ${ccflags-y}
-obj-y = atomic.o exc_exit.o irq_init.o \
+obj-y = exc_exit.o irq_init.o \
fiber_abort.o swap.o \
fault.o gdb_stub_irq_vector_table.o \
irq_manage.o thread.o cpu_idle.o \


@@ -1,415 +0,0 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief ARM atomic operations library
*
* This library provides routines to perform a number of atomic operations
* on a memory location: add, subtract, increment, decrement, bitwise OR,
* bitwise XOR, bitwise AND, bitwise NAND, set, clear and compare-and-swap.
*/
#define _ASMLANGUAGE
#include <toolchain.h>
#include <sections.h>
_ASM_FILE_PROLOGUE
/* exports */
GTEXT(atomic_set)
GTEXT(atomic_get)
GTEXT(atomic_add)
GTEXT(atomic_nand)
GTEXT(atomic_and)
GTEXT(atomic_or)
GTEXT(atomic_xor)
GTEXT(atomic_clear)
GTEXT(atomic_dec)
GTEXT(atomic_inc)
GTEXT(atomic_sub)
GTEXT(atomic_cas)
/**
*
* @brief Atomically clear a memory location
*
* This routine atomically clears the contents of <target> and returns the old
* value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_clear
* (
* atomic_t *target /@ memory location to clear @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_clear)
MOV r1, #0
/* fall through into atomic_set */
/**
*
* @brief Atomically set a memory location
*
* This routine atomically sets the contents of <target> to <value> and returns
* the old value that was in <target>.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_set
* (
* atomic_t *target, /@ memory location to set @/
* atomic_val_t value /@ set with this value @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_clear_set, atomic_set)
LDREX r2, [r0] /* load old value and mark exclusive access */
STREX r12, r1, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_set /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/**
*
* @brief Get the value of a shared memory atomically
*
* This routine atomically retrieves the value in *target
*
* long atomic_get
* (
* atomic_t * target /@ address of atom to be retrieved @/
* )
*
* RETURN: value read from address target.
*
*/
SECTION_FUNC(TEXT, atomic_get)
LDR r0, [r0]
MOV pc, lr
/**
*
* @brief Atomically increment a memory location
*
* This routine atomically increments the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_inc
* (
* atomic_t *target, /@ memory location to increment @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_inc)
MOV r1, #1
/* fall through into atomic_add */
/**
*
* @brief Atomically add a value to a memory location
*
* This routine atomically adds the contents of <target> and <value>, placing
* the result in <target>. The operation is done using signed integer arithmetic.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_add
* (
* atomic_t *target, /@ memory location to add to @/
* atomic_val_t value /@ value to add @/
* )
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_inc_add, atomic_add)
LDREX r2, [r0] /* load old value and mark exclusive access */
ADD r3, r2, r1 /* add word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_add /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/**
*
* @brief Atomically decrement a memory location
*
* This routine atomically decrements the value in <target>. The operation is
* done using unsigned integer arithmetic. Various CPU architectures may impose
* restrictions with regards to the alignment and cache attributes of the
* atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_dec
* (
* atomic_t *target, /@ memory location to decrement @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_dec)
MOV r1, #1
/* fall through into atomic_sub */
/**
*
* @brief Atomically subtract a value from a memory location
*
* This routine atomically subtracts <value> from the contents of <target>,
* placing the result in <target>. The operation is done using signed integer
* arithmetic. Various CPU architectures may impose restrictions with regards to
* the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_sub
* (
* atomic_t *target, /@ memory location to subtract from @/
* atomic_val_t value /@ value to subtract @/
* )
*
*/
SECTION_SUBSEC_FUNC(TEXT, atomic_decSub, atomic_sub)
LDREX r2, [r0] /* load old value and mark exclusive access */
SUB r3, r2, r1 /* subtract word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_sub /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/**
*
* @brief Atomically perform a bitwise NAND on a memory location
*
* This routine atomically performs a bitwise NAND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_nand
* (
* atomic_t *target, /@ memory location to NAND @/
* atomic_val_t value /@ NAND with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_nand)
LDREX r2, [r0] /* load old value and mark exclusive access */
AND r3, r2, r1 /* AND word */
MVN r3, r3 /* invert */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_nand /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/**
*
* @brief Atomically perform a bitwise AND on a memory location
*
* This routine atomically performs a bitwise AND operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_and
* (
* atomic_t *target, /@ memory location to AND @/
* atomic_val_t value /@ AND with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_and)
LDREX r2, [r0] /* load old value and mark exclusive access */
AND r3, r2, r1 /* AND word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_and /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/**
*
* @brief Atomically perform a bitwise OR on memory location
*
* This routine atomically performs a bitwise OR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_or
* (
* atomic_t *target, /@ memory location to OR @/
* atomic_val_t value /@ OR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_or)
LDREX r2, [r0] /* load old value and mark exclusive access */
ORR r3, r2, r1 /* OR word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_or /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/**
*
* @brief Atomically perform a bitwise XOR on a memory location
*
* This routine atomically performs a bitwise XOR operation of the contents of
* <target> and <value>, placing the result in <target>.
* Various CPU architectures may impose restrictions with regards to the
* alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return Contents of <target> before the atomic operation
*
* ERRNO: N/A
*
* atomic_val_t atomic_xor
* (
* atomic_t *target, /@ memory location to XOR @/
* atomic_val_t value /@ XOR with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_xor)
LDREX r2, [r0] /* load old value and mark exclusive access */
EOR r3, r2, r1 /* XOR word */
STREX r12, r3, [r0] /* try to store new value */
TEQ r12, #0 /* store successful? */
BNE atomic_xor /* if not, retry */
MOV r0, r2 /* return old value */
MOV pc, lr
/**
*
* @brief Atomically compare-and-swap the contents of a memory location
*
* This routine performs an atomic compare-and-swap, testing whether the
* contents of <target> equal <oldValue>, and if they do, setting <target>
* to <newValue>. Various CPU architectures may impose restrictions with regards
* to the alignment and cache attributes of the atomic_t type.
*
* This routine can be used from both task and interrupt level.
*
* @return 1 if the swap is actually executed, 0 otherwise.
*
* ERRNO: N/A
*
* int atomic_cas
* (
* atomic_t *target, /@ memory location to compare-and-swap @/
* atomic_val_t oldValue, /@ compare to this value @/
* atomic_val_t newValue, /@ swap with this value @/
* )
*
*/
SECTION_FUNC(TEXT, atomic_cas)
LDREX r3, [r0] /* load the value and mark exclusive access */
CMP r3, r1 /* if (*target != oldValue) */
ITT NE
MOVNE r0, #0 /* return FALSE */
MOVNE pc, lr
STREX r12, r2, [r0] /* try to store if equal */
TEQ r12, #0 /* store successful? */
BNE atomic_cas /* if not, retry */
MOV r0, #1 /* return TRUE if swap occurred */
MOV pc, lr
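
Every LDREX/STREX routine in the deleted file above follows the same pattern:
load-exclusive, compute, store-exclusive, branch back if the store lost
exclusivity. A C-level sketch of that pattern for atomic_add, written against
the GCC builtin that now replaces it (illustrative only, not code from this
commit):

#include <atomic.h>

static atomic_val_t atomic_add_sketch(atomic_t *target, atomic_val_t value)
{
	atomic_val_t old = *target;	/* LDREX: read the current value */

	/* STREX can fail if another context touched *target; the builtin
	 * then refreshes 'old' and we retry, mirroring the BNE loop above.
	 */
	while (!__atomic_compare_exchange_n(target, &old, old + value, 0,
					    __ATOMIC_SEQ_CST,
					    __ATOMIC_SEQ_CST)) {
	}
	return old;			/* the value before the addition */
}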


@@ -36,7 +36,6 @@ menu "Nios II Gen 2 Processor Options"
config CPU_NIOS2_GEN2
bool
default y
-select ATOMIC_OPERATIONS_C
help
This option signifies the use of a Nios II Gen 2 CPU


@@ -14,7 +14,7 @@ KBUILD_AFLAGS += -Wa,--divide
obj-y += fatal.o cpuhalt.o \
msr.o dynamic.o intconnect.o \
excconnect.o sys_fatal_error_handler.o \
-crt0.o atomic.o cache_s.o cache.o excstub.o \
+crt0.o cache_s.o cache.o excstub.o \
intstub.o swap.o thread.o
obj-$(CONFIG_IRQ_OFFLOAD) += irq_offload.o


@@ -1,380 +0,0 @@
/*
* Copyright (c) 2015 Intel Corporation
* Copyright (c) 2011-2014 Wind River Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file Atomic ops for x86
*
* This module provides the atomic operators for IA-32
* architectures on platforms that support the LOCK prefix instruction.
*
* The atomic operations are guaranteed to be atomic with respect
* to interrupt service routines, and to operations performed by peer
* processors.
*/
#include <atomic.h>
#include <toolchain.h>
/**
*
* @brief Atomic compare-and-set primitive
*
* This routine provides the compare-and-set operator. If the original value at
* <target> equals <oldValue>, then <newValue> is stored at <target> and the
* function returns 1.
*
* If the original value at <target> does not equal <oldValue>, then the store
* is not done and the function returns 0.
*
* The reading of the original value at <target>, the comparison,
* and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>.
*
* @param target address to be tested
* @param oldValue value to compare against
* @param newValue value to be stored if the comparison succeeds
* @return Returns 1 if <newValue> is written, 0 otherwise.
*/
FUNC_NO_FP int atomic_cas(atomic_t *target, atomic_val_t oldValue,
atomic_val_t newValue)
{
int eax;
__asm__ (
"movl %[oldValue], %%eax\n\t"
"lock cmpxchg %[newValue], (%[target])\n\t"
"sete %%al\n\t"
"movzbl %%al,%%eax"
: "=&a" (eax)
: [newValue] "r" (newValue), [oldValue] "m" (oldValue),
[target] "r" (target));
return eax;
}
/**
*
* @brief Atomic addition primitive
*
* This routine provides the atomic addition operator. The <value> is
* atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned.
*
* @param target memory location to add to
* @param value the value to add
*
* @return The previous value from <target>
*/
FUNC_NO_FP atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
__asm__ ("lock xadd %[value], (%[target])"
: [value] "+r" (value)
: [target] "r" (target)
: );
return value;
}
/**
*
* @brief Atomic subtraction primitive
*
* This routine provides the atomic subtraction operator. The <value> is
* atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned.
*
* @param target the memory location to subtract from
* @param value the value to subtract
*
* @return The previous value from <target>
*/
FUNC_NO_FP atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
__asm__ ("neg %[value]\n\t"
"lock xadd %[value], (%[target])"
: [value] "+r" (value)
: [target] "r" (target)
: );
return value;
}
/**
*
* @brief Atomic increment primitive
*
* @param target memory location to increment
*
* This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned.
*
* @return The value from <target> before the increment
*/
FUNC_NO_FP atomic_val_t atomic_inc(atomic_t *target)
{
atomic_t value = 1;
__asm__ ("lock xadd %[value], (%[target])"
: [value] "+r" (value)
: [target] "r" (target));
return value;
}
/**
*
* @brief Atomic decrement primitive
*
* @param target memory location to decrement
*
* This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned.
*
* @return The value from <target> prior to the decrement
*/
FUNC_NO_FP atomic_val_t atomic_dec(atomic_t *target)
{
atomic_t value = -1;
__asm__ ("lock xadd %[value], (%[target])"
: [value] "+r" (value)
: [target] "r" (target));
return value;
}
/**
*
* @brief Atomic get primitive
*
* @param target memory location to read from
*
* This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary.
*
* @return The value read from <target>
*/
FUNC_NO_FP atomic_val_t atomic_get(const atomic_t *target)
{
return *target;
}
/**
*
* @brief Atomic get-and-set primitive
*
* This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned.
*
* @param target the memory location to write to
* @param value the value to write
*
* @return The previous value from <target>
*/
FUNC_NO_FP atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
/*
* The 'lock' prefix is not required with the 'xchg' instruction.
* According to the IA-32 instruction reference manual:
*
* "If a memory operand is referenced, the processor's locking
* protocol is automatically implemented for the duration of
* the exchange operation, regardless of the presence
* or absence of the LOCK prefix or of the value of the IOPL."
*/
__asm__ ("xchg %[value], (%[target])"
: [value] "+r" (value)
: [target] "r" (target));
return value;
}
/**
*
* @brief Atomic clear primitive
*
* This routine provides the atomic clear operator. The value of 0 is atomically
* written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
*
* @param target the memory location to write
*
* @return The previous value from <target>
*/
FUNC_NO_FP atomic_val_t atomic_clear(atomic_t *target)
{
atomic_t value = 0;
/* See note in atomic_set about non-use of 'lock' here */
__asm__ ("xchg %[value], (%[target])"
: [value] "+r" (value)
: [target] "r" (target));
return value;
}
/**
*
* @brief Atomic bitwise inclusive OR primitive
*
* This routine provides the atomic bitwise inclusive OR operator. The <value>
* is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to OR
*
* @return The previous value from <target>
*/
FUNC_NO_FP atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
{
atomic_val_t eax;
__asm__ volatile (
"mov %[target], %%edx\n\t"
/*
* Dereference target pointer and store in EAX, we will
* use this later to ensure the value hasn't changed
*/
"mov (%%edx), %%eax\n\t"
"1:\n\t"
/*
* Set ECX to be (value <op> *eax), use ECX so we don't lose
* the original value in case we need to do this again
*/
"mov %[value], %%ecx\n\t"
"or %%eax, %%ecx\n\t"
/*
* Check if *EDX (which is *target) == EAX
* If they differ, *target was changed, EAX gets updated with
* the new value of *target, and we try again
* If they are the same, EAX now has ECX's value which is
* what we want to return to the caller.
*/
"lock cmpxchg %%ecx, (%%edx)\n\t"
"jnz 1b"
: "=a" (eax)
: [target] "m" (target), [value] "m" (value)
: "ecx", "edx");
return eax;
}
/**
*
* @brief Atomic bitwise exclusive OR (XOR) primitive
*
* This routine provides the atomic bitwise exclusive OR operator. The <value>
* is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to XOR
*
* @return The previous value from <target>
*/
FUNC_NO_FP atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
{
/*
* See comments in atomic_or() for explanation on how
* this works
*/
atomic_val_t eax;
__asm__ volatile (
"mov %[target], %%edx\n\t"
"mov (%%edx), %%eax\n\t"
"1:\n\t"
"mov %[value], %%ecx\n\t"
"xor %%eax, %%ecx\n\t"
"lock cmpxchg %%ecx, (%%edx)\n\t"
"jnz 1b"
: "=a" (eax)
: [target] "m" (target), [value] "m" (value)
: "ecx", "edx");
return eax;
}
/**
*
* @brief Atomic bitwise AND primitive
*
* This routine provides the atomic bitwise AND operator. The <value> is
* atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to AND
*
* @return The previous value from <target>
*/
FUNC_NO_FP atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
{
/*
* See comments in atomic_or() for explanation on how
* this works
*/
atomic_val_t eax;
__asm__ volatile (
"mov %[target], %%edx\n\t"
"mov (%%edx), %%eax\n\t"
"1:\n\t"
"mov %[value], %%ecx\n\t"
"and %%eax, %%ecx\n\t"
"lock cmpxchg %%ecx, (%%edx)\n\t"
"jnz 1b"
: "=a" (eax)
: [target] "m" (target), [value] "m" (value)
: "ecx", "edx");
return eax;
}
/**
*
* @brief Atomic bitwise NAND primitive
*
* This routine provides the atomic bitwise NAND operator. The <value> is
* atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to NAND
*
* @return The previous value from <target>
*/
FUNC_NO_FP atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
{
/*
* See comments in atomic_or() for explanation on how
* this works
*/
atomic_val_t eax;
__asm__ volatile (
"mov %[target], %%edx\n\t"
"mov (%%edx), %%eax\n\t"
"1:\n\t"
"mov %[value], %%ecx\n\t"
"and %%eax, %%ecx\n\t"
"not %%ecx\n\t"
"lock cmpxchg %%ecx, (%%edx)\n\t"
"jnz 1b"
: "=a" (eax)
: [target] "m" (target), [value] "m" (value)
: "ecx", "edx");
return eax;
}
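
The read-modify-cmpxchg loop hand-coded in atomic_or(), atomic_xor(),
atomic_and() and atomic_nand() above is the classic compare-and-exchange retry
idiom. The same logic in portable C, using the builtin that supersedes this
file (a sketch, not part of the commit):

#include <atomic.h>

static atomic_val_t atomic_or_sketch(atomic_t *target, atomic_val_t value)
{
	atomic_val_t old = *target;	/* mov (%edx), %eax */

	/* lock cmpxchg: if *target still holds 'old', store old | value;
	 * otherwise 'old' is reloaded and we loop, like the jnz 1b above.
	 */
	while (!__atomic_compare_exchange_n(target, &old, old | value, 0,
					    __ATOMIC_SEQ_CST,
					    __ATOMIC_SEQ_CST)) {
	}
	return old;			/* previous value, as the API requires */
}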


@@ -26,6 +26,232 @@ extern "C" {
typedef int atomic_t;
typedef atomic_t atomic_val_t;
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
/**
*
* @brief Atomic compare-and-set primitive
*
* This routine provides the compare-and-set operator. If the original value at
* <target> equals <oldValue>, then <newValue> is stored at <target> and the
* function returns 1.
*
* If the original value at <target> does not equal <oldValue>, then the store
* is not done and the function returns 0.
*
* The reading of the original value at <target>, the comparison,
* and the write of the new value (if it occurs) all happen atomically with
* respect to both interrupts and accesses of other processors to <target>.
*
* @param target address to be tested
* @param old_value value to compare against
* @param new_value value to be stored if the comparison succeeds
* @return Returns 1 if <new_value> is written, 0 otherwise.
*/
static inline int atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value)
{
return __atomic_compare_exchange_n(target, &old_value, new_value,
0, __ATOMIC_SEQ_CST,
__ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic addition primitive
*
* This routine provides the atomic addition operator. The <value> is
* atomically added to the value at <target>, placing the result at <target>,
* and the old value from <target> is returned.
*
* @param target memory location to add to
* @param value the value to add
*
* @return The previous value from <target>
*/
static inline atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_add(target, value, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic subtraction primitive
*
* This routine provides the atomic subtraction operator. The <value> is
* atomically subtracted from the value at <target>, placing the result at
* <target>, and the old value from <target> is returned.
*
* @param target the memory location to subtract from
* @param value the value to subtract
*
* @return The previous value from <target>
*/
static inline atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_sub(target, value, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic increment primitive
*
* @param target memory location to increment
*
* This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned.
*
* @return The value from <target> before the increment
*/
static inline atomic_val_t atomic_inc(atomic_t *target)
{
return atomic_add(target, 1);
}
/**
*
* @brief Atomic decrement primitive
*
* @param target memory location to decrement
*
* This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned.
*
* @return The value from <target> prior to the decrement
*/
static inline atomic_val_t atomic_dec(atomic_t *target)
{
return atomic_sub(target, 1);
}
/**
*
* @brief Atomic get primitive
*
* @param target memory location to read from
*
* This routine provides the atomic get primitive to atomically read
* a value from <target>. It simply does an ordinary load. Note that <target>
* is expected to be aligned to a 4-byte boundary.
*
* @return The value read from <target>
*/
static inline atomic_val_t atomic_get(const atomic_t *target)
{
return __atomic_load_n(target, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic get-and-set primitive
*
* This routine provides the atomic set operator. The <value> is atomically
* written at <target> and the previous value at <target> is returned.
*
* @param target the memory location to write to
* @param value the value to write
*
* @return The previous value from <target>
*/
static inline atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
{
/* This builtin, as described by Intel, is not a traditional
* test-and-set operation, but rather an atomic exchange operation. It
* writes value into *ptr, and returns the previous contents of *ptr.
*/
return __atomic_exchange_n(target, value, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic clear primitive
*
* This routine provides the atomic clear operator. The value of 0 is atomically
* written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
*
* @param target the memory location to write
*
* @return The previous value from <target>
*/
static inline atomic_val_t atomic_clear(atomic_t *target)
{
return atomic_set(target, 0);
}
/**
*
* @brief Atomic bitwise inclusive OR primitive
*
* This routine provides the atomic bitwise inclusive OR operator. The <value>
* is atomically bitwise OR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to OR
*
* @return The previous value from <target>
*/
static inline atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_or(target, value, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic bitwise exclusive OR (XOR) primitive
*
* This routine provides the atomic bitwise exclusive OR operator. The <value>
* is atomically bitwise XOR'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to XOR
*
* @return The previous value from <target>
*/
static inline atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_xor(target, value, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic bitwise AND primitive
*
* This routine provides the atomic bitwise AND operator. The <value> is
* atomically bitwise AND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* @param target the memory location to be modified
* @param value the value to AND
*
* @return The previous value from <target>
*/
static inline atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_and(target, value, __ATOMIC_SEQ_CST);
}
/**
*
* @brief Atomic bitwise NAND primitive
*
* This routine provides the atomic bitwise NAND operator. The <value> is
* atomically bitwise NAND'ed with the value at <target>, placing the result
* at <target>, and the previous value at <target> is returned.
*
* The operation here is equivalent to *target = ~(tmp & value)
*
* @param target the memory location to be modified
* @param value the value to NAND
*
* @return The previous value from <target>
*/
static inline atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_nand(target, value, __ATOMIC_SEQ_CST);
}
#else
extern atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
extern atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
extern atomic_val_t atomic_dec(atomic_t *target);
@@ -37,8 +263,9 @@ extern atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
extern atomic_val_t atomic_clear(atomic_t *target);
extern atomic_val_t atomic_get(const atomic_t *target);
extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
-extern int atomic_cas(atomic_t *target,
-atomic_val_t oldValue, atomic_val_t newValue);
+extern int atomic_cas(atomic_t *target, atomic_val_t oldValue,
+atomic_val_t newValue);
#endif /* CONFIG_ATOMIC_OPERATIONS_BUILTIN */
#define ATOMIC_INIT(i) {(i)}
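
A typical consumer of this API is a one-shot initialization guard built on
atomic_cas(), which returns 1 only for the caller whose swap actually
happened. A hypothetical sketch (names invented for illustration):

#include <atomic.h>

static atomic_t init_done = ATOMIC_INIT(0);

void lazy_init(void (*init_fn)(void))
{
	/* Exactly one caller wins the 0 -> 1 swap and runs init_fn();
	 * everyone else sees atomic_cas() return 0 and skips it.
	 */
	if (atomic_cas(&init_done, 0, 1)) {
		init_fn();
	}
}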


@@ -106,15 +106,6 @@ config ERRNO
include errno.h provided by the C library (libc) to use the errno symbol.
The C library must access the per-thread errno via the _get_errno() symbol.
-config ATOMIC_OPERATIONS_C
-bool
-default n
-help
-Use atomic operations routines that are implemented entirely
-in C by locking interrupts. Selected by architectures which either
-do not have support for atomic operations in their instruction
-set, or haven't been implemented yet during bring-up.
config NANO_WORKQUEUE
bool "Enable nano workqueue support"
default n
@@ -141,4 +132,26 @@ config SYSTEM_WORKQUEUE_PRIORITY
default 10
depends on SYSTEM_WORKQUEUE
+config ATOMIC_OPERATIONS_BUILTIN
+bool
+help
+Use the compiler builtin functions for atomic operations. This is
+the preferred method. However, support for all arches in GCC is
+incomplete.
+config ATOMIC_OPERATIONS_CUSTOM
+bool
+help
+Use when there isn't support for compiler built-ins, but you have
+written optimized assembly code under arch/ which implements these.
+config ATOMIC_OPERATIONS_C
+bool
+help
+Use atomic operations routines that are implemented entirely
+in C by locking interrupts. Selected by architectures which either
+do not have support for atomic operations in their instruction
+set, have not yet had them implemented during bring-up, or whose
+compiler does not support the atomic __sync_* builtins.
endmenu