kernel: fix atomic ops in user mode on some arches

Most CPUs have instructions like LOCK, LDREX/STREX, etc. which
allow for atomic operations without locking interrupts and which
can be invoked from user mode without complication. On such
platforms the atomic APIs are typically implemented with compiler
builtin atomic operations or custom assembly.

However, some CPUs lack these kinds of instructions, such as
Cortex-M0 or some ARC cores. These fall back to the C-based
atomic operation implementations instead. Unfortunately, those
require grabbing a spinlock to ensure proper concurrency with
other threads and ISRs, and locking a spinlock is forbidden in
user mode, so they trigger an exception when called from user
mode.

For these platforms, which support user mode but not atomic
operation instructions, the atomic APIs are now exposed as
system calls.
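
As a condensed sketch of the mechanism, the existing spinlock-based
body becomes the z_impl_ function and a verification handler checks
that the user-supplied pointer is writable before dispatching to it
(in the actual diff below the handlers are generated by the
ATOMIC_SYSCALL_HANDLER_* macros):

  atomic_val_t z_impl_atomic_add(atomic_t *target, atomic_val_t value)
  {
          k_spinlock_key_t key;
          atomic_val_t ret;

          key = k_spin_lock(&lock);
          ret = *target;
          *target += value;
          k_spin_unlock(&lock, key);

          return ret;
  }

  #ifdef CONFIG_USERSPACE
  /* Reject callers that cannot legitimately write <target>. */
  Z_SYSCALL_HANDLER(atomic_add, target, value)
  {
          Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));
          return z_impl_atomic_add((atomic_t *)target, value);
  }
  #endif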

Some of the implementations in atomic_c.c which can instead be
expressed in terms of other atomic operations have been removed.
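
For example, atomic_inc(), atomic_dec() and atomic_clear() no
longer need locked C implementations of their own; as the header
changes below show, they become inline wrappers shared by the
builtin and C-based backends:

  static inline atomic_val_t atomic_inc(atomic_t *target)
  {
          return atomic_add(target, 1);
  }

  static inline atomic_val_t atomic_dec(atomic_t *target)
  {
          return atomic_sub(target, 1);
  }

  static inline atomic_val_t atomic_clear(atomic_t *target)
  {
          return atomic_set(target, 0);
  }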

The kernel test of atomic operations now runs in user mode to
prove that this works.
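
A minimal, hypothetical user-mode usage sketch (the names are
illustrative only); on CONFIG_ATOMIC_OPERATIONS_C targets each of
these calls now traps into the kernel, which takes the spinlock on
the caller's behalf:

  #include <zephyr.h>
  #include <atomic.h>

  static atomic_t refs = ATOMIC_INIT(0);

  /* Entry point of a user-mode thread (hypothetical). */
  void user_entry(void *p1, void *p2, void *p3)
  {
          atomic_inc(&refs);      /* inline wrapper over atomic_add() */
          if (atomic_cas(&refs, 1, 42)) {
                  /* CAS succeeded: we observed the value 1 */
          }
          atomic_dec(&refs);
  }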

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>


@@ -10,6 +10,7 @@
#define ZEPHYR_INCLUDE_ATOMIC_H_
#include <stdbool.h>
#include <toolchain.h>
#ifdef __cplusplus
extern "C" {
@@ -45,6 +46,10 @@ static inline bool atomic_cas(atomic_t *target, atomic_val_t old_value,
0, __ATOMIC_SEQ_CST,
__ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall int atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value);
#else
extern int atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value);
@@ -66,6 +71,8 @@ static inline atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_add(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_add(atomic_t *target, atomic_val_t value);
#endif
@@ -86,6 +93,8 @@ static inline atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_sub(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
#endif
@@ -100,7 +109,7 @@ extern atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value);
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
#if defined(CONFIG_ATOMIC_OPERATIONS_BUILTIN) || defined (CONFIG_ATOMIC_OPERATIONS_C)
static inline atomic_val_t atomic_inc(atomic_t *target)
{
return atomic_add(target, 1);
@@ -119,7 +128,7 @@ extern atomic_val_t atomic_inc(atomic_t *target);
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
#if defined(CONFIG_ATOMIC_OPERATIONS_BUILTIN) || defined (CONFIG_ATOMIC_OPERATIONS_C)
static inline atomic_val_t atomic_dec(atomic_t *target)
{
return atomic_sub(target, 1);
@@ -168,6 +177,8 @@ static inline atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
*/
return __atomic_exchange_n(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
#endif
@@ -183,7 +194,7 @@ extern atomic_val_t atomic_set(atomic_t *target, atomic_val_t value);
*
* @return Previous value of @a target.
*/
#ifdef CONFIG_ATOMIC_OPERATIONS_BUILTIN
#if defined(CONFIG_ATOMIC_OPERATIONS_BUILTIN) || defined (CONFIG_ATOMIC_OPERATIONS_C)
static inline atomic_val_t atomic_clear(atomic_t *target)
{
return atomic_set(target, 0);
@@ -209,6 +220,9 @@ static inline atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_or(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_or(atomic_t *target, atomic_val_t value);
#endif
@@ -230,6 +244,8 @@ static inline atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_xor(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value);
#endif
@@ -251,6 +267,8 @@ static inline atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_and(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_and(atomic_t *target, atomic_val_t value);
#endif
@@ -272,6 +290,8 @@ static inline atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
{
return __atomic_fetch_nand(target, value, __ATOMIC_SEQ_CST);
}
#elif defined(CONFIG_ATOMIC_OPERATIONS_C)
__syscall atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value);
#else
extern atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value);
#endif
@@ -434,6 +454,9 @@ static inline void atomic_set_bit_to(atomic_t *target, int bit, bool val)
}
}
#ifdef CONFIG_ATOMIC_OPERATIONS_C
#include <syscalls/atomic.h>
#endif
/**
* @}
*/


@@ -30,6 +30,30 @@
*/
static struct k_spinlock lock;
/* For those rare CPUs which support user mode, but not native atomic
* operations, the best we can do for them is implement the atomic
* functions as system calls, since in user mode locking a spinlock is
* forbidden.
*/
#ifdef CONFIG_USERSPACE
#include <syscall_handler.h>
#define ATOMIC_SYSCALL_HANDLER_TARGET(name) \
Z_SYSCALL_HANDLER(name, target) { \
Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
return z_impl_##name((atomic_t *)target); \
}
#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name) \
Z_SYSCALL_HANDLER(name, target, value) { \
Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
return z_impl_##name((atomic_t *)target, value); \
}
#else
#define ATOMIC_SYSCALL_HANDLER_TARGET(name)
#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name)
#endif
/**
*
* @brief Atomic compare-and-set primitive
@@ -50,8 +74,8 @@ static struct k_spinlock lock;
* @param new_value value to compare against
* @return Returns 1 if <new_value> is written, 0 otherwise.
*/
int atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value)
int z_impl_atomic_cas(atomic_t *target, atomic_val_t old_value,
atomic_val_t new_value)
{
k_spinlock_key_t key;
int ret = 0;
@@ -68,6 +92,15 @@ int atomic_cas(atomic_t *target, atomic_val_t old_value,
return ret;
}
#ifdef CONFIG_USERSPACE
Z_SYSCALL_HANDLER(atomic_cas, target, old_value, new_value)
{
Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));
return z_impl_atomic_cas((atomic_t *)target, old_value, new_value);
}
#endif /* CONFIG_USERSPACE */
/**
*
* @brief Atomic addition primitive
@@ -81,7 +114,7 @@ int atomic_cas(atomic_t *target, atomic_val_t old_value,
*
* @return The previous value from <target>
*/
atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
atomic_val_t z_impl_atomic_add(atomic_t *target, atomic_val_t value)
{
k_spinlock_key_t key;
atomic_val_t ret;
@@ -96,6 +129,8 @@ atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
return ret;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add);
/**
*
* @brief Atomic subtraction primitive
@@ -109,7 +144,7 @@ atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
*
* @return The previous value from <target>
*/
atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
atomic_val_t z_impl_atomic_sub(atomic_t *target, atomic_val_t value)
{
k_spinlock_key_t key;
atomic_val_t ret;
@@ -124,57 +159,7 @@ atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
return ret;
}
/**
*
* @brief Atomic increment primitive
*
* @param target memory location to increment
*
* This routine provides the atomic increment operator. The value at <target>
* is atomically incremented by 1, and the old value from <target> is returned.
*
* @return The value from <target> before the increment
*/
atomic_val_t atomic_inc(atomic_t *target)
{
k_spinlock_key_t key;
atomic_val_t ret;
key = k_spin_lock(&lock);
ret = *target;
(*target)++;
k_spin_unlock(&lock, key);
return ret;
}
/**
*
* @brief Atomic decrement primitive
*
* @param target memory location to decrement
*
* This routine provides the atomic decrement operator. The value at <target>
* is atomically decremented by 1, and the old value from <target> is returned.
*
* @return The value from <target> prior to the decrement
*/
atomic_val_t atomic_dec(atomic_t *target)
{
k_spinlock_key_t key;
atomic_val_t ret;
key = k_spin_lock(&lock);
ret = *target;
(*target)--;
k_spin_unlock(&lock, key);
return ret;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_sub);
/**
*
@@ -205,7 +190,7 @@ atomic_val_t atomic_get(const atomic_t *target)
*
* @return The previous value from <target>
*/
atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
atomic_val_t z_impl_atomic_set(atomic_t *target, atomic_val_t value)
{
k_spinlock_key_t key;
atomic_val_t ret;
@@ -220,32 +205,7 @@ atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
return ret;
}
/**
*
* @brief Atomic clear primitive
*
* This routine provides the atomic clear operator. The value of 0 is atomically
* written at <target> and the previous value at <target> is returned. (Hence,
* atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
*
* @param target the memory location to write
*
* @return The previous value from <target>
*/
atomic_val_t atomic_clear(atomic_t *target)
{
k_spinlock_key_t key;
atomic_val_t ret;
key = k_spin_lock(&lock);
ret = *target;
*target = 0;
k_spin_unlock(&lock, key);
return ret;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_set);
/**
*
@@ -260,7 +220,7 @@ atomic_val_t atomic_clear(atomic_t *target)
*
* @return The previous value from <target>
*/
atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
atomic_val_t z_impl_atomic_or(atomic_t *target, atomic_val_t value)
{
k_spinlock_key_t key;
atomic_val_t ret;
@@ -275,6 +235,8 @@ atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
return ret;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_or);
/**
*
* @brief Atomic bitwise exclusive OR (XOR) primitive
@@ -288,7 +250,7 @@ atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
*
* @return The previous value from <target>
*/
atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
atomic_val_t z_impl_atomic_xor(atomic_t *target, atomic_val_t value)
{
k_spinlock_key_t key;
atomic_val_t ret;
@@ -303,6 +265,8 @@ atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
return ret;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_xor);
/**
*
* @brief Atomic bitwise AND primitive
@@ -316,7 +280,7 @@ atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
*
* @return The previous value from <target>
*/
atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
atomic_val_t z_impl_atomic_and(atomic_t *target, atomic_val_t value)
{
k_spinlock_key_t key;
atomic_val_t ret;
@@ -331,6 +295,8 @@ atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
return ret;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_and);
/**
*
* @brief Atomic bitwise NAND primitive
@@ -344,7 +310,7 @@ atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
*
* @return The previous value from <target>
*/
atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
atomic_val_t z_impl_atomic_nand(atomic_t *target, atomic_val_t value)
{
k_spinlock_key_t key;
atomic_val_t ret;
@@ -358,3 +324,5 @@ atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
return ret;
}
ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_nand);


@@ -75,7 +75,7 @@ void test_main(void)
ztest_unit_test(test_irq_offload),
ztest_unit_test(test_byteorder_memcpy_swap),
ztest_unit_test(test_byteorder_mem_swap),
ztest_unit_test(test_atomic),
ztest_user_unit_test(test_atomic),
ztest_unit_test(test_bitfield),
ztest_unit_test(test_printk),
ztest_unit_test(test_slist),