kernel: fix atomic ops in user mode on some arches
Most CPUs have instructions like LOCK or LDREX/STREX which allow for atomic operations without locking out interrupts, and which can be invoked from user mode without complication. On such CPUs the atomic API is typically implemented with compiler built-in atomic operations or custom assembly.

However, some CPUs lack these kinds of instructions, such as Cortex-M0 or some ARC configurations. These fall back on the C-based implementations in atomic_c.c, which unfortunately must grab a spinlock to ensure proper concurrency with other threads and ISRs, and therefore trigger an exception when invoked from user mode.

For those platforms which support user mode but not atomic operation instructions, the atomic API is now exposed as system calls. The implementations in atomic_c.c which can instead be expressed in terms of other atomic operations (atomic_inc, atomic_dec, atomic_clear) have been removed.

The kernel test of atomic operations now runs in user mode to prove that this works.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
parent 959abdf1c9
commit b3eb510f5c
3 changed files with 81 additions and 90 deletions
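The commit message's point about implementations "expressed in terms of other atomic operations" refers to atomic_inc, atomic_dec, and atomic_clear, whose hand-rolled spinlock bodies are deleted in the diff below. Presumably the public API now provides thin wrappers along these lines (a minimal sketch, not the actual Zephyr header code):

/* Sketch: each removed function reduces to a surviving primitive, so
 * only the primitives need syscall plumbing. The equivalence for
 * atomic_clear() is even stated in the doc comment being deleted.
 */
static inline atomic_val_t atomic_inc(atomic_t *target)
{
	return atomic_add(target, 1);	/* returns value before increment */
}

static inline atomic_val_t atomic_dec(atomic_t *target)
{
	return atomic_sub(target, 1);	/* returns value before decrement */
}

static inline atomic_val_t atomic_clear(atomic_t *target)
{
	return atomic_set(target, 0);	/* returns value before clearing */
}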
kernel/atomic_c.c:

@@ -30,6 +30,30 @@
  */
 
 static struct k_spinlock lock;
 
+/* For those rare CPUs which support user mode, but not native atomic
+ * operations, the best we can do for them is implement the atomic
+ * functions as system calls, since in user mode locking a spinlock is
+ * forbidden.
+ */
+#ifdef CONFIG_USERSPACE
+#include <syscall_handler.h>
+
+#define ATOMIC_SYSCALL_HANDLER_TARGET(name) \
+	Z_SYSCALL_HANDLER(name, target) { \
+		Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
+		return z_impl_##name((atomic_t *)target); \
+	}
+
+#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name) \
+	Z_SYSCALL_HANDLER(name, target, value) { \
+		Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t))); \
+		return z_impl_##name((atomic_t *)target, value); \
+	}
+#else
+#define ATOMIC_SYSCALL_HANDLER_TARGET(name)
+#define ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(name)
+#endif
+
 /**
  *
  * @brief Atomic compare-and-set primitive
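To make the macro above concrete: the ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add) invocation added later in this diff expands to roughly the following (whitespace added for readability). The handler verifies that the user-supplied pointer is writable by the calling thread before forwarding to the kernel-mode implementation, where taking the spinlock is legal:

/* Expansion of ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add) */
Z_SYSCALL_HANDLER(atomic_add, target, value) {
	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));
	return z_impl_atomic_add((atomic_t *)target, value);
}

atomic_cas is the one operation that takes two value arguments besides the target, which is why it gets a hand-written handler below rather than a third macro.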
@@ -50,8 +74,8 @@ static struct k_spinlock lock;
  * @param new_value new value to write
  * @return Returns 1 if <new_value> is written, 0 otherwise.
  */
-int atomic_cas(atomic_t *target, atomic_val_t old_value,
-	       atomic_val_t new_value)
+int z_impl_atomic_cas(atomic_t *target, atomic_val_t old_value,
+		      atomic_val_t new_value)
 {
 	k_spinlock_key_t key;
 	int ret = 0;
@@ -68,6 +92,15 @@ int atomic_cas(atomic_t *target, atomic_val_t old_value,
 	return ret;
 }
 
+#ifdef CONFIG_USERSPACE
+Z_SYSCALL_HANDLER(atomic_cas, target, old_value, new_value)
+{
+	Z_OOPS(Z_SYSCALL_MEMORY_WRITE(target, sizeof(atomic_t)));
+
+	return z_impl_atomic_cas((atomic_t *)target, old_value, new_value);
+}
+#endif /* CONFIG_USERSPACE */
+
 /**
  *
  * @brief Atomic addition primitive
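From the caller's perspective nothing changes syntactically. A hypothetical user-mode fragment (assuming a Cortex-M0-class target with CONFIG_USERSPACE enabled; thread and memory-domain setup omitted):

#include <zephyr.h>

static atomic_t initialized = ATOMIC_INIT(0);

void user_thread_entry(void *p1, void *p2, void *p3)
{
	/* Before this commit the C fallback would take a spinlock here and
	 * oops in user mode. Now the call traps into the kernel, the handler
	 * validates &initialized via Z_SYSCALL_MEMORY_WRITE, and
	 * z_impl_atomic_cas() runs the spinlocked code in supervisor mode.
	 */
	if (atomic_cas(&initialized, 0, 1)) {
		/* won the race: perform one-time setup */
	}
}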
@@ -81,7 +114,7 @@ int atomic_cas(atomic_t *target, atomic_val_t old_value,
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_add(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -96,6 +129,8 @@ atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
 	return ret;
 }
 
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_add);
+
 /**
  *
  * @brief Atomic subtraction primitive
@@ -109,7 +144,7 @@ atomic_val_t atomic_add(atomic_t *target, atomic_val_t value)
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_sub(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -124,57 +159,7 @@ atomic_val_t atomic_sub(atomic_t *target, atomic_val_t value)
 	return ret;
 }
 
-/**
- *
- * @brief Atomic increment primitive
- *
- * @param target memory location to increment
- *
- * This routine provides the atomic increment operator. The value at <target>
- * is atomically incremented by 1, and the old value from <target> is returned.
- *
- * @return The value from <target> before the increment
- */
-atomic_val_t atomic_inc(atomic_t *target)
-{
-	k_spinlock_key_t key;
-	atomic_val_t ret;
-
-	key = k_spin_lock(&lock);
-
-	ret = *target;
-	(*target)++;
-
-	k_spin_unlock(&lock, key);
-
-	return ret;
-}
-
-/**
- *
- * @brief Atomic decrement primitive
- *
- * @param target memory location to decrement
- *
- * This routine provides the atomic decrement operator. The value at <target>
- * is atomically decremented by 1, and the old value from <target> is returned.
- *
- * @return The value from <target> prior to the decrement
- */
-atomic_val_t atomic_dec(atomic_t *target)
-{
-	k_spinlock_key_t key;
-	atomic_val_t ret;
-
-	key = k_spin_lock(&lock);
-
-	ret = *target;
-	(*target)--;
-
-	k_spin_unlock(&lock, key);
-
-	return ret;
-}
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_sub);
 
 /**
  *
@@ -205,7 +190,7 @@ atomic_val_t atomic_get(const atomic_t *target)
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_set(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -220,32 +205,7 @@ atomic_val_t atomic_set(atomic_t *target, atomic_val_t value)
 	return ret;
 }
 
-/**
- *
- * @brief Atomic clear primitive
- *
- * This routine provides the atomic clear operator. The value of 0 is atomically
- * written at <target> and the previous value at <target> is returned. (Hence,
- * atomic_clear(pAtomicVar) is equivalent to atomic_set(pAtomicVar, 0).)
- *
- * @param target the memory location to write
- *
- * @return The previous value from <target>
- */
-atomic_val_t atomic_clear(atomic_t *target)
-{
-	k_spinlock_key_t key;
-	atomic_val_t ret;
-
-	key = k_spin_lock(&lock);
-
-	ret = *target;
-	*target = 0;
-
-	k_spin_unlock(&lock, key);
-
-	return ret;
-}
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_set);
 
 /**
  *
@@ -260,7 +220,7 @@ atomic_val_t atomic_clear(atomic_t *target)
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_or(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -275,6 +235,8 @@ atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
 	return ret;
 }
 
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_or);
+
 /**
  *
  * @brief Atomic bitwise exclusive OR (XOR) primitive
@@ -288,7 +250,7 @@ atomic_val_t atomic_or(atomic_t *target, atomic_val_t value)
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_xor(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -303,6 +265,8 @@ atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
 	return ret;
 }
 
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_xor);
+
 /**
  *
  * @brief Atomic bitwise AND primitive
@@ -316,7 +280,7 @@ atomic_val_t atomic_xor(atomic_t *target, atomic_val_t value)
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_and(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -331,6 +295,8 @@ atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
 	return ret;
 }
 
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_and);
+
 /**
  *
  * @brief Atomic bitwise NAND primitive
@@ -344,7 +310,7 @@ atomic_val_t atomic_and(atomic_t *target, atomic_val_t value)
  *
  * @return The previous value from <target>
  */
-atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
+atomic_val_t z_impl_atomic_nand(atomic_t *target, atomic_val_t value)
 {
 	k_spinlock_key_t key;
 	atomic_val_t ret;
@@ -358,3 +324,5 @@ atomic_val_t atomic_nand(atomic_t *target, atomic_val_t value)
 
 	return ret;
 }
+
+ATOMIC_SYSCALL_HANDLER_TARGET_VALUE(atomic_nand);
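As for the last claim in the commit message, the atomic-operations test now runs in user mode. A minimal sketch of what such a ztest case could look like (assumed test names; ZTEST_BMEM is used here to place the variable in the test's memory partition so a user thread may access it; the real test in the tree is far more thorough):

#include <ztest.h>

static ZTEST_BMEM atomic_t total;

static void test_atomic_basic(void)
{
	atomic_clear(&total);
	zassert_equal(atomic_inc(&total), 0, "bad old value from inc");
	zassert_equal(atomic_add(&total, 2), 1, "bad old value from add");
	zassert_equal(atomic_get(&total), 3, "updates lost");
}

void test_main(void)
{
	ztest_test_suite(atomic_user,
			 /* runs the case in a user-mode thread */
			 ztest_user_unit_test(test_atomic_basic));
	ztest_run_test_suite(atomic_user);
}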