lib: os: add sys_mutex data type

For systems without userspace enabled, a sys_mutex works the same
way as a k_mutex.

For systems with userspace, the sys_mutex may exist in user
memory. It is still tracked as a kernel object, but has an
underlying k_mutex that is looked up in the kernel object
table.

Future enhancements will optimize sys_mutex so that uncontended
locks and unlocks use atomic ops instead of syscalls.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Authored by Andrew Boie on 2019-03-27 15:44:52 -07:00; committed by Anas Nashif
Commit f0835674a3
6 changed files with 248 additions and 2 deletions
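
For illustration only (not part of the commit): a minimal sketch of how
application code could use the new API declared in include/misc/mutex.h
below. The thread entry point and the protected counter are made-up names.

#include <kernel.h>
#include <misc/mutex.h>

/* Statically define a sys_mutex; with CONFIG_USERSPACE this instance can
 * live in user-accessible memory, e.g. inside an application memory
 * partition.
 */
SYS_MUTEX_DEFINE(counter_mutex);

static volatile int protected_counter;

/* Hypothetical thread entry point serializing updates to the counter */
void worker(void *p1, void *p2, void *p3)
{
	if (sys_mutex_lock(&counter_mutex, K_FOREVER) == 0) {
		protected_counter++;
		sys_mutex_unlock(&counter_mutex);
	}
}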

include/misc/mutex.h (new file)

@@ -0,0 +1,149 @@
/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_MISC_MUTEX_H_
#define ZEPHYR_INCLUDE_MISC_MUTEX_H_

/*
 * sys_mutex behaves almost exactly like k_mutex, with the added advantage
 * that a sys_mutex instance can reside in user memory.
 *
 * Further enhancements will support locking/unlocking uncontended sys_mutexes
 * with simple atomic ops instead of syscalls, similar to Linux's
 * FUTEX_LOCK_PI and FUTEX_UNLOCK_PI.
 */

#ifdef CONFIG_USERSPACE
#include <atomic.h>
#include <zephyr/types.h>

struct sys_mutex {
	/* Currently unused, but will be used to store state for fast mutexes
	 * that can be locked/unlocked with atomic ops if there is no
	 * contention
	 */
	atomic_t val;
};

#define SYS_MUTEX_DEFINE(name) \
	struct sys_mutex name

/**
 * @brief Initialize a mutex.
 *
 * This routine initializes a mutex object, prior to its first use.
 *
 * Upon completion, the mutex is available and does not have an owner.
 *
 * This routine is only necessary to call when userspace is disabled
 * and the mutex was not created with SYS_MUTEX_DEFINE().
 *
 * @param mutex Address of the mutex.
 *
 * @return N/A
 */
static inline void sys_mutex_init(struct sys_mutex *mutex)
{
	ARG_UNUSED(mutex);

	/* Nothing to do, kernel-side data structures are initialized at
	 * boot
	 */
}

__syscall int z_sys_mutex_kernel_lock(struct sys_mutex *mutex, s32_t timeout);
__syscall int z_sys_mutex_kernel_unlock(struct sys_mutex *mutex);

/**
 * @brief Lock a mutex.
 *
 * This routine locks @a mutex. If the mutex is locked by another thread,
 * the calling thread waits until the mutex becomes available or until
 * a timeout occurs.
 *
 * A thread is permitted to lock a mutex it has already locked. The operation
 * completes immediately and the lock count is increased by 1.
 *
 * @param mutex Address of the mutex, which may reside in user memory
 * @param timeout Waiting period to lock the mutex (in milliseconds),
 *                or one of the special values K_NO_WAIT and K_FOREVER.
 *
 * @retval 0 Mutex locked.
 * @retval -EBUSY Returned without waiting.
 * @retval -EAGAIN Waiting period timed out.
 * @retval -EACCES Caller has no access to provided mutex address
 * @retval -EINVAL Provided mutex not recognized by the kernel
 */
static inline int sys_mutex_lock(struct sys_mutex *mutex, s32_t timeout)
{
	/* For now, make the syscall unconditionally */
	return z_sys_mutex_kernel_lock(mutex, timeout);
}

/**
 * @brief Unlock a mutex.
 *
 * This routine unlocks @a mutex. The mutex must already be locked by the
 * calling thread.
 *
 * The mutex cannot be claimed by another thread until it has been unlocked by
 * the calling thread as many times as it was previously locked by that
 * thread.
 *
 * @param mutex Address of the mutex, which may reside in user memory
 *
 * @retval 0 Mutex unlocked.
 * @retval -EACCES Caller has no access to provided mutex address
 * @retval -EINVAL Provided mutex not recognized by the kernel or mutex wasn't
 *                 locked
 * @retval -EPERM Caller does not own the mutex
 */
static inline int sys_mutex_unlock(struct sys_mutex *mutex)
{
	/* For now, make the syscall unconditionally */
	return z_sys_mutex_kernel_unlock(mutex);
}
#include <syscalls/mutex.h>
#else
#include <kernel.h>
#include <kernel_structs.h>

struct sys_mutex {
	struct k_mutex kernel_mutex;
};

#define SYS_MUTEX_DEFINE(name) \
	struct sys_mutex name = { \
		.kernel_mutex = _K_MUTEX_INITIALIZER(name.kernel_mutex) \
	}

static inline void sys_mutex_init(struct sys_mutex *mutex)
{
	k_mutex_init(&mutex->kernel_mutex);
}

static inline int sys_mutex_lock(struct sys_mutex *mutex, s32_t timeout)
{
	return k_mutex_lock(&mutex->kernel_mutex, timeout);
}

static inline int sys_mutex_unlock(struct sys_mutex *mutex)
{
	if (mutex->kernel_mutex.lock_count == 0) {
		return -EINVAL;
	}

	if (mutex->kernel_mutex.owner != _current) {
		return -EPERM;
	}

	k_mutex_unlock(&mutex->kernel_mutex);

	return 0;
}
#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_MISC_MUTEX_H_ */
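
The header comment above mentions a future futex-like fast path. Purely as a
sketch of that direction (not part of this commit; the encoding of val and the
fallback behavior are assumptions), an uncontended lock could use the
currently-unused val field with atomic_cas() and only enter the kernel when
contended:

/* Illustrative sketch: val == 0 means unlocked, 1 means locked.
 * A real implementation would also need to encode ownership and waiter
 * state, similar to Linux's FUTEX_LOCK_PI protocol.
 */
static inline int sys_mutex_lock_fast(struct sys_mutex *mutex, s32_t timeout)
{
	if (atomic_cas(&mutex->val, 0, 1)) {
		/* Acquired the uncontended mutex without a syscall */
		return 0;
	}

	/* Contended (or recursive) case: defer to the kernel */
	return z_sys_mutex_kernel_lock(mutex, timeout);
}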

@@ -19,6 +19,7 @@
#include <stdbool.h>
#include <app_memory/app_memdomain.h>
#include <misc/libc-hooks.h>
+#include <misc/mutex.h>

#ifdef Z_LIBC_PARTITION_EXISTS
K_APPMEM_PARTITION_DEFINE(z_libc_partition);

@@ -19,3 +19,5 @@ zephyr_sources_if_kconfig(printk.c)
zephyr_sources_if_kconfig(ring_buffer.c)
zephyr_sources_ifdef(CONFIG_ASSERT assert.c)
+
+zephyr_sources_ifdef(CONFIG_USERSPACE mutex.c)

lib/os/mutex.c (new file)

@@ -0,0 +1,78 @@
/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <misc/mutex.h>
#include <syscall_handler.h>
#include <kernel_structs.h>

static struct k_mutex *get_k_mutex(struct sys_mutex *mutex)
{
	struct _k_object *obj;

	obj = z_object_find(mutex);
	if (obj == NULL || obj->type != K_OBJ_SYS_MUTEX) {
		return NULL;
	}

	return (struct k_mutex *)obj->data;
}

static bool check_sys_mutex_addr(u32_t addr)
{
	/* sys_mutex memory is never touched, just used to lookup the
	 * underlying k_mutex, but we don't want threads using mutexes
	 * that are outside their memory domain
	 */
	return Z_SYSCALL_MEMORY_WRITE(addr, sizeof(struct sys_mutex));
}

int z_impl_z_sys_mutex_kernel_lock(struct sys_mutex *mutex, s32_t timeout)
{
	struct k_mutex *kernel_mutex = get_k_mutex(mutex);

	if (kernel_mutex == NULL) {
		return -EINVAL;
	}

	return k_mutex_lock(kernel_mutex, timeout);
}

Z_SYSCALL_HANDLER(z_sys_mutex_kernel_lock, mutex, timeout)
{
	if (check_sys_mutex_addr(mutex)) {
		return -EACCES;
	}

	return z_impl_z_sys_mutex_kernel_lock((struct sys_mutex *)mutex,
					      timeout);
}

int z_impl_z_sys_mutex_kernel_unlock(struct sys_mutex *mutex)
{
	struct k_mutex *kernel_mutex = get_k_mutex(mutex);

	if (kernel_mutex == NULL || kernel_mutex->lock_count == 0) {
		return -EINVAL;
	}

	if (kernel_mutex->owner != _current) {
		return -EPERM;
	}

	k_mutex_unlock(kernel_mutex);

	return 0;
}

Z_SYSCALL_HANDLER(z_sys_mutex_kernel_unlock, mutex)
{
	if (check_sys_mutex_addr(mutex)) {
		return -EACCES;
	}

	return z_impl_z_sys_mutex_kernel_unlock((struct sys_mutex *)mutex);
}

@@ -37,6 +37,7 @@ DW_OP_addr = 0x3
DW_OP_fbreg = 0x91
STACK_TYPE = "_k_thread_stack_element"
thread_counter = 0
+sys_mutex_counter = 0

# Global type environment. Populated by pass 1.
type_env = {}
@@ -54,6 +55,7 @@ scr = os.path.basename(sys.argv[0])
class KobjectInstance:
    def __init__(self, type_obj, addr):
        global thread_counter
+        global sys_mutex_counter

        self.addr = addr
        self.type_obj = type_obj
@@ -67,6 +69,9 @@ class KobjectInstance:
            # permissions to other kernel objects
            self.data = thread_counter
            thread_counter = thread_counter + 1
+        elif self.type_obj.name == "sys_mutex":
+            self.data = "(u32_t)(&kernel_mutexes[%d])" % sys_mutex_counter
+            sys_mutex_counter += 1
        else:
            self.data = 0
@@ -558,3 +563,6 @@ class ElfHelper:
    def get_thread_counter(self):
        return thread_counter
+
+    def get_sys_mutex_counter(self):
+        return sys_mutex_counter

@@ -83,6 +83,7 @@ kobjects = OrderedDict([
    ("k_timer", (None, False)),
    ("_k_thread_stack_element", (None, False)),
    ("device", (None, False)),
+    ("sys_mutex", (None, True))
])
@@ -122,10 +123,8 @@ header = """%compare-lengths
#include <string.h>
%}
struct _k_object;
-%%
"""
# Different versions of gperf have different prototypes for the lookup # Different versions of gperf have different prototypes for the lookup
# function, best to implement the wrapper here. The pointer value itself is # function, best to implement the wrapper here. The pointer value itself is
# turned into a string, we told gperf to expect binary strings that are not # turned into a string, we told gperf to expect binary strings that are not
@@ -159,7 +158,16 @@ void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
def write_gperf_table(fp, eh, objs, static_begin, static_end):
    fp.write(header)
+    num_mutexes = eh.get_sys_mutex_counter()
+    if (num_mutexes != 0):
+        fp.write("static struct k_mutex kernel_mutexes[%d] = {\n" % num_mutexes)
+        for i in range(num_mutexes):
+            fp.write("_K_MUTEX_INITIALIZER(kernel_mutexes[%d])" % i)
+            if (i != num_mutexes - 1):
+                fp.write(", ")
+        fp.write("};\n")
+    fp.write("%%\n")

    # Setup variables for mapping thread indexes
    syms = eh.get_symbols()
    thread_max_bytes = syms["CONFIG_MAX_THREAD_BYTES"]
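
For reference, assuming an application image containing two sys_mutex
instances, the generation code above would emit a preamble along these lines
(derived directly from the fp.write() calls; the rest of the gperf table is
unchanged):

static struct k_mutex kernel_mutexes[2] = {
_K_MUTEX_INITIALIZER(kernel_mutexes[0]), _K_MUTEX_INITIALIZER(kernel_mutexes[1])};
%%

Each sys_mutex's kernel object entry then carries (u32_t)(&kernel_mutexes[N])
in its data field, which get_k_mutex() in lib/os/mutex.c casts back to the
backing struct k_mutex.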