/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <string.h>
#include <misc/printk.h>
#include <misc/rb.h>
#include <kernel_structs.h>
#include <sys_io.h>
#include <ksched.h>
#include <syscall.h>
#include <syscall_handler.h>
#include <device.h>
#include <init.h>
#include <logging/sys_log.h>

#if defined(CONFIG_NETWORKING) && defined(CONFIG_DYNAMIC_OBJECTS)
/* Used by auto-generated obj_size_get() switch body, as we need to
 * know the size of struct net_context
 */
#include <net/net_context.h>
#endif

#define MAX_THREAD_BITS (CONFIG_MAX_THREAD_BYTES * 8)

#ifdef CONFIG_DYNAMIC_OBJECTS
extern u8_t _thread_idx_map[CONFIG_MAX_THREAD_BYTES];
#endif
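
/* Bitmap of thread index slots: a set bit in _thread_idx_map marks a free
 * index. _thread_idx_alloc() and _thread_idx_free() below maintain it; the
 * backing array itself is assumed to be emitted by the build-time kernel
 * object generation step.
 */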

static void clear_perms_cb(struct _k_object *ko, void *ctx_ptr);

const char *otype_to_str(enum k_objects otype)
{
	const char *ret;

	/* -fdata-sections doesn't work right except in very very recent
	 * GCC and these literal strings would appear in the binary even if
	 * otype_to_str was omitted by the linker
	 */
#ifdef CONFIG_PRINTK
	switch (otype) {
	/* otype-to-str.h is generated automatically during build by
	 * gen_kobject_list.py
	 */
#include <otype-to-str.h>
	default:
		ret = "?";
		break;
	}
#else
	ARG_UNUSED(otype);
	ret = NULL;
#endif

	return ret;
}

struct perm_ctx {
	int parent_id;
	int child_id;
	struct k_thread *parent;
};

#ifdef CONFIG_DYNAMIC_OBJECTS
struct dyn_obj {
	struct _k_object kobj;
	sys_dnode_t obj_list;
	struct rbnode node; /* must be immediately before data member */
	u8_t data[]; /* The object itself */
};
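
/* dyn_object_find() locates the rbnode by subtracting sizeof(struct rbnode)
 * from an object pointer, which is why the node member above must sit
 * immediately before data[].
 */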

extern struct _k_object *_k_object_gperf_find(void *obj);
extern void _k_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
					     void *context);

static int node_lessthan(struct rbnode *a, struct rbnode *b);

/*
 * Red/black tree of allocated kernel objects, for reasonably fast lookups
 * based on object pointer values.
 */
static struct rbtree obj_rb_tree = {
	.lessthan_fn = node_lessthan
};

/*
 * Linked list of allocated kernel objects, for iteration over all allocated
 * objects (and potentially deleting them during iteration).
 */
static sys_dlist_t obj_list = SYS_DLIST_STATIC_INIT(&obj_list);

/*
 * TODO: Write some hash table code that will replace both obj_rb_tree
 * and obj_list.
 */
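
/* Look up how much memory a dynamic object of the given type needs. Types
 * not listed in the generated otype-to-size.h are presumed to be device
 * driver instances, hence the sizeof(struct device) fallback.
 */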
static size_t obj_size_get(enum k_objects otype)
{
	size_t ret;

	switch (otype) {
#include <otype-to-size.h>
	default:
		ret = sizeof(struct device);
		break;
	}

	return ret;
}
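
/* Tree nodes are ordered by raw pointer value, since objects are looked up
 * by address.
 */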
static int node_lessthan(struct rbnode *a, struct rbnode *b)
{
	return a < b;
}

static inline struct dyn_obj *node_to_dyn_obj(struct rbnode *node)
{
	return CONTAINER_OF(node, struct dyn_obj, node);
}

static struct dyn_obj *dyn_object_find(void *obj)
{
	struct rbnode *node;
	struct dyn_obj *ret;
	unsigned int key;

	/* For any dynamically allocated kernel object, the object
	 * pointer is just a member of the containing struct dyn_obj,
	 * so just a little arithmetic is necessary to locate the
	 * corresponding struct rbnode
	 */
	node = (struct rbnode *)((char *)obj - sizeof(struct rbnode));

	key = irq_lock();
	if (rb_contains(&obj_rb_tree, node)) {
		ret = node_to_dyn_obj(node);
	} else {
		ret = NULL;
	}
	irq_unlock(key);

	return ret;
}

/**
 * @internal
 *
 * @brief Allocate a new thread index for a new thread.
 *
 * This finds an unused thread index that can be assigned to a new
 * thread. If too many threads have been allocated, the kernel will
 * run out of indexes and this function will fail.
 *
 * Note that if an unused index is found, that index will be marked as
 * used after return of this function.
 *
 * @param tidx The new thread index if successful
 *
 * @return 1 if successful, 0 if failed
 **/
static int _thread_idx_alloc(u32_t *tidx)
{
	int i;
	int idx;
	int base;
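
	/* Scan the map one byte at a time. find_lsb_set() reports the least
	 * significant set bit 1-based (0 if no bits are set), so a nonzero
	 * result marks a free index once adjusted by -1 and offset by the
	 * byte position.
	 */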
	base = 0;
	for (i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		idx = find_lsb_set(_thread_idx_map[i]);

		if (idx) {
			*tidx = base + (idx - 1);

			sys_bitfield_clear_bit((mem_addr_t)_thread_idx_map,
					       *tidx);

			/* Clear permission from all objects */
			_k_object_wordlist_foreach(clear_perms_cb,
						   (void *)*tidx);

			return 1;
		}

		base += 8;
	}

	return 0;
}

/**
 * @internal
 *
 * @brief Free a thread index.
 *
 * This frees a thread index so it can be used by another
 * thread.
 *
 * @param tidx The thread index to be freed
 **/
static void _thread_idx_free(u32_t tidx)
{
	/* To prevent leaked permission when index is recycled */
	_k_object_wordlist_foreach(clear_perms_cb, (void *)tidx);

	sys_bitfield_set_bit((mem_addr_t)_thread_idx_map, tidx);
}
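
/* Allocate a kernel object of the given type from the calling thread's
 * resource pool. The pointer returned is to the object itself
 * (dyn_obj->data), not the wrapping bookkeeping struct.
 */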
void *_impl_k_object_alloc(enum k_objects otype)
{
	struct dyn_obj *dyn_obj;
	unsigned int key;
	u32_t tidx;

	/* Stacks are not supported; we don't yet have mem pool APIs
	 * to request memory that is aligned
	 */
	__ASSERT(otype > K_OBJ_ANY && otype < K_OBJ_LAST &&
		 otype != K_OBJ__THREAD_STACK_ELEMENT,
		 "bad object type requested");

	dyn_obj = z_thread_malloc(sizeof(*dyn_obj) + obj_size_get(otype));
	if (dyn_obj == NULL) {
		SYS_LOG_WRN("could not allocate kernel object");
		return NULL;
	}

	dyn_obj->kobj.name = (char *)&dyn_obj->data;
	dyn_obj->kobj.type = otype;
	dyn_obj->kobj.flags = K_OBJ_FLAG_ALLOC;
	(void)memset(dyn_obj->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);

	/* Need to grab a new thread index for k_thread */
	if (otype == K_OBJ_THREAD) {
		if (!_thread_idx_alloc(&tidx)) {
			k_free(dyn_obj);
			return NULL;
		}

		dyn_obj->kobj.data = tidx;
	}

	/* The allocating thread implicitly gets permission on kernel objects
	 * that it allocates
	 */
	_thread_perms_set(&dyn_obj->kobj, _current);

	key = irq_lock();
	rb_insert(&obj_rb_tree, &dyn_obj->node);
	sys_dlist_append(&obj_list, &dyn_obj->obj_list);
	irq_unlock(key);

	return dyn_obj->kobj.name;
}

void k_object_free(void *obj)
{
	struct dyn_obj *dyn_obj;
	unsigned int key;

	/* This function is intentionally not exposed to user mode.
	 * There's currently no robust way to track that an object isn't
	 * being used by some other thread
	 */

	key = irq_lock();
	dyn_obj = dyn_object_find(obj);
	if (dyn_obj != NULL) {
		rb_remove(&obj_rb_tree, &dyn_obj->node);
		sys_dlist_remove(&dyn_obj->obj_list);

		if (dyn_obj->kobj.type == K_OBJ_THREAD) {
			_thread_idx_free(dyn_obj->kobj.data);
		}
	}
	irq_unlock(key);

	if (dyn_obj != NULL) {
		k_free(dyn_obj);
	}
}
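
/* Look up an object's metadata: first in the gperf-generated table of
 * static kernel objects, then in the tree of dynamically allocated ones.
 */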
struct _k_object *_k_object_find(void *obj)
{
	struct _k_object *ret;

	ret = _k_object_gperf_find(obj);

	if (ret == NULL) {
		struct dyn_obj *dyn_obj;

		dyn_obj = dyn_object_find(obj);
		if (dyn_obj != NULL) {
			ret = &dyn_obj->kobj;
		}
	}

	return ret;
}

void _k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
	unsigned int key;
	struct dyn_obj *obj, *next;

	_k_object_gperf_wordlist_foreach(func, context);

	key = irq_lock();
	SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&obj_list, obj, next, obj_list) {
		func(&obj->kobj, context);
	}
	irq_unlock(key);
}
#endif /* CONFIG_DYNAMIC_OBJECTS */
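
/* Map a thread to its permission bitfield index, or -1 if the thread has
 * no kernel object metadata (e.g. a supervisor-only thread declared on a
 * stack).
 */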
static int thread_index_get(struct k_thread *t)
{
	struct _k_object *ko;

	ko = _k_object_find(t);

	if (ko == NULL) {
		return -1;
	}

	return ko->data;
}

static void unref_check(struct _k_object *ko)
{
	for (int i = 0; i < CONFIG_MAX_THREAD_BYTES; i++) {
		if (ko->perms[i]) {
			return;
		}
	}

	/* This object has no more references. Some objects may have
	 * dynamically allocated resources, require cleanup, or need to be
	 * marked as uninitialized when all references are gone. What
	 * specifically needs to happen depends on the object type.
	 */
	switch (ko->type) {
	case K_OBJ_PIPE:
		k_pipe_cleanup((struct k_pipe *)ko->name);
		break;
	case K_OBJ_MSGQ:
		k_msgq_cleanup((struct k_msgq *)ko->name);
		break;
	case K_OBJ_STACK:
		k_stack_cleanup((struct k_stack *)ko->name);
		break;
	default:
		/* Nothing to do */
		break;
	}

#ifdef CONFIG_DYNAMIC_OBJECTS
	if (ko->flags & K_OBJ_FLAG_ALLOC) {
		struct dyn_obj *dyn_obj =
			CONTAINER_OF(ko, struct dyn_obj, kobj);
		rb_remove(&obj_rb_tree, &dyn_obj->node);
		sys_dlist_remove(&dyn_obj->obj_list);
		k_free(dyn_obj);
	}
#endif
}
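
/* Permission-inheritance callback: for every object the parent thread can
 * access (other than the parent's own thread object), grant the child
 * access as well.
 */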
static void wordlist_cb(struct _k_object *ko, void *ctx_ptr)
{
	struct perm_ctx *ctx = (struct perm_ctx *)ctx_ptr;

	if (sys_bitfield_test_bit((mem_addr_t)&ko->perms, ctx->parent_id) &&
	    (struct k_thread *)ko->name != ctx->parent) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, ctx->child_id);
	}
}

void _thread_perms_inherit(struct k_thread *parent, struct k_thread *child)
{
	struct perm_ctx ctx = {
		thread_index_get(parent),
		thread_index_get(child),
		parent
	};

	if ((ctx.parent_id != -1) && (ctx.child_id != -1)) {
		_k_object_wordlist_foreach(wordlist_cb, &ctx);
	}
}

void _thread_perms_set(struct _k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		sys_bitfield_set_bit((mem_addr_t)&ko->perms, index);
	}
}

void _thread_perms_clear(struct _k_object *ko, struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		unsigned int key = irq_lock();

		sys_bitfield_clear_bit((mem_addr_t)&ko->perms, index);
		unref_check(ko);
		irq_unlock(key);
	}
}

static void clear_perms_cb(struct _k_object *ko, void *ctx_ptr)
{
	int id = (int)ctx_ptr;
	unsigned int key = irq_lock();

	sys_bitfield_clear_bit((mem_addr_t)&ko->perms, id);
	unref_check(ko);
	irq_unlock(key);
}

void _thread_perms_all_clear(struct k_thread *thread)
{
	int index = thread_index_get(thread);

	if (index != -1) {
		_k_object_wordlist_foreach(clear_perms_cb, (void *)index);
	}
}

static int thread_perms_test(struct _k_object *ko)
{
	int index;

	if (ko->flags & K_OBJ_FLAG_PUBLIC) {
		return 1;
	}

	index = thread_index_get(_current);
	if (index != -1) {
		return sys_bitfield_test_bit((mem_addr_t)&ko->perms, index);
	}
	return 0;
}

static void dump_permission_error(struct _k_object *ko)
{
	int index = thread_index_get(_current);

	printk("thread %p (%d) does not have permission on %s %p [",
	       _current, index,
	       otype_to_str(ko->type), ko->name);
	for (int i = CONFIG_MAX_THREAD_BYTES - 1; i >= 0; i--) {
		printk("%02x", ko->perms[i]);
	}
	printk("]\n");
}

void _dump_object_error(int retval, void *obj, struct _k_object *ko,
			enum k_objects otype)
{
	switch (retval) {
	case -EBADF:
		printk("%p is not a valid %s\n", obj, otype_to_str(otype));
		break;
	case -EPERM:
		dump_permission_error(ko);
		break;
	case -EINVAL:
		printk("%p used before initialization\n", obj);
		break;
	case -EADDRINUSE:
		printk("%p %s in use\n", obj, otype_to_str(otype));
		break;
	default:
		/* Unhandled error */
		break;
	}
}

void _impl_k_object_access_grant(void *object, struct k_thread *thread)
{
	struct _k_object *ko = _k_object_find(object);

	if (ko != NULL) {
		_thread_perms_set(ko, thread);
	}
}

void k_object_access_revoke(void *object, struct k_thread *thread)
{
	struct _k_object *ko = _k_object_find(object);

	if (ko != NULL) {
		_thread_perms_clear(ko, thread);
	}
}

void _impl_k_object_release(void *object)
{
	k_object_access_revoke(object, _current);
}

void k_object_access_all_grant(void *object)
{
	struct _k_object *ko = _k_object_find(object);

	if (ko != NULL) {
		ko->flags |= K_OBJ_FLAG_PUBLIC;
	}
}
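
/* Check whether a user thread may operate on a kernel object: -EBADF for a
 * bad pointer or type mismatch, -EPERM if the caller lacks permission
 * (required even for uninitialized objects), -EINVAL or -EADDRINUSE for an
 * initialization-state mismatch, 0 on success.
 */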
int _k_object_validate(struct _k_object *ko, enum k_objects otype,
		       enum _obj_init_check init)
{
	if (unlikely(!ko || (otype != K_OBJ_ANY && ko->type != otype))) {
		return -EBADF;
	}

	/* Manipulation of any kernel objects by a user thread requires that
	 * the thread be granted access first, even for uninitialized objects
	 */
	if (unlikely(!thread_perms_test(ko))) {
		return -EPERM;
	}

	/* Initialization state checks. _OBJ_INIT_ANY, we don't care */
	if (likely(init == _OBJ_INIT_TRUE)) {
		/* Object MUST be initialized */
		if (unlikely(!(ko->flags & K_OBJ_FLAG_INITIALIZED))) {
			return -EINVAL;
		}
	} else if (init < _OBJ_INIT_TRUE) { /* _OBJ_INIT_FALSE case */
		/* Object MUST NOT be initialized */
		if (unlikely(ko->flags & K_OBJ_FLAG_INITIALIZED)) {
			return -EADDRINUSE;
		}
	}

	return 0;
}

void _k_object_init(void *object)
{
	struct _k_object *ko;

	/* By the time we get here, if the caller was from userspace, all the
	 * necessary checks have been done in _k_object_validate(), which takes
	 * place before the object is initialized.
	 *
	 * This function runs after the object has been initialized and
	 * finalizes it
	 */

	ko = _k_object_find(object);
	if (ko == NULL) {
		/* Supervisor threads can ignore rules about kernel objects
		 * and may declare them on stacks, etc. Such objects will never
		 * be usable from userspace, but we shouldn't explode.
		 */
		return;
	}

	/* Allows non-initialization system calls to be made on this object */
	ko->flags |= K_OBJ_FLAG_INITIALIZED;
}
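
/* Reset an object's permission state: wipe all permission bits, grant the
 * current thread exclusive access, and mark the object initialized.
 */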
void _k_object_recycle(void *object)
{
	struct _k_object *ko = _k_object_find(object);

	if (ko != NULL) {
		(void)memset(ko->perms, 0, sizeof(ko->perms));
		_thread_perms_set(ko, k_current_get());
		ko->flags |= K_OBJ_FLAG_INITIALIZED;
	}
}

void _k_object_uninit(void *object)
{
	struct _k_object *ko;

	/* See comments in _k_object_init() */
	ko = _k_object_find(object);
	if (ko == NULL) {
		return;
	}

	ko->flags &= ~K_OBJ_FLAG_INITIALIZED;
}

/*
 * Copy to/from helper functions used in syscall handlers
 */
void *z_user_alloc_from_copy(void *src, size_t size)
{
	void *dst = NULL;
	unsigned int key;

	key = irq_lock();

	/* Does the caller in user mode have access to read this memory? */
	if (Z_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	dst = z_thread_malloc(size);
	if (dst == NULL) {
		printk("out of thread resource pool memory (%zu)\n", size);
		goto out_err;
	}

	(void)memcpy(dst, src, size);
out_err:
	irq_unlock(key);
	return dst;
}

static int user_copy(void *dst, void *src, size_t size, bool to_user)
{
	int ret = EFAULT;
	unsigned int key;

	key = irq_lock();

	/* Does the caller in user mode have access to this memory? */
	if (to_user ? Z_SYSCALL_MEMORY_WRITE(dst, size) :
			Z_SYSCALL_MEMORY_READ(src, size)) {
		goto out_err;
	}

	(void)memcpy(dst, src, size);
	ret = 0;
out_err:
	irq_unlock(key);
	return ret;
}

int z_user_from_copy(void *dst, void *src, size_t size)
{
	return user_copy(dst, src, size, false);
}

int z_user_to_copy(void *dst, void *src, size_t size)
{
	return user_copy(dst, src, size, true);
}
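
/* Copy a NUL-terminated string out of user memory into a freshly allocated
 * kernel buffer. actual_len + 1 accounts for the terminator, with an
 * explicit overflow check on the addition.
 */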
char *z_user_string_alloc_copy(char *src, size_t maxlen)
{
	unsigned long actual_len;
	int err;
	unsigned int key;
	char *ret = NULL;

	key = irq_lock();
	actual_len = z_user_string_nlen(src, maxlen, &err);
	if (err) {
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		printk("string too long %p (%lu)\n", src, actual_len);
		goto out;
	}
	if (__builtin_uaddl_overflow(actual_len, 1, &actual_len)) {
		printk("overflow\n");
		goto out;
	}

	ret = z_user_alloc_from_copy(src, actual_len);
out:
	irq_unlock(key);
	return ret;
}

int z_user_string_copy(char *dst, char *src, size_t maxlen)
{
	unsigned long actual_len;
	int ret, err;
	unsigned int key;

	key = irq_lock();
	actual_len = z_user_string_nlen(src, maxlen, &err);
	if (err) {
		ret = EFAULT;
		goto out;
	}
	if (actual_len == maxlen) {
		/* Not NULL terminated */
		printk("string too long %p (%lu)\n", src, actual_len);
		ret = EINVAL;
		goto out;
	}
	if (__builtin_uaddl_overflow(actual_len, 1, &actual_len)) {
		printk("overflow\n");
		ret = EINVAL;
		goto out;
	}

	ret = z_user_from_copy(dst, src, actual_len);
out:
	irq_unlock(key);
	return ret;
}

/*
 * Default handlers if otherwise unimplemented
 */

static u32_t handler_bad_syscall(u32_t bad_id, u32_t arg2, u32_t arg3,
				 u32_t arg4, u32_t arg5, u32_t arg6, void *ssf)
{
	printk("Bad system call id %u invoked\n", bad_id);
	_arch_syscall_oops(ssf);
	CODE_UNREACHABLE;
}

static u32_t handler_no_syscall(u32_t arg1, u32_t arg2, u32_t arg3,
				u32_t arg4, u32_t arg5, u32_t arg6, void *ssf)
{
	printk("Unimplemented system call\n");
	_arch_syscall_oops(ssf);
	CODE_UNREACHABLE;
}
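
/* syscall_dispatch.c is assumed to be generated at build time; it defines
 * the table mapping system call ids to handler functions, with the
 * fallbacks above covering unimplemented and out-of-range ids.
 */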
#include <syscall_dispatch.c>