userspace: add support for dynamic kernel objects
A red-black tree is maintained containing the metadata for all
dynamically created kernel objects, which are allocated out of the
system heap.

Currently, k_object_alloc() and k_object_free() are supervisor-only.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>

parent 513488c937
commit 31bdfc014e
7 changed files with 254 additions and 3 deletions
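For orientation before the diff itself, here is a minimal usage sketch of the
new API. It is illustrative only and not part of the patch; it assumes
CONFIG_DYNAMIC_OBJECTS is enabled and runs in a supervisor thread, since the
allocation calls are supervisor-only for now.

#include <kernel.h>

/* Sketch: allocate a semaphore object from the system heap, initialize
 * and use it like any other k_sem, then return the memory once nothing
 * references it anymore.
 */
void dyn_sem_example(void)
{
	struct k_sem *sem = k_object_alloc(K_OBJ_SEM);

	if (sem == NULL) {
		/* Heap exhausted; CONFIG_HEAP_MEM_POOL_SIZE too small */
		return;
	}

	/* Objects come back uninitialized; the normal init call is still
	 * required before use.
	 */
	k_sem_init(sem, 0, 1);
	k_sem_give(sem);
	k_sem_take(sem, K_FOREVER);

	/* Supervisor-only for now, per the commit message above */
	k_object_free(sem);
}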
@@ -106,6 +106,15 @@ config MAX_THREAD_BYTES
	  bitfield (in bytes) and imposes a limit on how many threads can
	  be created in the system.

config DYNAMIC_OBJECTS
	bool "Allow kernel objects to be requested on system heap"
	default n
	depends on USERSPACE
	depends on HEAP_MEM_POOL_SIZE > 0
	help
	  Enabling this option allows for kernel objects to be requested from
	  the system heap, at a cost in performance and additional memory.

config SIMPLE_FATAL_ERROR_HANDLER
	prompt "Simple system fatal error handler"
	bool
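An application opting in needs the dependencies above satisfied in its
prj.conf. The test configuration added later in this commit shows one
workable combination (the 8192-byte heap is just what that test picks, not a
requirement):

CONFIG_USERSPACE=y
CONFIG_HEAP_MEM_POOL_SIZE=8192
CONFIG_DYNAMIC_OBJECTS=y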
@@ -299,6 +299,35 @@ __syscall void k_object_access_revoke(void *object, struct k_thread *thread);
 */
void k_object_access_all_grant(void *object);

#ifdef CONFIG_DYNAMIC_OBJECTS
/**
 * Allocate a kernel object of a designated type
 *
 * This will instantiate at runtime a kernel object of the specified type,
 * returning a pointer to it. The object will be returned in an uninitialized
 * state, with the calling thread being granted permission on it. The memory
 * for the object will be allocated out of the kernel's heap.
 *
 * Currently, allocation of thread stacks is not supported.
 *
 * @param otype Requested kernel object type
 * @return A pointer to the allocated kernel object, or NULL if memory wasn't
 *         available
 */
void *k_object_alloc(enum k_objects otype);

/**
 * Free a kernel object previously allocated with k_object_alloc()
 *
 * This will return memory for a kernel object back to the system heap.
 * Care must be exercised that the object is not used during or after this
 * call is made.
 *
 * @param obj Pointer to the kernel object memory address.
 */
void k_object_free(void *obj);
#endif /* CONFIG_DYNAMIC_OBJECTS */

/* Using typedef deliberately here, this is quite intended to be an opaque
 * type. K_THREAD_STACK_BUFFER() should be used to access the data within.
 *
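The allocating thread gets permission on the new object implicitly, but
nothing else does; handing a dynamically allocated object to a user thread
still goes through the existing grant API. A minimal sketch under that
assumption (k_object_access_grant() is pre-existing Zephyr API, and this
helper is purely illustrative, not part of the patch):

#include <kernel.h>

/* Illustrative only: allocate a semaphore at runtime and let another
 * (user mode) thread make syscalls on it.
 */
static struct k_sem *share_dyn_sem(struct k_thread *user_thread)
{
	struct k_sem *sem = k_object_alloc(K_OBJ_SEM);

	if (sem == NULL) {
		return NULL;
	}

	/* k_object_alloc() returns uninitialized memory */
	k_sem_init(sem, 0, 1);

	/* Explicit grant; only the allocating thread has permission so far */
	k_object_access_grant(sem, user_thread);

	return sem;
}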
@@ -18,8 +18,13 @@
	*(".kobject_data.text*")
	_kobject_text_area_end = .;
#ifndef LINKER_PASS2
#ifdef CONFIG_DYNAMIC_OBJECTS
	PROVIDE(_k_object_gperf_find = .);
	PROVIDE(_k_object_gperf_wordlist_foreach = .);
#else
	PROVIDE(_k_object_find = .);
	PROVIDE(_k_object_wordlist_foreach = .);
#endif
#endif
	. += KOBJECT_TEXT_AREA - (_kobject_text_area_end - _kobject_text_area_start);
#endif /* CONFIG_USERSPACE */
@@ -8,11 +8,15 @@
#include <kernel.h>
#include <string.h>
#include <misc/printk.h>
#include <misc/rb.h>
#include <kernel_structs.h>
#include <sys_io.h>
#include <ksched.h>
#include <syscall.h>
#include <syscall_handler.h>
#include <device.h>
#include <init.h>
#include <logging/sys_log.h>

#define MAX_THREAD_BITS (CONFIG_MAX_THREAD_BYTES * 8)
@@ -94,6 +98,185 @@ struct perm_ctx {
	struct k_thread *parent;
};

#ifdef CONFIG_DYNAMIC_OBJECTS
struct dyn_obj {
	struct _k_object kobj;
	struct rbnode node; /* must be immediately before data member */
	u8_t data[]; /* The object itself */
};

struct visit_ctx {
	_wordlist_cb_func_t func;
	void *original_context;
};

extern struct _k_object *_k_object_gperf_find(void *obj);
extern void _k_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
					     void *context);

static int node_lessthan(struct rbnode *a, struct rbnode *b);

static struct rbtree obj_rb_tree = {
	.lessthan_fn = node_lessthan
};

/* TODO: incorporate auto-gen with Leandro's patch */
static size_t obj_size_get(enum k_objects otype)
{
	switch (otype) {
	case K_OBJ_ALERT:
		return sizeof(struct k_alert);
	case K_OBJ_MSGQ:
		return sizeof(struct k_msgq);
	case K_OBJ_MUTEX:
		return sizeof(struct k_mutex);
	case K_OBJ_PIPE:
		return sizeof(struct k_pipe);
	case K_OBJ_SEM:
		return sizeof(struct k_sem);
	case K_OBJ_STACK:
		return sizeof(struct k_stack);
	case K_OBJ_THREAD:
		return sizeof(struct k_thread);
	case K_OBJ_TIMER:
		return sizeof(struct k_timer);
	default:
		return sizeof(struct device);
	}
}

static int node_lessthan(struct rbnode *a, struct rbnode *b)
{
	return a < b;
}

static inline struct dyn_obj *node_to_dyn_obj(struct rbnode *node)
{
	return CONTAINER_OF(node, struct dyn_obj, node);
}
static struct dyn_obj *dyn_object_find(void *obj)
{
	struct rbnode *node;
	struct dyn_obj *ret;
	int key;

	/* For any dynamically allocated kernel object, the object
	 * pointer is just a member of the containing struct dyn_obj,
	 * so just a little arithmetic is necessary to locate the
	 * corresponding struct rbnode
	 */
	node = (struct rbnode *)((char *)obj - sizeof(struct rbnode));

	key = irq_lock();
	if (rb_contains(&obj_rb_tree, node)) {
		ret = node_to_dyn_obj(node);
	} else {
		ret = NULL;
	}
	irq_unlock(key);

	return ret;
}

void *k_object_alloc(enum k_objects otype)
{
	struct dyn_obj *dyn_obj;
	int key;

	/* Stacks are not supported, we don't yet have mem pool APIs
	 * to request memory that is aligned
	 */
	__ASSERT(otype > K_OBJ_ANY && otype < K_OBJ_LAST &&
		 otype != K_OBJ__THREAD_STACK_ELEMENT,
		 "bad object type requested");

	dyn_obj = k_malloc(sizeof(*dyn_obj) + obj_size_get(otype));
	if (!dyn_obj) {
		SYS_LOG_WRN("could not allocate kernel object");
		return NULL;
	}

	dyn_obj->kobj.name = (char *)&dyn_obj->data;
	dyn_obj->kobj.type = otype;
	dyn_obj->kobj.flags = 0;
	memset(dyn_obj->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);

	/* The allocating thread implicitly gets permission on kernel objects
	 * that it allocates
	 */
	_thread_perms_set(&dyn_obj->kobj, _current);

	key = irq_lock();
	rb_insert(&obj_rb_tree, &dyn_obj->node);
	irq_unlock(key);

	return dyn_obj->kobj.name;
}

void k_object_free(void *obj)
{
	struct dyn_obj *dyn_obj;
	int key;

	/* This function is intentionally not exposed to user mode.
	 * There's currently no robust way to track that an object isn't
	 * being used by some other thread
	 */

	key = irq_lock();
	dyn_obj = dyn_object_find(obj);
	if (dyn_obj) {
		rb_remove(&obj_rb_tree, &dyn_obj->node);
	}
	irq_unlock(key);

	if (dyn_obj) {
		k_free(dyn_obj);
	}
}

struct _k_object *_k_object_find(void *obj)
{
	struct _k_object *ret;

	ret = _k_object_gperf_find(obj);

	if (!ret) {
		struct dyn_obj *dyn_obj;

		dyn_obj = dyn_object_find(obj);
		if (dyn_obj) {
			ret = &dyn_obj->kobj;
		}
	}

	return ret;
}

static void visit_fn(struct rbnode *node, void *context)
{
	struct visit_ctx *vctx = context;

	vctx->func(&node_to_dyn_obj(node)->kobj, vctx->original_context);
}

void _k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
	struct visit_ctx vctx;
	int key;

	_k_object_gperf_wordlist_foreach(func, context);

	vctx.func = func;
	vctx.original_context = context;

	key = irq_lock();
	rb_walk(&obj_rb_tree, visit_fn, &vctx);
	irq_unlock(key);
}
#endif /* CONFIG_DYNAMIC_OBJECTS */

static int thread_index_get(struct k_thread *t)
{
	struct _k_object *ko;
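dyn_object_find() above depends on a layout invariant called out in the
struct dyn_obj comment: data[] must begin immediately after node, so
subtracting sizeof(struct rbnode) from an object pointer lands on the
embedded rbnode. A hypothetical compile-time guard for that invariant, not
part of this patch (it assumes Zephyr's BUILD_ASSERT macro and would sit next
to the struct definition in userspace.c):

#include <stddef.h>

/* Hypothetical: fail the build if padding ever appears between node and
 * data, which would silently break dyn_object_find()'s arithmetic.
 */
BUILD_ASSERT(offsetof(struct dyn_obj, data) ==
	     offsetof(struct dyn_obj, node) + sizeof(struct rbnode));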
@@ -52,6 +52,7 @@ header = """%compare-lengths
%struct-type
%{
#include <kernel.h>
#include <toolchain.h>
#include <syscall_handler.h>
#include <string.h>
%}

@@ -65,12 +66,12 @@ struct _k_object;
# turned into a string, we told gperf to expect binary strings that are not
# NULL-terminated.
footer = """%%
struct _k_object *_k_object_find(void *obj)
struct _k_object *_k_object_gperf_find(void *obj)
{
	return _k_object_lookup((const char *)obj, sizeof(void *));
}

void _k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
void _k_object_gperf_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
	int i;

@@ -80,6 +81,14 @@ void _k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
		}
	}
}

#ifndef CONFIG_DYNAMIC_OBJECTS
struct _k_object *_k_object_find(void *obj)
	ALIAS_OF(_k_object_gperf_find);

void _k_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
	ALIAS_OF(_k_object_gperf_wordlist_foreach);
#endif
"""
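When CONFIG_DYNAMIC_OBJECTS is off, the generated file keeps the original
public names by aliasing them straight to the gperf-backed implementations,
so there is no wrapper-call overhead. Roughly what that footer expands to,
sketched under the assumption that ALIAS_OF wraps GCC's alias attribute (as
Zephyr's GCC toolchain header does); illustrative, not literal generated
output:

/* _k_object_find becomes another name for _k_object_gperf_find;
 * both symbols resolve to the same function body.
 */
struct _k_object *_k_object_find(void *obj)
	__attribute__((alias("_k_object_gperf_find")));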
@@ -1,2 +1,4 @@
CONFIG_ZTEST=y
CONFIG_USERSPACE=y
CONFIG_DYNAMIC_OBJECTS=y
CONFIG_HEAP_MEM_POOL_SIZE=8192
@@ -11,6 +11,8 @@
#define SEM_ARRAY_SIZE 16

static __kernel struct k_sem semarray[SEM_ARRAY_SIZE];
static struct k_sem *dyn_sem[SEM_ARRAY_SIZE];

K_SEM_DEFINE(sem1, 0, 1);
static __kernel struct k_sem sem2;
static __kernel char bad_sem[sizeof(struct k_sem)];

@@ -31,7 +33,8 @@ static int test_object(struct k_sem *sem, int retval)
	}

	if (ret != retval) {
		TC_PRINT("FAIL check of %p is not %d\n", sem, retval);
		TC_PRINT("FAIL check of %p is not %d, got %d instead\n", sem,
			 retval, ret);
		return 1;
	}
	return 0;

@@ -75,6 +78,17 @@ void test_generic_object(void)

	for (int i = 0; i < SEM_ARRAY_SIZE; i++) {
		object_permission_checks(&semarray[i], false);
		dyn_sem[i] = k_object_alloc(K_OBJ_SEM);
	}

	/* dynamic object table well-populated with semaphores at this point */
	for (int i = 0; i < SEM_ARRAY_SIZE; i++) {
		/* Should have permission granted but be uninitialized */
		zassert_false(test_object(dyn_sem[i], -EINVAL), NULL);
		k_object_access_revoke(dyn_sem[i], k_current_get());
		object_permission_checks(dyn_sem[i], false);
		k_object_free(dyn_sem[i]);
		zassert_false(test_object(dyn_sem[i], -EBADF), NULL);
	}
}