kernel: userspace: Dynamic thread stack object

Add support for dynamic thread stack objects. A new container
for this kernel object was added so that its strict alignment
constraint is not imposed on all other dynamic objects.

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
Flavio Ceolin 2023-06-23 09:34:14 -07:00 committed by Chris Friedt
commit 3b7e0b672e
4 changed files with 150 additions and 54 deletions
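
At a glance, the container split introduced by the hunks below looks roughly like this (a simplified sketch for orientation only; the authoritative definitions are the ones in the diff):

struct dyn_obj_base {			/* bookkeeping shared by every dynamic object */
	struct z_object kobj;
	sys_dnode_t dobj_list;
	struct rbnode node;
};

struct dyn_obj {			/* every dynamic object except thread stacks */
	struct dyn_obj_base base;
	uint8_t data[];			/* payload embedded in the same allocation */
};

struct dyn_obj_stack {			/* thread stacks only */
	struct dyn_obj_base base;
	void *data;			/* stack buffer allocated separately, at the stack's alignment */
};

Because the stack buffer now sits behind a pointer, only that second allocation has to honor the stack's MPU/page alignment; all other dynamic objects keep ordinary pointer alignment.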

@@ -49,6 +49,25 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
#ifdef CONFIG_DYNAMIC_OBJECTS
static struct k_spinlock lists_lock; /* kobj rbtree/dlist */
static struct k_spinlock objfree_lock; /* k_object_free */
#ifdef CONFIG_GEN_PRIV_STACKS
/* On ARM MPU we may have two different alignment requirements
 * when dynamically allocating thread stacks, one for the privileged
 * stack and another for the user stack, so we need to account for the
 * worst-case alignment scenario and reserve space for that.
 */
#ifdef CONFIG_ARM_MPU
#define STACK_ELEMENT_DATA_SIZE(size) \
(sizeof(struct z_stack_data) + CONFIG_PRIVILEGED_STACK_SIZE + \
Z_THREAD_STACK_OBJ_ALIGN(size) + Z_THREAD_STACK_SIZE_ADJUST(size))
#else
#define STACK_ELEMENT_DATA_SIZE(size) (sizeof(struct z_stack_data) + \
Z_THREAD_STACK_SIZE_ADJUST(size))
#endif /* CONFIG_ARM_MPU */
#else
#define STACK_ELEMENT_DATA_SIZE(size) Z_THREAD_STACK_SIZE_ADJUST(size)
#endif /* CONFIG_GEN_PRIV_STACKS */
#endif
static struct k_spinlock obj_lock; /* kobj struct data */
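
As a rough worked example of the worst-case reservation made by STACK_ELEMENT_DATA_SIZE() on an ARM MPU target with privileged stacks (all numbers below are hypothetical placeholders; the real values come from the target's Z_THREAD_STACK_* macros and Kconfig):

#include <stddef.h>

/* Hypothetical values, for illustration only. */
#define EXAMPLE_STACK_DATA_SIZE    8u	/* sizeof(struct z_stack_data), assumed      */
#define EXAMPLE_PRIV_STACK_SIZE 1024u	/* CONFIG_PRIVILEGED_STACK_SIZE, assumed     */
#define EXAMPLE_STACK_ALIGN     2048u	/* Z_THREAD_STACK_OBJ_ALIGN(2048), assumed   */
#define EXAMPLE_ADJUSTED_SIZE   2048u	/* Z_THREAD_STACK_SIZE_ADJUST(2048), assumed */

/* Mirrors STACK_ELEMENT_DATA_SIZE(2048) with CONFIG_ARM_MPU and
 * CONFIG_GEN_PRIV_STACKS: stack descriptor + privileged stack + slack to
 * realign the user stack + the adjusted user stack itself.
 */
static const size_t example_reservation =
	EXAMPLE_STACK_DATA_SIZE + EXAMPLE_PRIV_STACK_SIZE +
	EXAMPLE_STACK_ALIGN + EXAMPLE_ADJUSTED_SIZE;	/* 5128 bytes in this example */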
@@ -130,18 +149,45 @@ uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
#define DYN_OBJ_DATA_ALIGN_K_THREAD (sizeof(void *))
#endif
#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
#ifndef CONFIG_MPU_STACK_GUARD
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
Z_THREAD_STACK_OBJ_ALIGN(CONFIG_PRIVILEGED_STACK_SIZE)
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
Z_THREAD_STACK_OBJ_ALIGN(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
#endif /* !CONFIG_MPU_STACK_GUARD */
#else
#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
Z_THREAD_STACK_OBJ_ALIGN(ARCH_STACK_PTR_ALIGN)
#endif /* CONFIG_DYNAMIC_THREAD_STACK_SIZE */
#define DYN_OBJ_DATA_ALIGN \
MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))
struct dyn_obj {
struct dyn_obj_base {
struct z_object kobj;
sys_dnode_t dobj_list;
struct rbnode node; /* must be immediately before data member */
};
struct dyn_obj {
struct dyn_obj_base base;
/* The object itself */
uint8_t data[] __aligned(DYN_OBJ_DATA_ALIGN_K_THREAD);
};
/* Thread stacks impose a very strict alignment. Using this alignment
 * (generally the page size) for all objects would waste a lot of memory.
 */
struct dyn_obj_stack {
struct dyn_obj_base base;
/* The object itself */
void *data;
};
extern struct z_object *z_object_gperf_find(const void *obj);
extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
void *context);
@@ -193,6 +239,9 @@ static size_t obj_align_get(enum k_objects otype)
ret = __alignof(struct dyn_obj);
#endif
break;
case K_OBJ_THREAD_STACK_ELEMENT:
ret = __alignof(struct dyn_obj_stack);
break;
default:
ret = __alignof(struct dyn_obj);
break;
@@ -206,36 +255,30 @@ static bool node_lessthan(struct rbnode *a, struct rbnode *b)
return a < b;
}
static inline struct dyn_obj *node_to_dyn_obj(struct rbnode *node)
{
return CONTAINER_OF(node, struct dyn_obj, node);
}
static inline struct rbnode *dyn_obj_to_node(void *obj)
{
struct dyn_obj *dobj = CONTAINER_OF(obj, struct dyn_obj, data);
return &dobj->node;
}
static struct dyn_obj *dyn_object_find(void *obj)
static struct dyn_obj_base *dyn_object_find(void *obj)
{
struct rbnode *node;
struct dyn_obj *ret;
struct dyn_obj_base *ret;
k_spinlock_key_t key;
/* For any dynamically allocated kernel object, the object
* pointer is just a member of the containing struct dyn_obj,
* so just a little arithmetic is necessary to locate the
* corresponding struct rbnode
*/
node = dyn_obj_to_node(obj);
key = k_spin_lock(&lists_lock);
k_spinlock_key_t key = k_spin_lock(&lists_lock);
if (rb_contains(&obj_rb_tree, node)) {
ret = node_to_dyn_obj(node);
} else {
ret = NULL;
RB_FOR_EACH(&obj_rb_tree, node) {
ret = CONTAINER_OF(node, struct dyn_obj_base, node);
if (ret->kobj.name == obj) {
goto end;
}
}
/* No object found */
ret = NULL;
end:
k_spin_unlock(&lists_lock, key);
return ret;
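
The pointer-arithmetic lookup in the old code only worked because the payload was embedded in the container. A dynamically allocated stack buffer is now a separate heap block that lives outside any container, so the only way back to the bookkeeping struct is the kobj.name pointer recorded at creation time, hence the linear walk. A minimal sketch of the difference (simplified; locking and error paths omitted):

/* Old layout: payload embedded, one CONTAINER_OF() was enough.
 *
 *	dobj = CONTAINER_OF(obj, struct dyn_obj, data);
 *
 * New layout: for K_OBJ_THREAD_STACK_ELEMENT, "obj" points into a buffer
 * from a second z_thread_aligned_alloc() call, so the tree is walked and
 * the stored kobj.name is compared against the pointer being looked up:
 *
 *	RB_FOR_EACH(&obj_rb_tree, node) {
 *		base = CONTAINER_OF(node, struct dyn_obj_base, node);
 *		if (base->kobj.name == obj) {
 *			return base;
 *		}
 *	}
 */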
@@ -304,18 +347,61 @@ static void thread_idx_free(uintptr_t tidx)
sys_bitfield_set_bit((mem_addr_t)_thread_idx_map, tidx);
}
struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
static struct z_object *dynamic_object_create(enum k_objects otype, size_t align,
size_t size)
{
struct dyn_obj *dyn;
struct dyn_obj_base *dyn;
dyn = z_thread_aligned_alloc(align, sizeof(*dyn) + size);
if (dyn == NULL) {
LOG_ERR("could not allocate kernel object, out of memory");
return NULL;
if (otype == K_OBJ_THREAD_STACK_ELEMENT) {
struct dyn_obj_stack *stack;
size_t adjusted_size;
if (size == 0) {
return NULL;
}
adjusted_size = STACK_ELEMENT_DATA_SIZE(size);
stack = z_thread_aligned_alloc(align, sizeof(struct dyn_obj_stack));
if (stack == NULL) {
return NULL;
}
dyn = &stack->base;
stack->data = z_thread_aligned_alloc(DYN_OBJ_DATA_ALIGN_K_THREAD_STACK,
adjusted_size);
if (stack->data == NULL) {
k_free(stack);
return NULL;
}
#ifdef CONFIG_GEN_PRIV_STACKS
struct z_stack_data *stack_data = (struct z_stack_data *)
((uint8_t *)stack->data + adjusted_size - sizeof(*stack_data));
stack_data->priv = (uint8_t *)stack->data;
dyn->kobj.data.stack_data = stack_data;
#ifdef CONFIG_ARM_MPU
dyn->kobj.name = (void *)ROUND_UP(
((uint8_t *)stack->data + CONFIG_PRIVILEGED_STACK_SIZE),
Z_THREAD_STACK_OBJ_ALIGN(size));
#else
dyn->kobj.name = stack->data;
#endif
#else
dyn->kobj.name = stack->data;
#endif
} else {
struct dyn_obj *obj;
obj = z_thread_aligned_alloc(align,
sizeof(struct dyn_obj) + obj_size_get(otype) + size);
if (obj == NULL) {
return NULL;
}
dyn = &obj->base;
dyn->kobj.name = &obj->data;
}
dyn->kobj.name = &dyn->data;
dyn->kobj.type = K_OBJ_ANY;
dyn->kobj.type = otype;
dyn->kobj.flags = 0;
(void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);
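
With CONFIG_ARM_MPU and CONFIG_GEN_PRIV_STACKS both enabled, the buffer allocated above ends up laid out roughly as follows (not to scale; the exact padding depends on the target's stack macros):

/*
 *  stack->data                                    stack->data + adjusted_size
 *  |                                                                        |
 *  v                                                                        v
 *  +------------------+---------+----------------------------+-------------+
 *  | privileged stack | padding | user stack                 | z_stack_data|
 *  +------------------+---------+----------------------------+-------------+
 *  ^                            ^                             ^
 *  stack_data->priv             kobj.name (rounded up to      stack_data, placed at
 *                               Z_THREAD_STACK_OBJ_ALIGN())   the end of the buffer
 *
 * Without CONFIG_ARM_MPU (or without CONFIG_GEN_PRIV_STACKS) there is no
 * split: kobj.name is simply stack->data.
 */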
@@ -328,7 +414,18 @@ struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
return &dyn->kobj;
}
static void *z_object_alloc(enum k_objects otype, size_t object_size)
struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
{
struct z_object *obj = dynamic_object_create(K_OBJ_ANY, align, size);
if (obj == NULL) {
LOG_ERR("could not allocate kernel object, out of memory");
}
return obj;
}
static void *z_object_alloc(enum k_objects otype, size_t size)
{
struct z_object *zo;
uintptr_t tidx = 0;
@@ -348,8 +445,6 @@ static void *z_object_alloc(enum k_objects otype, size_t object_size)
/* The following are currently not allowed at all */
case K_OBJ_FUTEX: /* Lives in user memory */
case K_OBJ_SYS_MUTEX: /* Lives in user memory */
case K_OBJ_THREAD_STACK_ELEMENT:
break;
case K_OBJ_NET_SOCKET: /* Indeterminate size */
LOG_ERR("forbidden object type '%s' requested",
otype_to_str(otype));
@@ -359,15 +454,13 @@ static void *z_object_alloc(enum k_objects otype, size_t object_size)
break;
}
zo = z_dynamic_object_aligned_create(obj_align_get(otype),
object_size);
zo = dynamic_object_create(otype, obj_align_get(otype), size);
if (zo == NULL) {
if (otype == K_OBJ_THREAD) {
thread_idx_free(tidx);
}
return NULL;
}
zo->type = otype;
if (otype == K_OBJ_THREAD) {
zo->data.thread_id = tidx;
@@ -388,7 +481,7 @@ static void *z_object_alloc(enum k_objects otype, size_t object_size)
void *z_impl_k_object_alloc(enum k_objects otype)
{
return z_object_alloc(otype, obj_size_get(otype));
return z_object_alloc(otype, 0);
}
void *z_impl_k_object_alloc_size(enum k_objects otype, size_t size)
@@ -398,7 +491,7 @@ void *z_impl_k_object_alloc_size(enum k_objects otype, size_t size)
void k_object_free(void *obj)
{
struct dyn_obj *dyn;
struct dyn_obj_base *dyn;
/* This function is intentionally not exposed to user mode.
* There's currently no robust way to track that an object isn't
@@ -430,15 +523,15 @@ struct z_object *z_object_find(const void *obj)
ret = z_object_gperf_find(obj);
if (ret == NULL) {
struct dyn_obj *dynamic_obj;
struct dyn_obj_base *dyn;
/* The cast to pointer-to-non-const violates MISRA
* 11.8 but is justified since we know dynamic objects
* were not declared with a const qualifier.
*/
dynamic_obj = dyn_object_find((void *)obj);
if (dynamic_obj != NULL) {
ret = &dynamic_obj->kobj;
dyn = dyn_object_find((void *)obj);
if (dyn != NULL) {
ret = &dyn->kobj;
}
}
@@ -447,7 +540,7 @@ struct z_object *z_object_find(const void *obj)
void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
{
struct dyn_obj *obj, *next;
struct dyn_obj_base *obj, *next;
z_object_gperf_wordlist_foreach(func, context);
@@ -487,7 +580,7 @@ static void unref_check(struct z_object *ko, uintptr_t index)
void *vko = ko;
struct dyn_obj *dyn = CONTAINER_OF(vko, struct dyn_obj, kobj);
struct dyn_obj_base *dyn = CONTAINER_OF(vko, struct dyn_obj_base, kobj);
__ASSERT(IS_PTR_ALIGNED(dyn, struct dyn_obj), "unaligned z_object");

@@ -6,7 +6,7 @@ CONFIG_MAX_THREAD_BYTES=5
CONFIG_DYNAMIC_THREAD=y
CONFIG_DYNAMIC_THREAD_POOL_SIZE=2
CONFIG_DYNAMIC_THREAD_ALLOC=y
CONFIG_HEAP_MEM_POOL_SIZE=16384
CONFIG_HEAP_MEM_POOL_SIZE=20480
CONFIG_ZTEST_STACK_SIZE=2048
CONFIG_MAIN_STACK_SIZE=2048

@@ -17,7 +17,7 @@
#define MAX_HEAP_STACKS (CONFIG_HEAP_MEM_POOL_SIZE / STACK_OBJ_SIZE)
ZTEST_DMEM bool flag[CONFIG_DYNAMIC_THREAD_POOL_SIZE];
ZTEST_DMEM bool flag[MAX(CONFIG_DYNAMIC_THREAD_POOL_SIZE, MAX_HEAP_STACKS)];
static void func(void *arg1, void *arg2, void *arg3)
{
@@ -63,6 +63,7 @@ ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_pool)
/* spawn our threads */
for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
flag[i] = false;
tid[i] = k_thread_create(&th[i], stack[i],
CONFIG_DYNAMIC_THREAD_STACK_SIZE, func,
&flag[i], NULL, NULL, 0,
@@ -86,7 +87,6 @@ ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_alloc)
{
size_t N;
static k_tid_t tid[MAX_HEAP_STACKS];
static bool flag[MAX_HEAP_STACKS];
static struct k_thread th[MAX_HEAP_STACKS];
static k_thread_stack_t *stack[MAX_HEAP_STACKS];
@@ -102,18 +102,17 @@ ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_alloc)
for (N = 0; N < MAX_HEAP_STACKS; ++N) {
stack[N] = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0);
zassert_not_null(stack[N]);
}
if (CONFIG_DYNAMIC_THREAD_POOL_SIZE == 0) {
/* ensure that no more thread stacks can be allocated from the heap */
zassert_is_null(k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0));
if (stack[N] == NULL) {
break;
}
}
/* spawn our threads */
for (size_t i = 0; i < N; ++i) {
tid[i] = k_thread_create(&th[i], stack[i], 0, func, &flag[i], NULL, NULL, 0,
flag[i] = false;
tid[i] = k_thread_create(&th[i], stack[i],
CONFIG_DYNAMIC_THREAD_STACK_SIZE, func,
&flag[i], NULL, NULL, 0,
K_USER | K_INHERIT_PERMS, K_NO_WAIT);
}
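
For reference, the user-facing flow these tests exercise looks roughly like the following (a minimal sketch assuming CONFIG_DYNAMIC_THREAD and CONFIG_DYNAMIC_THREAD_ALLOC are enabled; MY_STACK_SIZE stands in for CONFIG_DYNAMIC_THREAD_STACK_SIZE):

#include <zephyr/kernel.h>

#define MY_STACK_SIZE 2048	/* illustrative value */

static struct k_thread my_thread;

static void entry(void *a, void *b, void *c)
{
	/* thread body */
}

void spawn_with_dynamic_stack(void)
{
	/* Heap-allocated, suitably aligned stack object. */
	k_thread_stack_t *stack =
		k_thread_stack_alloc(MY_STACK_SIZE,
				     IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0);

	if (stack == NULL) {
		return;	/* out of heap */
	}

	k_tid_t tid = k_thread_create(&my_thread, stack, MY_STACK_SIZE, entry,
				      NULL, NULL, NULL, 0,
				      IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0,
				      K_NO_WAIT);

	k_thread_join(tid, K_FOREVER);
	(void)k_thread_stack_free(stack);
}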

@@ -33,6 +33,7 @@ tests:
# 010
- CONFIG_DYNAMIC_THREAD_POOL_SIZE=0
- CONFIG_DYNAMIC_THREAD_ALLOC=y
- CONFIG_DYNAMIC_THREAD_PREFER_ALLOC=y
- CONFIG_USERSPACE=n
kernel.threads.dynamic_thread.stack.no_pool.alloc.user:
tags: userspace
@@ -40,6 +41,7 @@ tests:
# 011
- CONFIG_DYNAMIC_THREAD_POOL_SIZE=0
- CONFIG_DYNAMIC_THREAD_ALLOC=y
- CONFIG_DYNAMIC_THREAD_PREFER_ALLOC=y
- CONFIG_USERSPACE=y
kernel.threads.dynamic_thread.stack.pool.no_alloc.no_user:
extra_configs:
@@ -57,6 +59,7 @@ tests:
kernel.threads.dynamic_thread.stack.pool.alloc.no_user:
extra_configs:
# 110
- CONFIG_DYNAMIC_THREAD_PREFER_ALLOC=y
- CONFIG_DYNAMIC_THREAD_POOL_SIZE=2
- CONFIG_DYNAMIC_THREAD_ALLOC=y
- CONFIG_USERSPACE=n
@@ -64,6 +67,7 @@ tests:
tags: userspace
extra_configs:
# 111
- CONFIG_DYNAMIC_THREAD_PREFER_ALLOC=y
- CONFIG_DYNAMIC_THREAD_POOL_SIZE=2
- CONFIG_DYNAMIC_THREAD_ALLOC=y
- CONFIG_USERSPACE=y
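
Putting the configuration pieces together, a project that wants heap-allocated user-mode thread stacks would typically set something like the following (symbols as used in the hunks above; sizes are illustrative):

CONFIG_USERSPACE=y
CONFIG_DYNAMIC_THREAD=y
CONFIG_DYNAMIC_THREAD_ALLOC=y
CONFIG_DYNAMIC_THREAD_PREFER_ALLOC=y
CONFIG_DYNAMIC_THREAD_POOL_SIZE=0
CONFIG_DYNAMIC_THREAD_STACK_SIZE=2048
CONFIG_HEAP_MEM_POOL_SIZE=20480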