kernel: userspace: Dynamic thread stack object

Add support for dynamic thread stack objects. A new container for this
kernel object was added to avoid imposing its alignment constraint on
all dynamic objects.

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>

commit 3b7e0b672e (parent fc6d9eeb3e)
4 changed files with 150 additions and 54 deletions
kernel/userspace.c:

@@ -49,6 +49,25 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
 #ifdef CONFIG_DYNAMIC_OBJECTS
 static struct k_spinlock lists_lock;   /* kobj rbtree/dlist */
 static struct k_spinlock objfree_lock; /* k_object_free */
 
+#ifdef CONFIG_GEN_PRIV_STACKS
+/* On ARM MPU we may have two different alignment requirements
+ * when dynamically allocating thread stacks, one for the privileged
+ * stack and the other for the user stack, so we need to account for
+ * the worst-case alignment scenario and reserve space for that.
+ */
+#ifdef CONFIG_ARM_MPU
+#define STACK_ELEMENT_DATA_SIZE(size) \
+    (sizeof(struct z_stack_data) + CONFIG_PRIVILEGED_STACK_SIZE + \
+    Z_THREAD_STACK_OBJ_ALIGN(size) + Z_THREAD_STACK_SIZE_ADJUST(size))
+#else
+#define STACK_ELEMENT_DATA_SIZE(size) (sizeof(struct z_stack_data) + \
+    Z_THREAD_STACK_SIZE_ADJUST(size))
+#endif /* CONFIG_ARM_MPU */
+#else
+#define STACK_ELEMENT_DATA_SIZE(size) Z_THREAD_STACK_SIZE_ADJUST(size)
+#endif /* CONFIG_GEN_PRIV_STACKS */
+
 #endif
 static struct k_spinlock obj_lock;     /* kobj struct data */
 
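Worked sizing example: under CONFIG_GEN_PRIV_STACKS with CONFIG_ARM_MPU, a single heap block must hold the stack metadata, the privileged stack, worst-case alignment padding, and the size-adjusted user stack. A stand-alone sketch of that arithmetic; every numeric value and the stack_data stand-in are illustrative assumptions, not taken from the commit:

    /* Hypothetical expansion of STACK_ELEMENT_DATA_SIZE() on ARM MPU with
     * CONFIG_GEN_PRIV_STACKS. All numbers are illustrative assumptions.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define PRIV_STACK_SIZE   1024u /* stands in for CONFIG_PRIVILEGED_STACK_SIZE */
    #define STACK_OBJ_ALIGN   2048u /* stands in for Z_THREAD_STACK_OBJ_ALIGN(size) */
    #define STACK_SIZE_ADJUST 2048u /* stands in for Z_THREAD_STACK_SIZE_ADJUST(size) */

    struct stack_data { void *priv; }; /* simplified stand-in for struct z_stack_data */

    int main(void)
    {
        /* metadata + privileged stack + worst-case padding + user stack */
        size_t total = sizeof(struct stack_data) + PRIV_STACK_SIZE +
                       STACK_OBJ_ALIGN + STACK_SIZE_ADJUST;

        printf("reserve %zu bytes per dynamic stack object\n", total);
        return 0;
    }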
@@ -130,18 +149,45 @@ uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
 #define DYN_OBJ_DATA_ALIGN_K_THREAD (sizeof(void *))
 #endif
 
+#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
+#ifndef CONFIG_MPU_STACK_GUARD
+#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
+    Z_THREAD_STACK_OBJ_ALIGN(CONFIG_PRIVILEGED_STACK_SIZE)
+#else
+#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
+    Z_THREAD_STACK_OBJ_ALIGN(CONFIG_DYNAMIC_THREAD_STACK_SIZE)
+#endif /* !CONFIG_MPU_STACK_GUARD */
+#else
+#define DYN_OBJ_DATA_ALIGN_K_THREAD_STACK \
+    Z_THREAD_STACK_OBJ_ALIGN(ARCH_STACK_PTR_ALIGN)
+#endif /* CONFIG_DYNAMIC_THREAD_STACK_SIZE */
+
 #define DYN_OBJ_DATA_ALIGN \
     MAX(DYN_OBJ_DATA_ALIGN_K_THREAD, (sizeof(void *)))
 
-struct dyn_obj {
+struct dyn_obj_base {
     struct z_object kobj;
     sys_dnode_t dobj_list;
     struct rbnode node; /* must be immediately before data member */
+};
+
+struct dyn_obj {
+    struct dyn_obj_base base;
+
     /* The object itself */
     uint8_t data[] __aligned(DYN_OBJ_DATA_ALIGN_K_THREAD);
 };
+
+/* Thread stacks impose a very strict alignment. Using that alignment
+ * (generally the page size) for all objects would waste a lot of memory.
+ */
+struct dyn_obj_stack {
+    struct dyn_obj_base base;
+
+    /* The object itself */
+    void *data;
+};
 
 extern struct z_object *z_object_gperf_find(const void *obj);
 extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
                                             void *context);
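The base/container split lets the rbtree and dlist bookkeeping operate on struct dyn_obj_base alone, while each container supplies its own storage: an inline flexible array for ordinary objects, an out-of-line pointer for stacks. A minimal stand-alone sketch of the pattern, with simplified stand-in types rather than the kernel's definitions:

    /* Sketch of the shared-base pattern. 'base' is first in each container,
     * so generic code can hold a struct base * and recover the container.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define CONTAINER_OF(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base { int id; };
    struct inline_obj  { struct base base; unsigned char data[16]; }; /* like dyn_obj */
    struct outline_obj { struct base base; void *data; };             /* like dyn_obj_stack */

    int main(void)
    {
        struct inline_obj o = { .base = { .id = 1 } };
        struct base *b = &o.base; /* what the bookkeeping lists would store */

        /* Recover the concrete container, as unref_check() does below. */
        struct inline_obj *back = CONTAINER_OF(b, struct inline_obj, base);

        printf("%d\n", back->base.id);
        return 0;
    }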
@@ -193,6 +239,9 @@ static size_t obj_align_get(enum k_objects otype)
         ret = __alignof(struct dyn_obj);
 #endif
         break;
+    case K_OBJ_THREAD_STACK_ELEMENT:
+        ret = __alignof(struct dyn_obj_stack);
+        break;
     default:
         ret = __alignof(struct dyn_obj);
         break;
@@ -206,36 +255,30 @@ static bool node_lessthan(struct rbnode *a, struct rbnode *b)
     return a < b;
 }
 
-static inline struct dyn_obj *node_to_dyn_obj(struct rbnode *node)
-{
-    return CONTAINER_OF(node, struct dyn_obj, node);
-}
-
-static inline struct rbnode *dyn_obj_to_node(void *obj)
-{
-    struct dyn_obj *dobj = CONTAINER_OF(obj, struct dyn_obj, data);
-
-    return &dobj->node;
-}
-
-static struct dyn_obj *dyn_object_find(void *obj)
+static struct dyn_obj_base *dyn_object_find(void *obj)
 {
     struct rbnode *node;
-    struct dyn_obj *ret;
+    struct dyn_obj_base *ret;
+    k_spinlock_key_t key;
 
     /* For any dynamically allocated kernel object, the object
      * pointer is just a member of the containing struct dyn_obj,
      * so just a little arithmetic is necessary to locate the
      * corresponding struct rbnode
      */
-    node = dyn_obj_to_node(obj);
-
-    k_spinlock_key_t key = k_spin_lock(&lists_lock);
-    if (rb_contains(&obj_rb_tree, node)) {
-        ret = node_to_dyn_obj(node);
-    } else {
-        ret = NULL;
+    key = k_spin_lock(&lists_lock);
+
+    RB_FOR_EACH(&obj_rb_tree, node) {
+        ret = CONTAINER_OF(node, struct dyn_obj_base, node);
+        if (ret->kobj.name == obj) {
+            goto end;
+        }
     }
+
+    /* No object found */
+    ret = NULL;
+
+end:
     k_spin_unlock(&lists_lock, key);
 
     return ret;
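Note on the lookup change: a dynamic stack object's public pointer (kobj.name) now refers to a separately allocated buffer, possibly rounded up past the privileged stack, so the old trick of computing the container address directly from the object pointer no longer holds for every object type. dyn_object_find() therefore walks the tree comparing kobj.name, trading the O(log n) rb_contains() check for a linear search under the same lists_lock.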
@@ -304,18 +347,61 @@ static void thread_idx_free(uintptr_t tidx)
     sys_bitfield_set_bit((mem_addr_t)_thread_idx_map, tidx);
 }
 
-struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
+static struct z_object *dynamic_object_create(enum k_objects otype, size_t align,
+                                              size_t size)
 {
-    struct dyn_obj *dyn;
+    struct dyn_obj_base *dyn;
 
-    dyn = z_thread_aligned_alloc(align, sizeof(*dyn) + size);
-    if (dyn == NULL) {
-        LOG_ERR("could not allocate kernel object, out of memory");
-        return NULL;
+    if (otype == K_OBJ_THREAD_STACK_ELEMENT) {
+        struct dyn_obj_stack *stack;
+        size_t adjusted_size;
+
+        if (size == 0) {
+            return NULL;
+        }
+
+        adjusted_size = STACK_ELEMENT_DATA_SIZE(size);
+        stack = z_thread_aligned_alloc(align, sizeof(struct dyn_obj_stack));
+        if (stack == NULL) {
+            return NULL;
+        }
+        dyn = &stack->base;
+
+        stack->data = z_thread_aligned_alloc(DYN_OBJ_DATA_ALIGN_K_THREAD_STACK,
+                                             adjusted_size);
+        if (stack->data == NULL) {
+            k_free(stack);
+            return NULL;
+        }
+
+#ifdef CONFIG_GEN_PRIV_STACKS
+        struct z_stack_data *stack_data = (struct z_stack_data *)
+            ((uint8_t *)stack->data + adjusted_size - sizeof(*stack_data));
+        stack_data->priv = (uint8_t *)stack->data;
+        dyn->kobj.data.stack_data = stack_data;
+#ifdef CONFIG_ARM_MPU
+        dyn->kobj.name = (void *)ROUND_UP(
+            ((uint8_t *)stack->data + CONFIG_PRIVILEGED_STACK_SIZE),
+            Z_THREAD_STACK_OBJ_ALIGN(size));
+#else
+        dyn->kobj.name = stack->data;
+#endif
+#else
+        dyn->kobj.name = stack->data;
+#endif
+    } else {
+        struct dyn_obj *obj;
+
+        obj = z_thread_aligned_alloc(align,
+            sizeof(struct dyn_obj) + obj_size_get(otype) + size);
+        if (obj == NULL) {
+            return NULL;
+        }
+        dyn = &obj->base;
+        dyn->kobj.name = &obj->data;
     }
 
-    dyn->kobj.name = &dyn->data;
-    dyn->kobj.type = K_OBJ_ANY;
+    dyn->kobj.type = otype;
     dyn->kobj.flags = 0;
     (void)memset(dyn->kobj.perms, 0, CONFIG_MAX_THREAD_BYTES);
 
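End to end, the stack branch above is reached via k_object_alloc_size(K_OBJ_THREAD_STACK_ELEMENT, size), and with CONFIG_DYNAMIC_THREAD_ALLOC=y via k_thread_stack_alloc(), which the test below exercises. A hedged usage sketch with error handling trimmed; k_thread_stack_free() is assumed as the matching cleanup call from the dynamic-thread API:

    /* Usage sketch: allocate a dynamic stack object and run a thread on it.
     * Assumes CONFIG_DYNAMIC_THREAD_ALLOC=y; mirrors the test code below.
     */
    #include <zephyr/kernel.h>

    #define MY_STACK_SIZE 1024 /* illustrative size */

    static struct k_thread my_thread;

    static void entry(void *a, void *b, void *c)
    {
        /* ... thread body ... */
    }

    void start_dynamic_thread(void)
    {
        k_thread_stack_t *stack =
            k_thread_stack_alloc(MY_STACK_SIZE,
                                 IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0);

        if (stack == NULL) {
            return; /* heap exhausted */
        }

        k_tid_t tid = k_thread_create(&my_thread, stack, MY_STACK_SIZE,
                                      entry, NULL, NULL, NULL,
                                      0, K_USER | K_INHERIT_PERMS, K_NO_WAIT);

        k_thread_join(tid, K_FOREVER);
        (void)k_thread_stack_free(stack); /* assumed cleanup API */
    }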
@@ -328,7 +414,18 @@ struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
     return &dyn->kobj;
 }
 
-static void *z_object_alloc(enum k_objects otype, size_t object_size)
+struct z_object *z_dynamic_object_aligned_create(size_t align, size_t size)
+{
+    struct z_object *obj = dynamic_object_create(K_OBJ_ANY, align, size);
+
+    if (obj == NULL) {
+        LOG_ERR("could not allocate kernel object, out of memory");
+    }
+
+    return obj;
+}
+
+static void *z_object_alloc(enum k_objects otype, size_t size)
 {
     struct z_object *zo;
     uintptr_t tidx = 0;
@@ -348,8 +445,6 @@ static void *z_object_alloc(enum k_objects otype, size_t object_size)
     /* The following are currently not allowed at all */
     case K_OBJ_FUTEX:        /* Lives in user memory */
     case K_OBJ_SYS_MUTEX:    /* Lives in user memory */
-    case K_OBJ_THREAD_STACK_ELEMENT:
-        break;
     case K_OBJ_NET_SOCKET:   /* Indeterminate size */
         LOG_ERR("forbidden object type '%s' requested",
             otype_to_str(otype));
@@ -359,15 +454,13 @@ static void *z_object_alloc(enum k_objects otype, size_t object_size)
         break;
     }
 
-    zo = z_dynamic_object_aligned_create(obj_align_get(otype),
-                                         object_size);
+    zo = dynamic_object_create(otype, obj_align_get(otype), size);
     if (zo == NULL) {
         if (otype == K_OBJ_THREAD) {
             thread_idx_free(tidx);
         }
         return NULL;
     }
-    zo->type = otype;
 
     if (otype == K_OBJ_THREAD) {
         zo->data.thread_id = tidx;
@@ -388,7 +481,7 @@ static void *z_object_alloc(enum k_objects otype, size_t object_size)
 
 void *z_impl_k_object_alloc(enum k_objects otype)
 {
-    return z_object_alloc(otype, obj_size_get(otype));
+    return z_object_alloc(otype, 0);
 }
 
 void *z_impl_k_object_alloc_size(enum k_objects otype, size_t size)
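For fixed-size objects the external API is unchanged; only the plumbing moved, since obj_size_get() is now applied inside dynamic_object_create(), so the size argument here means "extra trailing bytes" and 0 suffices. A sketch, assuming CONFIG_USERSPACE=y and CONFIG_DYNAMIC_OBJECTS=y:

    /* Sketch: allocating a generic kernel object still works the same way. */
    #include <zephyr/kernel.h>

    void sem_alloc_example(void)
    {
        struct k_sem *sem = k_object_alloc(K_OBJ_SEM);

        if (sem != NULL) {
            k_sem_init(sem, 0, 1);
            /* ... use the semaphore ... */
            k_object_free(sem);
        }
    }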
@@ -398,7 +491,7 @@ void *z_impl_k_object_alloc_size(enum k_objects otype, size_t size)
 
 void k_object_free(void *obj)
 {
-    struct dyn_obj *dyn;
+    struct dyn_obj_base *dyn;
 
     /* This function is intentionally not exposed to user mode.
      * There's currently no robust way to track that an object isn't
@@ -430,15 +523,15 @@ struct z_object *z_object_find(const void *obj)
     ret = z_object_gperf_find(obj);
 
     if (ret == NULL) {
-        struct dyn_obj *dynamic_obj;
+        struct dyn_obj_base *dyn;
 
         /* The cast to pointer-to-non-const violates MISRA
          * 11.8 but is justified since we know dynamic objects
          * were not declared with a const qualifier.
          */
-        dynamic_obj = dyn_object_find((void *)obj);
-        if (dynamic_obj != NULL) {
-            ret = &dynamic_obj->kobj;
+        dyn = dyn_object_find((void *)obj);
+        if (dyn != NULL) {
+            ret = &dyn->kobj;
         }
     }
 
@@ -447,7 +540,7 @@ struct z_object *z_object_find(const void *obj)
 
 void z_object_wordlist_foreach(_wordlist_cb_func_t func, void *context)
 {
-    struct dyn_obj *obj, *next;
+    struct dyn_obj_base *obj, *next;
 
     z_object_gperf_wordlist_foreach(func, context);
 
@@ -487,7 +580,7 @@ static void unref_check(struct z_object *ko, uintptr_t index)
 
     void *vko = ko;
 
-    struct dyn_obj *dyn = CONTAINER_OF(vko, struct dyn_obj, kobj);
+    struct dyn_obj_base *dyn = CONTAINER_OF(vko, struct dyn_obj_base, kobj);
 
     __ASSERT(IS_PTR_ALIGNED(dyn, struct dyn_obj), "unaligned z_object");
 
prj.conf (dynamic_thread_stack test suite):

@@ -6,7 +6,7 @@ CONFIG_MAX_THREAD_BYTES=5
 CONFIG_DYNAMIC_THREAD=y
 CONFIG_DYNAMIC_THREAD_POOL_SIZE=2
 CONFIG_DYNAMIC_THREAD_ALLOC=y
-CONFIG_HEAP_MEM_POOL_SIZE=16384
+CONFIG_HEAP_MEM_POOL_SIZE=20480
 CONFIG_ZTEST_STACK_SIZE=2048
 CONFIG_MAIN_STACK_SIZE=2048
 
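The heap bump (16384 to 20480 bytes) plausibly tracks the new overhead: each dynamically allocated stack now costs a separate struct dyn_obj_stack container plus the STACK_ELEMENT_DATA_SIZE() metadata on top of the stack buffer itself, while the test still derives its attempt count from MAX_HEAP_STACKS = CONFIG_HEAP_MEM_POOL_SIZE / STACK_OBJ_SIZE.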
src/main.c (dynamic_thread_stack test suite):

@@ -17,7 +17,7 @@
 
 #define MAX_HEAP_STACKS (CONFIG_HEAP_MEM_POOL_SIZE / STACK_OBJ_SIZE)
 
-ZTEST_DMEM bool flag[CONFIG_DYNAMIC_THREAD_POOL_SIZE];
+ZTEST_DMEM bool flag[MAX(CONFIG_DYNAMIC_THREAD_POOL_SIZE, MAX_HEAP_STACKS)];
 
 static void func(void *arg1, void *arg2, void *arg3)
 {
@@ -63,6 +63,7 @@ ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_pool)
 
     /* spawn our threads */
     for (size_t i = 0; i < CONFIG_DYNAMIC_THREAD_POOL_SIZE; ++i) {
+        flag[i] = false;
         tid[i] = k_thread_create(&th[i], stack[i],
                                  CONFIG_DYNAMIC_THREAD_STACK_SIZE, func,
                                  &flag[i], NULL, NULL, 0,
@@ -86,7 +87,6 @@ ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_alloc)
 {
     size_t N;
     static k_tid_t tid[MAX_HEAP_STACKS];
-    static bool flag[MAX_HEAP_STACKS];
     static struct k_thread th[MAX_HEAP_STACKS];
     static k_thread_stack_t *stack[MAX_HEAP_STACKS];
 
@@ -102,18 +102,17 @@ ZTEST(dynamic_thread_stack, test_dynamic_thread_stack_alloc)
     for (N = 0; N < MAX_HEAP_STACKS; ++N) {
         stack[N] = k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
                                         IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0);
-        zassert_not_null(stack[N]);
-    }
-
-    if (CONFIG_DYNAMIC_THREAD_POOL_SIZE == 0) {
-        /* ensure that no more thread stacks can be allocated from the heap */
-        zassert_is_null(k_thread_stack_alloc(CONFIG_DYNAMIC_THREAD_STACK_SIZE,
-                                             IS_ENABLED(CONFIG_USERSPACE) ? K_USER : 0));
+        if (stack[N] == NULL) {
+            break;
+        }
     }
 
     /* spawn our threads */
     for (size_t i = 0; i < N; ++i) {
-        tid[i] = k_thread_create(&th[i], stack[i], 0, func, &flag[i], NULL, NULL, 0,
+        flag[i] = false;
+        tid[i] = k_thread_create(&th[i], stack[i],
+                                 CONFIG_DYNAMIC_THREAD_STACK_SIZE, func,
+                                 &flag[i], NULL, NULL, 0,
                                  K_USER | K_INHERIT_PERMS, K_NO_WAIT);
     }
 
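Because the per-stack container and metadata now come out of the same heap, the test can no longer assert that exactly MAX_HEAP_STACKS allocations succeed (nor that the next one fails); it stops at the first NULL and spawns only the N stacks it actually obtained.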
testcase.yaml (dynamic_thread_stack test suite):

@@ -33,6 +33,7 @@ tests:
       # 010
       - CONFIG_DYNAMIC_THREAD_POOL_SIZE=0
       - CONFIG_DYNAMIC_THREAD_ALLOC=y
+      - CONFIG_DYNAMIC_THREAD_PREFER_ALLOC=y
       - CONFIG_USERSPACE=n
   kernel.threads.dynamic_thread.stack.no_pool.alloc.user:
     tags: userspace
@@ -40,6 +41,7 @@ tests:
       # 011
       - CONFIG_DYNAMIC_THREAD_POOL_SIZE=0
       - CONFIG_DYNAMIC_THREAD_ALLOC=y
+      - CONFIG_DYNAMIC_THREAD_PREFER_ALLOC=y
       - CONFIG_USERSPACE=y
   kernel.threads.dynamic_thread.stack.pool.no_alloc.no_user:
     extra_configs:
@@ -57,6 +59,7 @@ tests:
   kernel.threads.dynamic_thread.stack.pool.alloc.no_user:
     extra_configs:
       # 110
+      - CONFIG_DYNAMIC_THREAD_PREFER_ALLOC=y
       - CONFIG_DYNAMIC_THREAD_POOL_SIZE=2
       - CONFIG_DYNAMIC_THREAD_ALLOC=y
      - CONFIG_USERSPACE=n
@@ -64,6 +67,7 @@ tests:
     tags: userspace
     extra_configs:
       # 111
+      - CONFIG_DYNAMIC_THREAD_PREFER_ALLOC=y
       - CONFIG_DYNAMIC_THREAD_POOL_SIZE=2
       - CONFIG_DYNAMIC_THREAD_ALLOC=y
       - CONFIG_USERSPACE=y