kernel: stacks: add k_stack_alloc() init
Similar to what has been done with pipes and message queues, user mode
can't be trusted to provide a buffer for the kernel to use. Remove
k_stack_init() as a syscall and offer k_stack_alloc_init() which
allocates a buffer from the caller's resource pool.

Fixes #7285

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent 0fe789ff2e
commit f3bee951b1

8 changed files with 135 additions and 16 deletions
@@ -54,7 +54,10 @@ Defining a Stack
 ================

 A stack is defined using a variable of type :c:type:`struct k_stack`.
-It must then be initialized by calling :cpp:func:`k_stack_init()`.
+It must then be initialized by calling :cpp:func:`k_stack_init()` or
+:cpp:func:`k_stack_alloc_init()`. In the latter case, a buffer is not
+provided and it is instead allocated from the calling thread's resource
+pool.

 The following code defines and initializes an empty stack capable of holding
 up to ten 32-bit data values.
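The example the documentation refers to lies outside this hunk. As a rough sketch only (the variable and function names and the ten-entry size are illustrative, mirroring the documentation text; only k_stack_init() and k_stack_alloc_init() come from this commit), the two initialization paths might look like this:

    #include <kernel.h>

    static u32_t my_stack_buf[10];           /* caller-provided storage */
    static struct k_stack my_stack;
    static struct k_stack my_alloc_stack;    /* storage drawn from the resource pool */

    void demo_init(void)
    {
            /* Existing path: the caller supplies the buffer. */
            k_stack_init(&my_stack, my_stack_buf, 10);

            /* New path: no buffer argument; k_stack_alloc_init() allocates
             * from the calling thread's resource pool and returns -ENOMEM
             * if the pool cannot satisfy the request.
             */
            if (k_stack_alloc_init(&my_alloc_stack, 10) != 0) {
                    /* handle -ENOMEM */
            }
    }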
@@ -2146,12 +2146,14 @@ struct k_lifo {
 /**
  * @cond INTERNAL_HIDDEN
  */
+#define K_STACK_FLAG_ALLOC	BIT(0)	/* Buffer was allocated */

 struct k_stack {
 	_wait_q_t wait_q;
 	u32_t *base, *next, *top;

 	_OBJECT_TRACING_NEXT_PTR(k_stack);
+	u8_t flags;
 };

 #define _K_STACK_INITIALIZER(obj, stack_buffer, stack_num_entries) \
@@ -2186,8 +2188,37 @@ struct k_stack {
  *
  * @return N/A
  */
-__syscall void k_stack_init(struct k_stack *stack,
-			    u32_t *buffer, unsigned int num_entries);
+void k_stack_init(struct k_stack *stack,
+		  u32_t *buffer, unsigned int num_entries);
+
+
+/**
+ * @brief Initialize a stack.
+ *
+ * This routine initializes a stack object, prior to its first use. Internal
+ * buffers will be allocated from the calling thread's resource pool.
+ * This memory will be released if k_stack_cleanup() is called, or
+ * userspace is enabled and the stack object loses all references to it.
+ *
+ * @param stack Address of the stack.
+ * @param num_entries Maximum number of values that can be stacked.
+ *
+ * @return -ENOMEM if memory couldn't be allocated
+ */
+
+__syscall int k_stack_alloc_init(struct k_stack *stack,
+				 unsigned int num_entries);
+
+/**
+ * @brief Release a stack's allocated buffer
+ *
+ * If a stack object was given a dynamically allocated buffer via
+ * k_stack_alloc_init(), this will free it. This function does nothing
+ * if the buffer wasn't dynamically allocated.
+ *
+ * @param stack Address of the stack.
+ */
+void k_stack_cleanup(struct k_stack *stack);

 /**
  * @brief Push an element onto a stack.
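Taking the new declarations together, a dynamically backed stack is created with k_stack_alloc_init() and its pool memory is returned with k_stack_cleanup(). A minimal sketch, assuming the long-standing k_stack_push()/k_stack_pop() API and hypothetical names (demo_alloc_lifetime, dyn_stack):

    #include <kernel.h>

    int demo_alloc_lifetime(void)
    {
            struct k_stack dyn_stack;
            u32_t val;
            int ret;

            ret = k_stack_alloc_init(&dyn_stack, 4);
            if (ret != 0) {
                    return ret;     /* -ENOMEM: resource pool exhausted */
            }

            k_stack_push(&dyn_stack, 0xdeadbeef);
            ret = k_stack_pop(&dyn_stack, &val, K_NO_WAIT);

            /* Release the allocated buffer; a no-op for caller-provided buffers. */
            k_stack_cleanup(&dyn_stack);
            return ret;
    }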
@@ -45,7 +45,7 @@ SYS_INIT(init_stack_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

 #endif /* CONFIG_OBJECT_TRACING */

-void _impl_k_stack_init(struct k_stack *stack, u32_t *buffer,
+void k_stack_init(struct k_stack *stack, u32_t *buffer,
 		unsigned int num_entries)
 {
 	sys_dlist_init(&stack->wait_q);
@@ -56,18 +56,45 @@ void _impl_k_stack_init(struct k_stack *stack, u32_t *buffer,
 	_k_object_init(stack);
 }

-#ifdef CONFIG_USERSPACE
-_SYSCALL_HANDLER(k_stack_init, stack, buffer, num_entries)
+int _impl_k_stack_alloc_init(struct k_stack *stack, unsigned int num_entries)
 {
-	_SYSCALL_OBJ_INIT(stack, K_OBJ_STACK);
-	_SYSCALL_MEMORY_ARRAY_WRITE(buffer, num_entries, sizeof(u32_t));
+	void *buffer;
+	int ret;

-	_impl_k_stack_init((struct k_stack *)stack, (u32_t *)buffer,
-			   num_entries);
+	buffer = z_thread_malloc(num_entries);
+	if (buffer) {
+		k_stack_init(stack, buffer, num_entries);
+		stack->flags = K_STACK_FLAG_ALLOC;
+		ret = 0;
+	} else {
+		ret = -ENOMEM;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_USERSPACE
+_SYSCALL_HANDLER(k_stack_alloc_init, stack, num_entries)
+{
+	_SYSCALL_OBJ_NEVER_INIT(stack, K_OBJ_STACK);
+	_SYSCALL_VERIFY(num_entries > 0);
+
+	_impl_k_stack_alloc_init((struct k_stack *)stack, num_entries);
 	return 0;
 }
 #endif

+void k_stack_cleanup(struct k_stack *stack)
+{
+	__ASSERT_NO_MSG(sys_dlist_is_empty(&stack->wait_q));
+
+	if (stack->flags & K_STACK_FLAG_ALLOC) {
+		k_free(stack->base);
+		stack->base = NULL;
+		stack->flags &= ~K_STACK_FLAG_ALLOC;
+	}
+}
+
 void _impl_k_stack_push(struct k_stack *stack, u32_t data)
 {
 	struct k_thread *first_pending_thread;
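Because _impl_k_stack_alloc_init() obtains the buffer via z_thread_malloc(), the calling thread must have a resource pool assigned or the call fails with -ENOMEM. A minimal sketch of that setup, mirroring what the updated test suite below does (the pool name demo_pool and its block sizes are illustrative):

    #include <kernel.h>

    K_MEM_POOL_DEFINE(demo_pool, 128, 128, 2, 4);

    void demo_assign_and_alloc(void)
    {
            struct k_stack dyn_stack;

            /* Give the current thread a pool to draw kernel-side buffers from. */
            k_thread_resource_pool_assign(k_current_get(), &demo_pool);

            if (k_stack_alloc_init(&dyn_stack, 8) == 0) {
                    /* ... use the stack ... */
                    k_stack_cleanup(&dyn_stack);
            }
    }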
@@ -264,6 +264,9 @@ static void unref_check(struct _k_object *ko)
 	case K_OBJ_MSGQ:
 		k_msgq_cleanup((struct k_msgq *)ko->name);
 		break;
+	case K_OBJ_STACK:
+		k_stack_cleanup((struct k_stack *)ko->name);
+		break;
 	default:
 		break;
 	}
@@ -1,2 +1,4 @@
 CONFIG_ZTEST=y
 CONFIG_IRQ_OFFLOAD=y
+CONFIG_USERSPACE=y
+CONFIG_DYNAMIC_OBJECTS=y
@@ -15,6 +15,21 @@
 extern void test_stack_thread2thread(void);
 extern void test_stack_thread2isr(void);
 extern void test_stack_pop_fail(void);
+#ifdef CONFIG_USERSPACE
+extern void test_stack_user_thread2thread(void);
+extern void test_stack_user_pop_fail(void);
+
+K_MEM_POOL_DEFINE(test_pool, 128, 128, 2, 4);
+#else
+#define dummy_test(_name) \
+	static void _name(void) \
+	{ \
+		ztest_test_skip(); \
+	}
+
+dummy_test(test_stack_user_thread2thread);
+dummy_test(test_stack_user_pop_fail);
+#endif /* CONFIG_USERSPACE */

 extern struct k_stack kstack;
 extern struct k_stack stack;
@@ -28,9 +43,15 @@ void test_main(void)
 	k_thread_access_grant(k_current_get(), &kstack, &stack, &thread_data,
 			      &end_sema, &threadstack, NULL);

+#ifdef CONFIG_USERSPACE
+	k_thread_resource_pool_assign(k_current_get(), &test_pool);
+#endif
+
 	ztest_test_suite(stack_api,
-			 ztest_user_unit_test(test_stack_thread2thread),
+			 ztest_unit_test(test_stack_thread2thread),
+			 ztest_user_unit_test(test_stack_user_thread2thread),
 			 ztest_unit_test(test_stack_thread2isr),
-			 ztest_user_unit_test(test_stack_pop_fail));
+			 ztest_unit_test(test_stack_pop_fail),
+			 ztest_user_unit_test(test_stack_user_pop_fail));
 	ztest_run_test_suite(stack_api);
 }
@@ -109,6 +109,19 @@ void test_stack_thread2thread(void)
 	tstack_thread_thread(&kstack);
 }

+#ifdef CONFIG_USERSPACE
+void test_stack_user_thread2thread(void)
+{
+	struct k_stack *stack = k_object_alloc(K_OBJ_STACK);
+
+	zassert_not_null(stack, "couldn't allocate stack object");
+	zassert_false(k_stack_alloc_init(stack, STACK_LEN),
+		      "stack init failed");
+
+	tstack_thread_thread(stack);
+}
+#endif
+
 void test_stack_thread2isr(void)
 {
 	/**TESTPOINT: test k_stack_init stack*/
@@ -24,13 +24,32 @@ static u32_t data[STACK_LEN];
 extern struct k_stack stack;

 /*test cases*/
-void test_stack_pop_fail(void *p1, void *p2, void *p3)
+static void stack_pop_fail(struct k_stack *stack)
 {
 	u32_t rx_data;

-	k_stack_init(&stack, data, STACK_LEN);
 	/**TESTPOINT: stack pop returns -EBUSY*/
-	zassert_equal(k_stack_pop(&stack, &rx_data, K_NO_WAIT), -EBUSY, NULL);
+	zassert_equal(k_stack_pop(stack, &rx_data, K_NO_WAIT), -EBUSY, NULL);
 	/**TESTPOINT: stack pop returns -EAGAIN*/
-	zassert_equal(k_stack_pop(&stack, &rx_data, TIMEOUT), -EAGAIN, NULL);
+	zassert_equal(k_stack_pop(stack, &rx_data, TIMEOUT), -EAGAIN, NULL);
 }
+
+void test_stack_pop_fail(void)
+{
+	k_stack_init(&stack, data, STACK_LEN);
+
+	stack_pop_fail(&stack);
+}
+
+#ifdef CONFIG_USERSPACE
+void test_stack_user_pop_fail(void)
+{
+	struct k_stack *alloc_stack = k_object_alloc(K_OBJ_STACK);
+
+	zassert_not_null(alloc_stack, "couldn't allocate stack object");
+	zassert_false(k_stack_alloc_init(alloc_stack, STACK_LEN),
+		      "stack init failed");
+
+	stack_pop_fail(alloc_stack);
+}
+#endif