k_queue: allow user mode access via allocators

User mode may now use queue objects. Instead of embedding the kernel's
linked list information directly in the data item, a container struct is
allocated from the caller's resource pool, and that container is what gets
added to the queue. The new sflist type stores a flag indicating whether
a data item needs to be freed when it is removed from the queue.

FIFOs and LIFOs are derived from k_queue and have likewise gained
allocator functions.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie authored on 2018-04-27 13:21:22 -07:00; committed by Anas Nashif.
commit 2b9b4b2cf7
12 changed files with 414 additions and 42 deletions
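A minimal usage sketch of the new path (hypothetical names my_pool, my_queue,
setup and producer, not part of this commit; assumes the user thread has been
granted permission on the queue object):

    K_MEM_POOL_DEFINE(my_pool, 16, 64, 4, 4);
    static struct k_queue my_queue;

    void setup(struct k_thread *user_thread)
    {
        k_queue_init(&my_queue);
        /* container nodes for the *_alloc_* APIs come from this pool */
        k_thread_resource_pool_assign(user_thread, &my_pool);
    }

    void producer(void *payload)
    {
        /* no 4-byte header is reserved in payload; a container node is
         * allocated here and freed when the item is dequeued
         */
        if (k_queue_alloc_append(&my_queue, payload) == -ENOMEM) {
            /* the thread's resource pool is exhausted */
        }
    }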


@@ -25,9 +25,12 @@ A fifo has the following key properties:
A fifo must be initialized before it can be used. This sets its queue to empty.
FIFO data items must be aligned on a 4-byte boundary, as the kernel reserves
the first 32 bits of an item for use as a pointer to the next data item in the
queue. Consequently, a data item that holds N bytes of application data
requires N+4 bytes of memory. There are no alignment or reserved space
requirements for data items if they are added with
:cpp:func:`k_fifo_alloc_put()`; instead, additional memory is temporarily
allocated from the calling thread's resource pool.
A data item may be **added** to a fifo by a thread or an ISR.
The item is given directly to a waiting thread, if one exists;
@@ -112,6 +115,11 @@ to send data to one or more consumer threads.
Additionally, a singly-linked list of data items can be added to a fifo
by calling :cpp:func:`k_fifo_put_list()` or :cpp:func:`k_fifo_put_slist()`.
Finally, a data item can be added to a fifo with :cpp:func:`k_fifo_alloc_put()`.
With this API, there is no need to reserve space for the kernel's use in
the data item; instead, additional memory is allocated from the calling
thread's resource pool and held until the item is read.
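For example, an item declared without a reserved header word can be queued
this way (a sketch in the spirit of this page's other examples; my_fifo is
assumed to be an initialized fifo and the thread to have a resource pool
assigned):

    struct data_item_t {
        u32_t field1; /* no reserved first word needed for alloc_put */
        u32_t field2;
    };

    struct data_item_t tx_data;

    void producer_thread(void)
    {
        /* a container is allocated behind the scenes and freed
         * automatically when the item is read from the fifo
         */
        if (k_fifo_alloc_put(&my_fifo, &tx_data) != 0) {
            /* -ENOMEM: the thread's resource pool is exhausted */
        }
    }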
Reading from a FIFO
===================
@@ -155,6 +163,7 @@ The following fifo APIs are provided by :file:`kernel.h`:
* :c:macro:`K_FIFO_DEFINE`
* :cpp:func:`k_fifo_init()`
* :cpp:func:`k_fifo_alloc_put()`
* :cpp:func:`k_fifo_put()`
* :cpp:func:`k_fifo_put_list()`
* :cpp:func:`k_fifo_put_slist()`


@@ -25,9 +25,12 @@ A lifo has the following key properties:
A lifo must be initialized before it can be used. This sets its queue to empty.
LIFO data items must be aligned on a 4-byte boundary, as the kernel reserves
the first 32 bits of an item for use as a pointer to the next data item in the
queue. Consequently, a data item that holds N bytes of application data
requires N+4 bytes of memory. There are no alignment or reserved space
requirements for data items if they are added with
:cpp:func:`k_lifo_alloc_put()`; instead, additional memory is temporarily
allocated from the calling thread's resource pool.
A data item may be **added** to a lifo by a thread or an ISR.
The item is given directly to a waiting thread, if one exists;
@@ -100,6 +103,11 @@ to send data to one or more consumer threads.
}
}
A data item can be added to a lifo with :cpp:func:`k_lifo_alloc_put()`.
With this API, there is no need to reserve space for the kernel's use in
the data item; instead, additional memory is allocated from the calling
thread's resource pool and held until the item is read.
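For example (a sketch mirroring the FIFO case; my_lifo and lifo_data are
illustrative, and a resource pool is assumed to be assigned to the thread):

    if (k_lifo_alloc_put(&my_lifo, &lifo_data) != 0) {
        /* -ENOMEM: no container node could be allocated */
    }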
Reading from a LIFO
===================


@@ -24,6 +24,7 @@
#include <misc/__assert.h>
#include <misc/dlist.h>
#include <misc/slist.h>
#include <misc/sflist.h>
#include <misc/util.h>
#include <misc/mempool_base.h>
#include <kernel_version.h>
@@ -1634,7 +1635,7 @@ extern u32_t k_uptime_delta_32(s64_t *reftime);
*/
struct k_queue {
sys_sflist_t data_q;
union {
_wait_q_t wait_q;
@@ -1654,6 +1655,8 @@ struct k_queue {
#define K_QUEUE_INITIALIZER DEPRECATED_MACRO _K_QUEUE_INITIALIZER
extern void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free);
/**
* INTERNAL_HIDDEN @endcond
*/
@@ -1673,7 +1676,7 @@ struct k_queue {
*
* @return N/A
*/
__syscall void k_queue_init(struct k_queue *queue);
/**
* @brief Cancel waiting on a queue.
@@ -1687,7 +1690,7 @@ extern void k_queue_init(struct k_queue *queue);
*
* @return N/A
*/
__syscall void k_queue_cancel_wait(struct k_queue *queue);
/**
* @brief Append an element to the end of a queue.
@@ -1705,6 +1708,23 @@ extern void k_queue_cancel_wait(struct k_queue *queue);
*/
extern void k_queue_append(struct k_queue *queue, void *data);
/**
* @brief Append an element to a queue.
*
* This routine appends a data item to @a queue. There is an implicit
* memory allocation from the calling thread's resource pool, which is
* automatically freed when the item is removed from the queue.
*
* @note Can be called by ISRs.
*
* @param queue Address of the queue.
* @param data Address of the data item.
*
* @retval 0 on success
* @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
*/
__syscall int k_queue_alloc_append(struct k_queue *queue, void *data);
/**
* @brief Prepend an element to a queue.
*
@@ -1721,6 +1741,23 @@ extern void k_queue_append(struct k_queue *queue, void *data);
*/
extern void k_queue_prepend(struct k_queue *queue, void *data);
/**
* @brief Prepend an element to a queue.
*
* This routine prepends a data item to @a queue. There is an implicit
* memory allocation from the calling thread's resource pool, which is
* automatically freed when the item is removed from the queue.
*
* @note Can be called by ISRs.
*
* @param queue Address of the queue.
* @param data Address of the data item.
*
* @retval 0 on success
* @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
*/
__syscall int k_queue_alloc_prepend(struct k_queue *queue, void *data);
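As a sketch of the two variants side by side (submit is a hypothetical helper;
it assumes the caller has access to the queue and a resource pool assigned):

    int submit(struct k_queue *q, void *item, bool urgent)
    {
        /* both calls allocate a container node from the calling
         * thread's resource pool; it is freed when the item is removed
         */
        return urgent ? k_queue_alloc_prepend(q, item)
                      : k_queue_alloc_append(q, item);
    }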
/**
* @brief Inserts an element to a queue.
*
@@ -1787,7 +1824,7 @@ extern void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list);
* @return Address of the data item if successful; NULL if returned
* without waiting, or waiting period timed out.
*/
__syscall void *k_queue_get(struct k_queue *queue, s32_t timeout);
/**
* @brief Remove an element from a queue.
@@ -1805,7 +1842,7 @@ extern void *k_queue_get(struct k_queue *queue, s32_t timeout);
*/
static inline bool k_queue_remove(struct k_queue *queue, void *data)
{
return sys_sflist_find_and_remove(&queue->data_q, (sys_sfnode_t *)data);
}
/**
@@ -1821,9 +1858,11 @@ static inline bool k_queue_remove(struct k_queue *queue, void *data)
* @return Non-zero if the queue is empty.
* @return 0 if data is available.
*/
__syscall int k_queue_is_empty(struct k_queue *queue);
static inline int _impl_k_queue_is_empty(struct k_queue *queue)
{
return (int)sys_sflist_is_empty(&queue->data_q);
}
/**
@@ -1835,9 +1874,11 @@ static inline int k_queue_is_empty(struct k_queue *queue)
*
* @return Head element, or NULL if queue is empty.
*/
__syscall void *k_queue_peek_head(struct k_queue *queue);
static inline void *_impl_k_queue_peek_head(struct k_queue *queue)
{
return z_queue_node_peek(sys_sflist_peek_head(&queue->data_q), false);
}
/**
@@ -1849,9 +1890,11 @@ static inline void *k_queue_peek_head(struct k_queue *queue)
*
* @return Tail element, or NULL if queue is empty.
*/
__syscall void *k_queue_peek_tail(struct k_queue *queue);
static inline void *_impl_k_queue_peek_tail(struct k_queue *queue)
{
return z_queue_node_peek(sys_sflist_peek_tail(&queue->data_q), false);
}
/**
@@ -1940,6 +1983,24 @@ struct k_fifo {
#define k_fifo_put(fifo, data) \
k_queue_append((struct k_queue *) fifo, data)
/**
* @brief Add an element to a FIFO queue.
*
* This routine adds a data item to @a fifo. There is an implicit
* memory allocation from the calling thread's resource pool, which is
* automatically freed when the item is removed.
*
* @note Can be called by ISRs.
*
* @param fifo Address of the FIFO.
* @param data Address of the data item.
*
* @retval 0 on success
* @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
*/
#define k_fifo_alloc_put(fifo, data) \
k_queue_alloc_append((struct k_queue *) fifo, data)
/**
* @brief Atomically add a list of elements to a FIFO.
*
@@ -2111,6 +2172,24 @@ struct k_lifo {
#define k_lifo_put(lifo, data) \
k_queue_prepend((struct k_queue *) lifo, data)
/**
* @brief Add an element to a LIFO queue.
*
* This routine adds a data item to @a lifo. There is an implicit
* memory allocation from the calling thread's resource pool, which is
* automatically freed when the item is removed.
*
* @note Can be called by ISRs.
*
* @param lifo Address of the LIFO.
* @param data Address of the data item.
*
* @retval 0 on success
* @retval -ENOMEM if there isn't sufficient RAM in the caller's resource pool
*/
#define k_lifo_alloc_put(lifo, data) \
k_queue_alloc_prepend((struct k_queue *) lifo, data)
/**
* @brief Get an element from a LIFO queue.
*


@@ -18,12 +18,46 @@
#include <linker/sections.h>
#include <wait_q.h>
#include <ksched.h>
#include <misc/sflist.h>
#include <init.h>
#include <syscall_handler.h>
extern struct k_queue _k_queue_list_start[];
extern struct k_queue _k_queue_list_end[];
struct alloc_node {
sys_sfnode_t node;
void *data;
};
void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free)
{
void *ret;
if (node && sys_sfnode_flags_get(node)) {
/* If the flag is set, then the enqueue operation for this item
* did a behind-the-scenes memory allocation of an alloc_node
* struct, which is what got put in the queue. Free it and pass
* back the data pointer.
*/
struct alloc_node *anode;
anode = CONTAINER_OF(node, struct alloc_node, node);
ret = anode->data;
if (needs_free) {
k_free(anode);
}
} else {
/* Data was directly placed in the queue; the first 4 bytes are
* reserved for the linked list. User mode isn't allowed to
* do this, although it can receive data sent this way.
*/
ret = (void *)node;
}
return ret;
}
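The net effect for consumers (an illustrative comment, not code from this
commit) is that the dequeue path always yields the caller's original pointer:

    void *item = k_queue_get(q, K_FOREVER);
    /* direct append: item is the enqueued pointer itself, whose first
     *                word served as the list node
     * alloc append:  item is the data pointer passed to the alloc call;
     *                the hidden alloc_node was freed inside
     *                z_queue_node_peek(node, true)
     */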
#ifdef CONFIG_OBJECT_TRACING
struct k_queue *_trace_list_k_queue;
@@ -47,17 +81,29 @@ SYS_INIT(init_queue_module, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif /* CONFIG_OBJECT_TRACING */
void _impl_k_queue_init(struct k_queue *queue)
{
sys_sflist_init(&queue->data_q);
sys_dlist_init(&queue->wait_q);
#if defined(CONFIG_POLL)
sys_dlist_init(&queue->poll_events);
#endif
SYS_TRACING_OBJ_INIT(k_queue, queue);
_k_object_init(queue);
}
#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER(k_queue_init, queue_ptr)
{
struct k_queue *queue = (struct k_queue *)queue_ptr;
_SYSCALL_OBJ_NEVER_INIT(queue, K_OBJ_QUEUE);
_impl_k_queue_init(queue);
return 0;
}
#endif
#if !defined(CONFIG_POLL)
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
@@ -73,7 +119,7 @@ static inline void handle_poll_events(struct k_queue *queue, u32_t state)
#endif
}
void _impl_k_queue_cancel_wait(struct k_queue *queue)
{
unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
@@ -91,7 +137,13 @@ void k_queue_cancel_wait(struct k_queue *queue)
_reschedule(key);
}
#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER1_SIMPLE_VOID(k_queue_cancel_wait, K_OBJ_QUEUE,
struct k_queue *);
#endif
static int queue_insert(struct k_queue *queue, void *prev, void *data,
bool alloc)
{
unsigned int key = irq_lock();
#if !defined(CONFIG_POLL)
@@ -102,29 +154,80 @@ void k_queue_insert(struct k_queue *queue, void *prev, void *data)
if (first_pending_thread) {
prepare_thread_to_run(first_pending_thread, data);
_reschedule(key);
return 0;
}
#endif /* !CONFIG_POLL */
/* Only need to actually allocate if no threads are pending */
if (alloc) {
struct alloc_node *anode;
anode = z_thread_malloc(sizeof(*anode));
if (!anode) {
return -ENOMEM;
}
anode->data = data;
sys_sfnode_init(&anode->node, 0x1);
data = anode;
} else {
sys_sfnode_init(data, 0x0);
}
sys_sflist_insert(&queue->data_q, prev, data);
#if defined(CONFIG_POLL)
handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* CONFIG_POLL */
_reschedule(key);
return 0;
}
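Note the fast path above: when a consumer is already pending, the data pointer
is handed to that thread directly and nothing is ever linked into the list, so
no container needs to be allocated (or later freed) at all. The sflist flag
bookkeeping only applies to items that actually sit in the queue.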
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
queue_insert(queue, prev, data, false);
}
void k_queue_append(struct k_queue *queue, void *data)
{
queue_insert(queue, sys_sflist_peek_tail(&queue->data_q), data, false);
}
void k_queue_prepend(struct k_queue *queue, void *data)
{
queue_insert(queue, NULL, data, false);
}
int _impl_k_queue_alloc_append(struct k_queue *queue, void *data)
{
return queue_insert(queue, sys_sflist_peek_tail(&queue->data_q), data,
true);
}
#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER(k_queue_alloc_append, queue, data)
{
_SYSCALL_OBJ(queue, K_OBJ_QUEUE);
return _impl_k_queue_alloc_append((struct k_queue *)queue,
(void *)data);
}
#endif
int _impl_k_queue_alloc_prepend(struct k_queue *queue, void *data)
{
return queue_insert(queue, NULL, data, true);
}
#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER(k_queue_alloc_prepend, queue, data)
{
_SYSCALL_OBJ(queue, K_OBJ_QUEUE);
return _impl_k_queue_alloc_prepend((struct k_queue *)queue,
(void *)data);
}
#endif
void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
__ASSERT(head && tail, "invalid head or tail");
@@ -139,11 +242,11 @@ void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
}
if (head) {
sys_sflist_append_list(&queue->data_q, head, tail);
}
#else
sys_sflist_append_list(&queue->data_q, head, tail);
handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* !CONFIG_POLL */
@@ -159,6 +262,9 @@ void k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
* - the slist implementation keeps the next pointer as the first
* field of the node object type
* - list->tail->next = NULL.
* - sflist implementation only differs from slist by stuffing
* flag bits in the low-order bits of the data pointer
* - source list is really an slist and not an sflist with flags set
*/
k_queue_append_list(queue, list->head, list->tail);
sys_slist_init(list);
@@ -186,11 +292,11 @@ static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
__ASSERT_NO_MSG(event.state ==
K_POLL_STATE_FIFO_DATA_AVAILABLE);
/* sys_sflist_* aren't threadsafe, so must always be protected
* by irq_lock.
*/
key = irq_lock();
val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
irq_unlock(key);
} while (!val && timeout == K_FOREVER);
@@ -198,15 +304,18 @@ static void *k_queue_poll(struct k_queue *queue, s32_t timeout)
}
#endif /* CONFIG_POLL */
void *_impl_k_queue_get(struct k_queue *queue, s32_t timeout)
{
unsigned int key;
void *data;
key = irq_lock();
if (likely(!sys_sflist_is_empty(&queue->data_q))) {
sys_sfnode_t *node;
node = sys_sflist_get_not_empty(&queue->data_q);
data = z_queue_node_peek(node, true);
irq_unlock(key);
return data;
}
@@ -227,3 +336,18 @@ void *k_queue_get(struct k_queue *queue, s32_t timeout)
return ret ? NULL : _current->base.swap_data;
#endif /* CONFIG_POLL */
}
#ifdef CONFIG_USERSPACE
_SYSCALL_HANDLER(k_queue_get, queue, timeout_p)
{
s32_t timeout = timeout_p;
_SYSCALL_OBJ(queue, K_OBJ_QUEUE);
return (u32_t)_impl_k_queue_get((struct k_queue *)queue, timeout);
}
_SYSCALL_HANDLER1_SIMPLE(k_queue_is_empty, K_OBJ_QUEUE, struct k_queue *);
_SYSCALL_HANDLER1_SIMPLE(k_queue_peek_head, K_OBJ_QUEUE, struct k_queue *);
_SYSCALL_HANDLER1_SIMPLE(k_queue_peek_tail, K_OBJ_QUEUE, struct k_queue *);
#endif /* CONFIG_USERSPACE */


@@ -16,6 +16,7 @@ kobjects = [
"k_msgq",
"k_mutex",
"k_pipe",
"k_queue",
"k_sem",
"k_stack",
"k_thread",


@@ -1,2 +1,4 @@
CONFIG_ZTEST=y
CONFIG_IRQ_OFFLOAD=y
CONFIG_USERSPACE=y
CONFIG_DYNAMIC_OBJECTS=y


@@ -1,3 +1,5 @@
CONFIG_ZTEST=y
CONFIG_IRQ_OFFLOAD=y
CONFIG_POLL=y
CONFIG_USERSPACE=y
CONFIG_DYNAMIC_OBJECTS=y


@@ -12,17 +12,26 @@
*/
#include <ztest.h>
#include "test_queue.h"
#ifndef CONFIG_USERSPACE
static void test_queue_supv_to_user(void)
{
ztest_test_skip();
}
static void test_auto_free(void)
{
ztest_test_skip();
}
#endif
/*test case main entry*/
void test_main(void)
{
ztest_test_suite(queue_api,
ztest_unit_test(test_queue_supv_to_user),
ztest_unit_test(test_auto_free),
ztest_unit_test(test_queue_thread2thread),
ztest_unit_test(test_queue_thread2isr),
ztest_unit_test(test_queue_isr2thread),


@@ -10,8 +10,20 @@
#include <ztest.h>
#include <irq_offload.h>
extern void test_queue_thread2thread(void);
extern void test_queue_thread2isr(void);
extern void test_queue_isr2thread(void);
extern void test_queue_get_2threads(void);
extern void test_queue_get_fail(void);
extern void test_queue_loop(void);
#ifdef CONFIG_USERSPACE
extern void test_queue_supv_to_user(void);
extern void test_auto_free(void);
#endif
typedef struct qdata {
sys_snode_t snode;
u32_t data;
bool allocated;
} qdata_t;
#endif


@@ -20,7 +20,7 @@
#define TIMEOUT 100
/*test cases*/
void test_queue_get_fail(void)
{
struct k_queue queue;


@@ -0,0 +1,126 @@
/*
* Copyright (c) 2017 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "test_queue.h"
#ifdef CONFIG_USERSPACE
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)
#define LIST_LEN 5
static K_THREAD_STACK_DEFINE(child_stack, STACK_SIZE);
static __kernel struct k_thread child_thread;
static struct qdata qdata[LIST_LEN * 2];
K_MEM_POOL_DEFINE(test_pool, 16, 64, 4, 4);
/* Higher priority than the thread putting stuff in the queue */
void child_thread_get(void *p1, void *p2, void *p3)
{
struct qdata *qd;
struct k_queue *q = p1;
struct k_sem *sem = p2;
zassert_false(k_queue_is_empty(q), NULL);
qd = k_queue_peek_head(q);
zassert_equal(qd->data, 0, NULL);
qd = k_queue_peek_tail(q);
zassert_equal(qd->data, (LIST_LEN * 2) - 1,
"got %d expected %d", qd->data, (LIST_LEN * 2) - 1);
for (int i = 0; i < (LIST_LEN * 2); i++) {
qd = k_queue_get(q, K_FOREVER);
zassert_equal(qd->data, i, NULL);
if (qd->allocated) {
/* snode should never have been touched */
zassert_is_null(qd->snode.next, NULL);
}
}
zassert_true(k_queue_is_empty(q), NULL);
/* This one gets canceled */
qd = k_queue_get(q, K_FOREVER);
zassert_is_null(qd, NULL);
k_sem_give(sem);
}
void test_queue_supv_to_user(void)
{
/* Supervisor mode will add a bunch of data, some with alloc
* and some not
*/
struct k_queue *q;
struct k_sem *sem;
k_thread_resource_pool_assign(k_current_get(), &test_pool);
q = k_object_alloc(K_OBJ_QUEUE);
zassert_not_null(q, "no memory for allocated queue object\n");
k_queue_init(q);
sem = k_object_alloc(K_OBJ_SEM);
zassert_not_null(sem, "no memory for semaphore object\n");
k_sem_init(sem, 0, 1);
for (int i = 0; i < (LIST_LEN * 2); i = i + 2) {
/* Just for test purposes -- not safe to do this in the
* real world as user mode shouldn't have any access to the
* snode struct
*/
qdata[i].data = i;
qdata[i].allocated = false;
qdata[i].snode.next = NULL;
k_queue_append(q, &qdata[i]);
qdata[i + 1].data = i + 1;
qdata[i + 1].allocated = true;
qdata[i + 1].snode.next = NULL;
zassert_false(k_queue_alloc_append(q, &qdata[i + 1]), NULL);
}
k_thread_create(&child_thread, child_stack, STACK_SIZE,
child_thread_get, q, sem, NULL, K_HIGHEST_THREAD_PRIO,
K_USER | K_INHERIT_PERMS, 0);
k_yield();
/* child thread runs until blocking on the last k_queue_get() call */
k_queue_cancel_wait(q);
k_sem_take(sem, K_FOREVER);
}
void test_auto_free(void)
{
/* Ensure any resources requested by the previous test were released
* by allocating the entire pool. It would have allocated two kernel
* objects and five queue elements. The queue elements should be
* auto-freed when they are de-queued, and the objects when all
* threads with permissions exit.
*/
struct k_mem_block b[4];
int i;
for (i = 0; i < 4; i++) {
zassert_false(k_mem_pool_alloc(&test_pool, &b[i], 64,
K_FOREVER),
"memory not auto released!");
}
/* Free everything so that the pool is back to a pristine state in
* case we want to use it again.
*/
for (i = 0; i < 4; i++) {
k_mem_pool_free(&b[i]);
}
}
#endif /* CONFIG_USERSPACE */


@@ -3,4 +3,4 @@ tests:
tags: kernel
kernel.queue.poll:
extra_args: CONF_FILE="prj_poll.conf"
tags: kernel userspace