tests/kernel: Remove mem_pool tests

This API is being deprecated, and the underlying sys_heap code has its
tests elsewhere.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
Andy Ross 2020-09-22 13:34:55 -07:00 committed by Anas Nashif
commit 27b1394331
22 changed files with 0 additions and 935 deletions

View file

@ -1,8 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Build script for the legacy mem_pool kernel regression test application.
cmake_minimum_required(VERSION 3.13.1)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(mem_pool)

# Compile every C file under src/ into the Zephyr 'app' target.
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})

View file

@ -1,43 +0,0 @@
Title: Memory Pool APIs
Description:
This test verifies that the memory pool and heap APIs operate as expected.
--------------------------------------------------------------------------------
Building and Running Project:
This project outputs to the console. It can be built and executed
on QEMU as follows:
make run
--------------------------------------------------------------------------------
Troubleshooting:
Problems caused by outdated project information can be addressed by
issuing one of the following commands then rebuilding the project:
make clean # discard results of previous builds
# but keep existing configuration info
or
make pristine # discard results of previous builds
# and restore pre-defined configuration info
--------------------------------------------------------------------------------
Sample Output:
***** BOOTING ZEPHYR OS xxxx - BUILD: xxxxxxx *****
tc_start() - Test Memory Pool and Heap APIs
Testing k_mem_pool_alloc(K_NO_WAIT) ...
Testing k_mem_pool_alloc(timeout) ...
Testing k_mem_pool_alloc(K_FOREVER) ...
Testing k_malloc() and k_free() ...
===================================================================
PASS - RegressionTask.
===================================================================
PROJECT EXECUTION SUCCESSFUL

View file

@ -1,3 +0,0 @@
# 256-byte system heap backing k_malloc()/k_free() in this test
CONFIG_HEAP_MEM_POOL_SIZE=256
# Build against the ztest test framework
CONFIG_ZTEST=y
# Pin to one CPU so the block/wake ordering in the tests is deterministic
CONFIG_MP_NUM_CPUS=1

View file

@ -1,372 +0,0 @@
/*
* Copyright (c) 2012-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
*
* Test memory pool and heap APIs
*
* This modules tests the following memory pool routines:
*
* - k_mem_pool_alloc(),
* - k_mem_pool_free(),
* - k_malloc(),
* - k_free()
*/
#include <zephyr.h>
#include <ztest.h>
#include <tc_util.h>
#include <sys/util.h>
/* Timeout constants, in milliseconds */
#define ONE_SECOND 1000
#define TENTH_SECOND 100

/* Upper bound on the blocks tracked by any one test table */
#define NUM_BLOCKS 64

/* This test is written to a set of known allocation patterns and
 * their results, making free assumptions about the fragmentation
 * behavior of the original mem_pool implementation. The newer k_heap
 * backend is more flexible, and also has allocation behavior that
 * depends on word size. So we keep separate tables for different
 * configs.
 */
#ifdef CONFIG_64BIT
# define HEAP64
#else
# define HEAP32
#endif

/* size of stack area used by each thread */
#define STACKSIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)

/* Semaphores synchronizing the main thread with the two helper threads */
K_SEM_DEFINE(ALTERNATE_SEM, 0, 1);
K_SEM_DEFINE(REGRESS_SEM, 0, 1);
K_SEM_DEFINE(HELPER_SEM, 0, 1);

/* Pools under test: (name, min block, max block, block count, alignment) */
K_MEM_POOL_DEFINE(POOL_ID, 64, 4096, 1, 4);
K_MEM_POOL_DEFINE(SECOND_POOL_ID, 16, 1024, 5, 4);

/* One tabulated allocation request and its expected outcome */
struct TEST_CASE {
	struct k_mem_block *block;  /* pointer to block data */
	struct k_mem_pool *pool_id; /* pool ID */
	int size;                   /* request size in bytes */
	int32_t timeout;            /* # of ticks to wait */
	int rcode;                  /* expected return code */
};

/* Signature of the allocation wrappers driven by pool_block_get_work() */
typedef int (*pool_block_get_func_t)(struct k_mem_block *, struct k_mem_pool *,
				     int, int32_t);
typedef int (*pool_move_block_func_t)(struct k_mem_block *, struct k_mem_pool *);

/* Progress marker written by alternate_task(), read by the main thread */
static volatile int evidence;

static struct k_mem_block block_list[NUM_BLOCKS]; /* blocks used by the tables */
static struct k_mem_block helper_block;           /* block shared with helper_task() */
/* Allocations of increasing size against POOL_ID; only the largest
 * request(s) are expected to fail with -ENOMEM.
 */
static struct TEST_CASE get_set[] = {
	{ &block_list[0], &POOL_ID, 0, 0, 0 },
	{ &block_list[1], &POOL_ID, 1, 0, 0 },
	{ &block_list[2], &POOL_ID, 32, 0, 0 },
	{ &block_list[3], &POOL_ID, 64, 0, 0 },
	{ &block_list[4], &POOL_ID, 128, 0, 0 },
	{ &block_list[5], &POOL_ID, 256, 0, 0 },
	{ &block_list[6], &POOL_ID, 512, 0, 0 },
	{ &block_list[7], &POOL_ID, 1024, 0, 0 },
#if defined(HEAP32) || defined(HEAP64)
	{ &block_list[8], &POOL_ID, 2048, 0, 0 },
#else
	{ &block_list[8], &POOL_ID, 2048, 0, -ENOMEM },
#endif
	{ &block_list[9], &POOL_ID, 4096, 0, -ENOMEM }
};

/* Exhaust the pool with one maximal allocation, then probe what still
 * fits; which smaller request succeeds depends on the heap backend's
 * per-word bookkeeping overhead.
 */
static struct TEST_CASE get_set2[] = {
	{ &block_list[0], &POOL_ID, 4096, 0, 0 },
	{ &block_list[1], &POOL_ID, 2048, 0, -ENOMEM },
	{ &block_list[2], &POOL_ID, 1024, 0, -ENOMEM },
#if defined(HEAP32)
	{ &block_list[3], &POOL_ID, 512, 0, -ENOMEM },
	{ &block_list[4], &POOL_ID, 256, 0, 0 }
#elif defined(HEAP64)
	{ &block_list[3], &POOL_ID, 512, 0, 0 },
	{ &block_list[4], &POOL_ID, 256, 0, -ENOMEM }
#else
	{ &block_list[3], &POOL_ID, 512, 0, -ENOMEM },
	{ &block_list[4], &POOL_ID, 256, 0, -ENOMEM }
#endif
};

/* Same pattern as get_set2, but with a bounded wait: exhausted-pool
 * requests time out with -EAGAIN instead of failing immediately.
 */
static struct TEST_CASE getwt_set[] = {
	{ &block_list[0], &POOL_ID, 4096, TENTH_SECOND, 0 },
	{ &block_list[1], &POOL_ID, 2048, TENTH_SECOND, -EAGAIN },
	{ &block_list[2], &POOL_ID, 1024, TENTH_SECOND, -EAGAIN },
#if defined(HEAP32)
	{ &block_list[3], &POOL_ID, 512, TENTH_SECOND, -EAGAIN },
	{ &block_list[4], &POOL_ID, 256, TENTH_SECOND, 0 }
#elif defined(HEAP64)
	{ &block_list[3], &POOL_ID, 512, TENTH_SECOND, 0 },
	{ &block_list[4], &POOL_ID, 256, TENTH_SECOND, -EAGAIN }
#else
	{ &block_list[3], &POOL_ID, 512, TENTH_SECOND, -EAGAIN },
	{ &block_list[4], &POOL_ID, 256, TENTH_SECOND, -EAGAIN }
#endif
};
/**
 * @brief Wrapper for k_mem_pool_alloc() with K_NO_WAIT
 *
 * Adapts the non-waiting allocation call to the pool_block_get_func_t
 * signature used by pool_block_get_work(); the timeout argument is ignored.
 *
 * @return k_mem_pool_alloc() return value
 */
static int pool_block_get_func(struct k_mem_block *block, struct k_mem_pool *pool,
			       int size, int32_t unused)
{
	ARG_UNUSED(unused);

	return k_mem_pool_alloc(pool, block, size, K_NO_WAIT);
}
/**
 * @brief Wrapper for k_mem_pool_alloc(timeout)
 *
 * Adapts the bounded-wait allocation call (timeout in milliseconds) to
 * the pool_block_get_func_t signature used by pool_block_get_work().
 *
 * @return k_mem_pool_alloc(timeout) return value
 */
static int pool_block_get_wt_func(struct k_mem_block *block, struct k_mem_pool *pool,
				  int size, int32_t timeout)
{
	return k_mem_pool_alloc(pool, block, size, K_MSEC(timeout));
}
/**
 * @brief Release every block the preceding table run allocated
 *
 * Walks a test-case table and frees only the entries whose expected
 * (and therefore actual) return code was success.
 *
 * @return N/A
 */
static void free_blocks(struct TEST_CASE *tests, int n_tests)
{
	for (int idx = 0; idx < n_tests; idx++) {
		struct TEST_CASE *tc = &tests[idx];

		if (tc->rcode != 0) {
			continue;
		}

		k_mem_pool_free(tc->block);
	}
}
/**
 * @brief Run one table of allocation test cases through an alloc wrapper
 *
 * Invokes @a func for every table entry and asserts that the return
 * code matches the entry's expected value.
 *
 * @param string  name printed in failure messages
 * @param func    allocation wrapper under test
 * @param tests   table of requests and expected results
 * @param n_tests number of entries in @a tests
 */
static void pool_block_get_work(char *string, pool_block_get_func_t func,
				struct TEST_CASE *tests, int n_tests)
{
	for (int idx = 0; idx < n_tests; idx++) {
		struct TEST_CASE *tc = &tests[idx];
		int ret = func(tc->block, tc->pool_id, tc->size, tc->timeout);

		zassert_equal(ret, tc->rcode, "%s() expected %d, got %d\n"
			      "size: %d, timeout: %d\n", string, tc->rcode,
			      ret, tc->size, tc->timeout);
	}
}
/**
 * @ingroup kernel_memory_pool_tests
 * @brief Test the k_mem_pool_alloc(K_NO_WAIT) API
 *
 * Repeatedly runs both non-waiting allocation tables against the 4 kB
 * pool, freeing everything between passes so each pass starts from an
 * empty pool.
 *
 * @see k_mem_pool_alloc()
 */
static void test_pool_block_get(void)
{
	for (int pass = 0; pass < 8; pass++) {
		pool_block_get_work("k_mem_pool_alloc", pool_block_get_func,
				    get_set, ARRAY_SIZE(get_set));
		free_blocks(get_set, ARRAY_SIZE(get_set));

		pool_block_get_work("k_mem_pool_alloc", pool_block_get_func,
				    get_set2, ARRAY_SIZE(get_set2));
		free_blocks(get_set2, ARRAY_SIZE(get_set2));
	}
}
/**
 * @brief Helper task to test_pool_block_get_timeout()
 *
 * Waits until the main thread signals HELPER_SEM, then acknowledges via
 * REGRESS_SEM and frees helper_block so the main thread's pending
 * allocation can complete.  The give-before-free order matters: the main
 * thread checks REGRESS_SEM right after its allocation succeeds.
 *
 * @return N/A
 */
void helper_task(void)
{
	k_sem_take(&HELPER_SEM, K_FOREVER);

	k_sem_give(&REGRESS_SEM);
	k_mem_pool_free(&helper_block);
}
/**
 * @ingroup kernel_memory_pool_tests
 * @brief Test k_mem_pool_alloc(timeout)
 *
 * First exercises the bounded-wait allocation table, then verifies the
 * blocking path end-to-end: fill the pool, confirm K_NO_WAIT fails,
 * wake helper_task to free its block, and confirm a 20 ms wait succeeds.
 *
 * @see k_mem_pool_alloc()
 */
static void test_pool_block_get_timeout(void)
{
	struct k_mem_block block;
	int rv; /* return value from k_mem_pool_alloc() */
	int j;  /* loop counter */

	for (j = 0; j < 8; j++) {
		pool_block_get_work("k_mem_pool_alloc", pool_block_get_wt_func,
				    getwt_set, ARRAY_SIZE(getwt_set));
		free_blocks(getwt_set, ARRAY_SIZE(getwt_set));
	}

	/* consume (nearly) the whole pool with the block helper_task will free */
	rv = k_mem_pool_alloc(&POOL_ID, &helper_block, 3148, K_MSEC(5));
	zassert_true(rv == 0,
		     "Failed to get size 3148 byte block from POOL_ID");

	/* pool is exhausted: an immediate request must fail */
	rv = k_mem_pool_alloc(&POOL_ID, &block, 3148, K_NO_WAIT);
	zassert_true(rv == -ENOMEM, "Unexpectedly got size 3148 "
		     "byte block from POOL_ID");

	k_sem_give(&HELPER_SEM); /* Activate helper_task */

	/* helper_task frees helper_block while we wait, so this succeeds */
	rv = k_mem_pool_alloc(&POOL_ID, &block, 3148, K_MSEC(20));
	zassert_true(rv == 0, "Failed to get size 3148 byte block from POOL_ID");

	/* helper_task signals REGRESS_SEM just before freeing */
	rv = k_sem_take(&REGRESS_SEM, K_NO_WAIT);
	zassert_true(rv == 0, "Failed to get size 3148 "
		     "byte block within 20 ticks");

	k_mem_pool_free(&block);
}
/**
* @ingroup kernel_memory_pool_tests
* @see k_mem_pool_alloc(), k_mem_pool_free()
*/
static void test_pool_block_get_wait(void)
{
int rv;
rv = k_mem_pool_alloc(&POOL_ID, &block_list[0], 3000, K_FOREVER);
zassert_equal(rv, 0, "k_mem_pool_alloc(3000) expected %d, got %d\n", 0, rv);
k_sem_give(&ALTERNATE_SEM); /* Wake alternate_task */
evidence = 0;
rv = k_mem_pool_alloc(&POOL_ID, &block_list[1], 128, K_FOREVER);
zassert_true(rv == 0, "k_mem_pool_alloc(128) expected %d, got %d\n", 0, rv);
switch (evidence) {
case 0:
zassert_true(evidence == 0, "k_mem_pool_alloc(128) did not block!");
case 1:
break;
case 2:
default:
zassert_true(1, "Rescheduling did not occur "
"after k_mem_pool_free()");
}
k_mem_pool_free(&block_list[1]);
}
/**
 * @brief Alternate task in the test suite
 *
 * This routine runs at a lower priority than the main thread.  It waits
 * for ALTERNATE_SEM, marks progress (evidence = 1), frees the block the
 * main thread is waiting on, then marks completion (evidence = 2).  If
 * the free immediately reschedules the higher-priority main thread, the
 * main thread observes evidence == 1.
 *
 * @return N/A
 */
void alternate_task(void)
{
	k_sem_take(&ALTERNATE_SEM, K_FOREVER);

	evidence = 1;
	k_mem_pool_free(&block_list[0]);
	evidence = 2;
}
/**
 * @ingroup kernel_memory_pool_tests
 * @brief Test the k_malloc() and k_free() APIs
 *
 * The heap memory pool is 256 bytes in size.  Part of each allocation
 * is consumed by hidden bookkeeping the allocator prepends, so the
 * usable space in a block is somewhat smaller than its nominal size.
 *
 * @see k_malloc(), k_free()
 */
static void test_pool_malloc(void)
{
	char *ptr[4];

	/* one large allocation consuming the entire pool buffer... */
	ptr[0] = k_malloc(150);
	zassert_not_null(ptr[0], "150 byte allocation failed");

	/* ...which is immediately returned */
	k_free(ptr[0]);

	/* a small allocation triggers block splitting */
	ptr[0] = k_malloc(16);
	zassert_not_null(ptr[0], "16 byte allocation 0 failed");

	/* the remaining small blocks must all be allocatable */
	for (int i = 1; i < 4; i++) {
		ptr[i] = k_malloc(16);
		zassert_not_null(ptr[i], "16 byte allocation %d failed\n", i);
	}

	/* hand the small blocks back in a scrambled order */
	k_free(ptr[2]);
	k_free(ptr[0]);
	k_free(ptr[3]);
	k_free(ptr[1]);

	/* a large allocation again triggers coalescing/defragmentation */
	ptr[0] = k_malloc(100);
	zassert_not_null(ptr[0], "100 byte allocation failed");

	/* ensure overflow detection is working */
	zassert_is_null(k_malloc(0xffffffff), "overflow check failed");
	zassert_is_null(k_calloc(0xffffffff, 2), "overflow check failed");
}
/* alternate_task runs below the main thread's priority; helper_task below that */
K_THREAD_DEFINE(t_alternate, STACKSIZE, alternate_task, NULL, NULL, NULL,
		6, 0, 0);
K_THREAD_DEFINE(t_helper, STACKSIZE, helper_task, NULL, NULL, NULL,
		7, 0, 0);

/* Test suite entry point */
void test_main(void)
{
	ztest_test_suite(mempool,
			 ztest_unit_test(test_pool_block_get),
			 /* timing-sensitive: restrict to one CPU */
			 ztest_1cpu_unit_test(test_pool_block_get_timeout),
			 ztest_unit_test(test_pool_block_get_wait),
			 ztest_unit_test(test_pool_malloc)
			 );
	ztest_run_test_suite(mempool);
}

View file

@ -1,3 +0,0 @@
# Test suite manifest for the legacy memory pool regression test
tests:
  kernel.memory_pool:
    tags: kernel mem_pool

View file

@ -1,13 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Build script for the mem_pool API test application.
cmake_minimum_required(VERSION 3.13.1)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(mem_pool_api)

# Compile every C file under src/ into the Zephyr 'app' target.
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})

# Internal kernel/arch headers are needed (e.g. for kernel_internal.h).
target_include_directories(app PRIVATE
  ${ZEPHYR_BASE}/kernel/include
  ${ZEPHYR_BASE}/arch/${ARCH}/include
)

View file

@ -1,3 +0,0 @@
# Build against the ztest test framework
CONFIG_ZTEST=y
# Needed to run test cases in ISR context via irq_offload()
CONFIG_IRQ_OFFLOAD=y
# Small system heap used by the thread resource-pool test
CONFIG_HEAP_MEM_POOL_SIZE=128

View file

@ -1,34 +0,0 @@
/*
* Copyright (c) 2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @brief Memory Pool Tests
* @defgroup kernel_memory_pool_tests Memory Pool
* @ingroup all_tests
* @{
* @}
*/
#include <ztest.h>
extern void test_mpool_alloc_free_thread(void);
extern void test_mpool_alloc_free_isr(void);
extern void test_mpool_kdefine_extern(void);
extern void test_mpool_alloc_timeout(void);
extern void test_sys_heap_mem_pool_assign(void);
/* Test case main entry: registers and runs the mpool_api suite */
void test_main(void)
{
	ztest_test_suite(mpool_api,
			 ztest_unit_test(test_mpool_alloc_free_thread),
			 ztest_unit_test(test_mpool_alloc_free_isr),
			 ztest_unit_test(test_mpool_kdefine_extern),
			 ztest_unit_test(test_mpool_alloc_timeout),
			 ztest_unit_test(test_sys_heap_mem_pool_assign)
			 );
	ztest_run_test_suite(mpool_api);
}

View file

@ -1,20 +0,0 @@
/*
* Copyright (c) 2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef __TEST_MPOOL_H__
#define __TEST_MPOOL_H__

/* Wait period used by the timeout tests, and its raw millisecond value */
#define TIMEOUT_MS 100
#define TIMEOUT K_MSEC(TIMEOUT_MS)

/* Geometry of the memory pool under test */
#define BLK_SIZE_MIN 16
#define BLK_SIZE_MAX 256
#define BLK_NUM_MIN 32
#define BLK_NUM_MAX 2
#define BLK_ALIGN BLK_SIZE_MIN

/* Shared alloc/free exercise, callable from thread or ISR context */
extern void tmpool_alloc_free(const void *data);

#endif /*__TEST_MPOOL_H__*/

View file

@ -1,151 +0,0 @@
/*
* Copyright (c) 2016, 2020 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ztest.h>
#include <irq_offload.h>
#include <kernel_internal.h>
#include "test_mpool.h"
/** TESTPOINT: Statically define and initialize a memory pool*/
K_MEM_POOL_DEFINE(kmpool, BLK_SIZE_MIN, BLK_SIZE_MAX, BLK_NUM_MAX, BLK_ALIGN);

/**
 * @brief Allocate and free minimum- and maximum-size blocks from kmpool
 *
 * Shared body of the thread-context and ISR-context test cases; @a data
 * is unused (present to match the irq_offload() callback signature).
 */
void tmpool_alloc_free(const void *data)
{
	ARG_UNUSED(data);
	static struct k_mem_block block[BLK_NUM_MIN];

	for (int i = 0; i < BLK_NUM_MIN; i++) {
		/**
		 * TESTPOINT: This routine allocates a memory block from a
		 * memory pool.
		 */
		/**
		 * TESTPOINT: @retval 0 Memory allocated. The @a data field of
		 * the block descriptor is set to the starting address of the
		 * memory block.
		 */
		zassert_true(k_mem_pool_alloc(&kmpool, &block[i], BLK_SIZE_MIN,
					      K_NO_WAIT) == 0, NULL);
		zassert_not_null(block[i].data, NULL);
	}

	for (int i = 0; i < BLK_NUM_MIN; i++) {
		/**
		 * TESTPOINT: This routine releases a previously allocated
		 * memory block back to its memory pool.
		 */
		k_mem_pool_free(&block[i]);
		block[i].data = NULL;
	}

	/**
	 * TESTPOINT: The memory pool's buffer contains @a n_max blocks that are
	 * @a max_size bytes long.
	 */
	for (int i = 0; i < BLK_NUM_MAX; i++) {
		zassert_true(k_mem_pool_alloc(&kmpool, &block[i], BLK_SIZE_MAX,
					      K_NO_WAIT) == 0, NULL);
		zassert_not_null(block[i].data, NULL);
	}

	for (int i = 0; i < BLK_NUM_MAX; i++) {
		k_mem_pool_free(&block[i]);
		block[i].data = NULL;
	}
}
/*test cases*/
/**
 * @ingroup kernel_memory_pool_tests
 * @brief Verify alloc and free of different block sizes.
 *
 * @details The test is basically checking if allocation
 * happens for MAX_SIZE and MIN_SIZE defined in memory pool.
 * Runs the shared tmpool_alloc_free() body in thread context.
 *
 * @see k_mem_pool_alloc(), k_mem_pool_free()
 */
void test_mpool_alloc_free_thread(void)
{
	tmpool_alloc_free(NULL);
}
/**
 * @ingroup kernel_memory_pool_tests
 * @brief Test to validate alloc and free in IRQ context
 *
 * @details The test is run in IRQ context via irq_offload().
 * The test checks allocation of MAX_SIZE and MIN_SIZE
 * defined in memory pool.
 *
 * @see k_mem_pool_alloc(), k_mem_pool_free()
 */
void test_mpool_alloc_free_isr(void)
{
	irq_offload(tmpool_alloc_free, NULL);
}
/**
 * @ingroup kernel_memory_pool_tests
 * @brief Verify memory pool allocation with timeouts
 *
 * @details Exhausts the pool with K_NO_WAIT allocations, then checks
 * that a further request fails immediately with -ENOMEM, and that a
 * bounded wait returns -EAGAIN only after at least TIMEOUT_MS elapse.
 *
 * @see k_mem_pool_alloc(), k_mem_pool_free()
 */
void test_mpool_alloc_timeout(void)
{
	static struct k_mem_block block[2 * BLK_NUM_MIN], fblock;
	int64_t tms;
	int nb;

	/* allocate all blocks */
	for (nb = 0; nb < ARRAY_SIZE(block); nb++) {
		if (k_mem_pool_alloc(&kmpool, &block[nb], BLK_SIZE_MIN,
				     K_NO_WAIT) != 0) {
			break;
		}
	}
	/* the pool must yield at least its nominal minimum-block count */
	zassert_true(nb >= BLK_NUM_MIN, NULL);

	/** TESTPOINT: Use K_NO_WAIT to return without waiting*/
	/** TESTPOINT: @retval -ENOMEM Returned without waiting*/
	zassert_equal(k_mem_pool_alloc(&kmpool, &fblock, BLK_SIZE_MIN,
				       K_NO_WAIT), -ENOMEM, NULL);

	/** TESTPOINT: @retval -EAGAIN Waiting period timed out*/
	tms = k_uptime_get();
	zassert_equal(k_mem_pool_alloc(&kmpool, &fblock, BLK_SIZE_MIN, TIMEOUT),
		      -EAGAIN, NULL);
	/**
	 * TESTPOINT: Maximum time to wait for operation to complete (in
	 * milliseconds)
	 */
	zassert_true(k_uptime_delta(&tms) >= TIMEOUT_MS, NULL);

	/* tear down: return everything allocated above */
	for (int i = 0; i < nb; i++) {
		k_mem_pool_free(&block[i]);
		block[i].data = NULL;
	}
}
/**
 * @brief Validate allocation and free from system heap memory pool
 *
 * @details System shall support assigning the common system heap
 * as its memory pool, also the kernel shall support freeing memory drawn
 * from a thread's resource pool.  An oversized request (beyond the
 * configured heap) must return NULL.
 *
 * @see k_thread_system_pool_assign(), z_thread_malloc(), k_free()
 */
void test_sys_heap_mem_pool_assign(void)
{
	void *ptr;

	/* make the system heap this thread's resource pool */
	k_thread_system_pool_assign(k_current_get());

	ptr = (char *)z_thread_malloc(BLK_SIZE_MIN/2);
	zassert_not_null(ptr, "bytes allocation failed from system pool");
	k_free(ptr);

	/* request larger than the whole heap must fail cleanly */
	zassert_is_null((char *)z_thread_malloc(BLK_SIZE_MAX * 2),
			"overflow check failed");
}

View file

@ -1,24 +0,0 @@
/*
* Copyright (c) 2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ztest.h>
#include "test_mpool.h"
/* kmpool is defined in the sibling source file via K_MEM_POOL_DEFINE() */
extern struct k_mem_pool kmpool;

/**
 * @brief Test extern declaration of a memory pool
 *
 * If the pool is to be accessed outside the module where it is
 * defined, it can be declared via @code extern struct k_mem_pool <name>
 * @endcode
 *
 * @see k_mem_pool_alloc(), k_mem_pool_free()
 */
void test_mpool_kdefine_extern(void)
{
	tmpool_alloc_free(NULL);
}

View file

@ -1,3 +0,0 @@
# Test suite manifest for the memory pool API tests
tests:
  kernel.memory_pool.api:
    tags: kernel mem_pool

View file

@ -1,8 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Build script for the mem_pool concept test application.
cmake_minimum_required(VERSION 3.13.1)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(mem_pool_concept)

# Compile every C file under src/ into the Zephyr 'app' target.
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})

View file

@ -1 +0,0 @@
# Build against the ztest test framework
CONFIG_ZTEST=y

View file

@ -1,16 +0,0 @@
/*
* Copyright (c) 2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ztest.h>
extern void test_mpool_alloc_wait_prio(void);
/* Test case main entry: registers and runs the mpool_concept suite */
void test_main(void)
{
	ztest_test_suite(mpool_concept,
			 /* timing/priority-sensitive: restrict to one CPU */
			 ztest_1cpu_unit_test(test_mpool_alloc_wait_prio));
	ztest_run_test_suite(mpool_concept);
}

View file

@ -1,17 +0,0 @@
/*
* Copyright (c) 2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
/* Bounded wait used by the blocked-allocation threads */
#define TIMEOUT K_MSEC(2000)

/* RISC-V needs a larger thread stack for these tests */
#ifdef CONFIG_RISCV
#define STACK_SIZE (1024 + CONFIG_TEST_EXTRA_STACKSIZE)
#else
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)
#endif

/* Geometry of the memory pool under test */
#define BLK_SIZE_MIN 16
#define BLK_SIZE_MAX 64
#define BLK_NUM_MIN 8
#define BLK_NUM_MAX 2
#define BLK_ALIGN BLK_SIZE_MIN

View file

@ -1,105 +0,0 @@
/*
* Copyright (c) 2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ztest.h>
#include "test_mpool.h"
/* One low-priority thread plus two competing high-priority threads */
#define THREAD_NUM 3

K_MEM_POOL_DEFINE(mpool1, BLK_SIZE_MIN, BLK_SIZE_MAX, BLK_NUM_MAX, BLK_ALIGN);
static K_THREAD_STACK_ARRAY_DEFINE(tstack, THREAD_NUM, STACK_SIZE);
static struct k_thread tdata[THREAD_NUM];
static struct k_sem sync_sema;          /* counts finished worker threads */
static struct k_mem_block block_ok;     /* block won by the expected waiter */

/* Thread entry: this waiter is expected to time out without a block */
void tmpool_alloc_wait_timeout(void *p1, void *p2, void *p3)
{
	struct k_mem_block block;

	zassert_true(k_mem_pool_alloc(&mpool1, &block, BLK_SIZE_MIN,
				      TIMEOUT) == -EAGAIN, NULL);
	k_sem_give(&sync_sema);
}

/* Thread entry: this waiter is expected to receive the freed block */
void tmpool_alloc_wait_ok(void *p1, void *p2, void *p3)
{
	zassert_true(k_mem_pool_alloc(&mpool1, &block_ok, BLK_SIZE_MIN,
				      TIMEOUT) == 0, NULL);
	k_sem_give(&sync_sema);
}
/*test cases*/
/**
 * @brief Verify alloc and free with different prio threads
 *
 * @ingroup kernel_memory_pool_tests
 *
 * @details The test case allocates all available minimum-size blocks,
 * and spawns 3 threads: T1 at the lowest priority, and T2 and T3 at the
 * same higher priority with delayed starts of 10 ms and 20 ms
 * respectively.  When one block is freed, it must go to the
 * highest-priority thread that has waited the longest (T2); the other
 * two waiters must time out.
 *
 * @see k_mem_pool_alloc()
 * @see k_mem_pool_free()
 */
void test_mpool_alloc_wait_prio(void)
{
	struct k_mem_block block[2 * BLK_NUM_MIN];
	k_tid_t tid[THREAD_NUM];
	int nb;

	k_sem_init(&sync_sema, 0, THREAD_NUM);

	/*allocated up all blocks*/
	for (nb = 0; nb < ARRAY_SIZE(block); nb++) {
		if (k_mem_pool_alloc(&mpool1, &block[nb], BLK_SIZE_MIN,
				     K_NO_WAIT) != 0) {
			break;
		}
	}
	zassert_true(nb >= BLK_NUM_MIN, "nb %d want %d", nb, BLK_NUM_MIN);

	/**
	 * TESTPOINT: when a suitable memory block becomes available, it is
	 * given to the highest-priority thread that has waited the longest
	 */
	/**
	 * TESTPOINT: If a block of the desired size is unavailable, a thread
	 * can optionally wait for one to become available
	 */
	/*the low-priority thread*/
	tid[0] = k_thread_create(&tdata[0], tstack[0], STACK_SIZE,
				 tmpool_alloc_wait_timeout, NULL, NULL, NULL,
				 K_PRIO_PREEMPT(1), 0, K_NO_WAIT);
	/*the highest-priority thread that has waited the longest*/
	tid[1] = k_thread_create(&tdata[1], tstack[1], STACK_SIZE,
				 tmpool_alloc_wait_ok, NULL, NULL, NULL,
				 K_PRIO_PREEMPT(0), 0, K_MSEC(10));
	/*the highest-priority thread that has waited shorter*/
	tid[2] = k_thread_create(&tdata[2], tstack[2], STACK_SIZE,
				 tmpool_alloc_wait_timeout, NULL, NULL, NULL,
				 K_PRIO_PREEMPT(0), 0, K_MSEC(20));

	/*relinquish CPU for above threads to start */
	k_sleep(K_MSEC(30));

	/*free one block, expected to unblock thread "tid[1]"*/
	k_mem_pool_free(&block[0]);

	/*wait for all threads exit*/
	for (int i = 0; i < THREAD_NUM; i++) {
		k_sem_take(&sync_sema, K_FOREVER);
	}

	/*test case tear down*/
	for (int i = 0; i < THREAD_NUM; i++) {
		k_thread_abort(tid[i]);
	}
	k_mem_pool_free(&block_ok);
	/* block[0] was already freed above to wake tid[1] */
	for (int i = 1; i < nb; i++) {
		k_mem_pool_free(&block[i]);
	}
}

View file

@ -1,3 +0,0 @@
# Test suite manifest for the memory pool concept tests
tests:
  kernel.memory_pool.concept:
    tags: kernel mem_pool

View file

@ -1,8 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# Build script for the mem_pool thread-safety test application.
cmake_minimum_required(VERSION 3.13.1)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(mem_pool_threadsafe)

# Compile every C file under src/ into the Zephyr 'app' target.
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})

View file

@ -1,2 +0,0 @@
# Build against the ztest test framework
CONFIG_ZTEST=y
# 1-tick time slices force frequent preemption between the worker threads
CONFIG_TIMESLICE_SIZE=1

View file

@ -1,95 +0,0 @@
/*
* Copyright (c) 2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ztest.h>
#include <sys/atomic.h>
/* Number of equal-priority worker threads hammering the pools */
#define THREAD_NUM 4
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)
#define POOL_NUM 2
/* Alloc/free iterations performed by each worker */
#define LOOPS 10
#define TIMEOUT K_MSEC(200)

/* Pool geometry, expressed in terms of the architecture minimum block */
#define BLK_SIZE_MIN _MPOOL_MINBLK
#define BLK_SIZE_MAX (4 * BLK_SIZE_MIN)
#define BLK_NUM_MIN 8
#define BLK_NUM_MAX 2
#define BLK_ALIGN sizeof(void *)

K_MEM_POOL_DEFINE(mpool1, BLK_SIZE_MIN, BLK_SIZE_MAX, BLK_NUM_MAX, BLK_ALIGN);
K_MEM_POOL_DEFINE(mpool2, BLK_SIZE_MIN, BLK_SIZE_MAX, BLK_NUM_MAX, BLK_ALIGN);
static K_THREAD_STACK_ARRAY_DEFINE(tstack, THREAD_NUM, STACK_SIZE);
static struct k_thread tdata[THREAD_NUM];
static struct k_mem_pool *pools[POOL_NUM] = { &mpool1, &mpool2 };
static struct k_sem sync_sema;   /* counts finished worker threads */
static atomic_t pool_id;         /* round-robins workers across the pools */
/* Thread entry: repeatedly allocates four minimum-size blocks plus one
 * maximum-size block from its assigned pool with a bounded wait, then
 * frees whichever of those allocations succeeded.  Contention means any
 * individual allocation may legitimately time out.
 */
static void tmpool_api(void *p1, void *p2, void *p3)
{
	struct k_mem_block block[BLK_NUM_MIN];
	int ret[BLK_NUM_MIN];
	/* round-robin pool assignment across the worker threads */
	struct k_mem_pool *pool = pools[atomic_inc(&pool_id) % POOL_NUM];

	(void)memset(block, 0, sizeof(block));

	for (int loops = 0; loops < LOOPS; loops++) {
		for (int i = 0; i < 4; i++) {
			ret[i] = k_mem_pool_alloc(pool, &block[i],
						  BLK_SIZE_MIN, TIMEOUT);
		}
		ret[4] = k_mem_pool_alloc(pool, &block[4], BLK_SIZE_MAX,
					  TIMEOUT);

		/* free only the allocations that actually succeeded */
		for (int i = 0; i < 5; i++) {
			if (ret[i] == 0) {
				k_mem_pool_free(&block[i]);
			}
		}
	}

	k_sem_give(&sync_sema);
}
/* test cases*/
/**
 * @brief Test alloc and free from multiple equal-priority threads
 *
 * @ingroup kernel_memory_pool_tests
 *
 * @details The test creates 4 threads of equal priority that invoke
 * the memory pool APIs concurrently (with 1-tick time slicing forcing
 * preemption).  Each thread repeatedly allocates blocks with a 200 ms
 * timeout and frees them again; the test passes when all threads run
 * to completion without assertion failures.
 */
void test_mpool_threadsafe(void)
{
	k_tid_t tid[THREAD_NUM];

	k_sem_init(&sync_sema, 0, THREAD_NUM);

	/* create multiple threads to invoke same memory pool APIs*/
	for (int i = 0; i < THREAD_NUM; i++) {
		tid[i] = k_thread_create(&tdata[i], tstack[i], STACK_SIZE,
					 tmpool_api, NULL, NULL, NULL,
					 K_PRIO_PREEMPT(1), 0, K_NO_WAIT);
	}

	/* TESTPOINT: all threads complete and exit the entry function*/
	for (int i = 0; i < THREAD_NUM; i++) {
		k_sem_take(&sync_sema, K_FOREVER);
	}

	/* test case tear down*/
	for (int i = 0; i < THREAD_NUM; i++) {
		k_thread_abort(tid[i]);
	}
}
/* Test suite entry point: registers and runs the mpool_threadsafe suite */
void test_main(void)
{
	ztest_test_suite(mpool_threadsafe,
			 ztest_unit_test(test_mpool_threadsafe));
	ztest_run_test_suite(mpool_threadsafe);
}

View file

@ -1,3 +0,0 @@
# Test suite manifest for the memory pool thread-safety tests
tests:
  kernel.memory_pool.threadsafe:
    tags: kernel mem_pool