kernel: Make the "heap" backend the mem_pool default

Remove the MEM_POOL_HEAP_BACKEND kconfig, treating it as always true.
The legacy mem_pool implementation can no longer be enabled; all
k_mem_pool usage now goes through the k_heap/sys_heap backend.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Authored by Andy Ross on 2020-09-22 12:20:48 -07:00; committed by Anas Nashif
commit 8a6aee9cac
21 changed files with 6 additions and 560 deletions
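The public k_mem_pool API itself is untouched by this change; only the implementation behind it moves. A minimal usage sketch (the pool name and parameters below are illustrative, not taken from this commit) that builds the same way before and after:

#include <zephyr.h>

/* Define a pool: 16-byte min block, 256-byte max block, 4 max-size blocks, 4-byte align. */
K_MEM_POOL_DEFINE(demo_pool, 16, 256, 4, 4);

void demo(void)
{
	struct k_mem_block block;

	/* Allocation and free now always go through the k_heap/sys_heap backend. */
	if (k_mem_pool_alloc(&demo_pool, &block, 100, K_NO_WAIT) == 0) {
		/* ... use block.data ... */
		k_mem_pool_free(&block);
	}
}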


@@ -4546,7 +4546,7 @@ void k_heap_free(struct k_heap *h, void *mem);
* If the pool is to be accessed outside the module where it is defined, it
* can be declared via
*
* @note When @option{CONFIG_MEM_POOL_HEAP_BACKEND} is enabled, the k_mem_pool
* @note The k_mem_pool
* API is implemented on top of a k_heap, which is a more general
* purpose allocator which does not make the same promises about
* splitting or alignment detailed above. Blocks will be aligned only
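For comparison, a minimal sketch of the k_heap API that k_mem_pool now sits on (the heap name and size are illustrative); it hands out plain pointers rather than quarter-split blocks, which is why the alignment promise above is weaker:

K_HEAP_DEFINE(demo_heap, 1024);

void heap_demo(void)
{
	/* k_heap_alloc() takes a timeout, like the pool API. */
	void *mem = k_heap_alloc(&demo_heap, 100, K_NO_WAIT);

	if (mem != NULL) {
		k_heap_free(&demo_heap, mem);
	}
}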


@@ -27,11 +27,7 @@
#include <sys/util.h>
#include <sys/mempool_base.h>
#include <kernel_structs.h>
#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
#include <mempool_heap.h>
#else
#include <mempool_sys.h>
#endif
#include <kernel_version.h>
#include <syscall.h>
#include <sys/printk.h>


@@ -1,55 +0,0 @@
/*
* Copyright (c) 2020 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_MEMPOOL_SYS_H_
#define ZEPHYR_INCLUDE_MEMPOOL_SYS_H_
/**
* @defgroup mem_pool_apis Memory Pool APIs
* @ingroup kernel_apis
* @{
*/
/* Note on sizing: the use of a 20 bit field for block means that,
* assuming a reasonable minimum block size of 16 bytes, we're limited
* to 16M of memory managed by a single pool. Long term it would be
* good to move to a variable bit size based on configuration.
*/
struct k_mem_block_id {
uint32_t pool : 8;
uint32_t level : 4;
uint32_t block : 20;
};
struct k_mem_block {
void *data;
struct k_mem_block_id id;
};
/** @} */
struct k_mem_pool {
struct sys_mem_pool_base base;
_wait_q_t wait_q;
};
#define Z_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
char __aligned(WB_UP(align)) _mpool_buf_##name[WB_UP(maxsz) * nmax \
+ _MPOOL_BITS_SIZE(maxsz, minsz, nmax)]; \
struct sys_mem_pool_lvl \
_mpool_lvls_##name[Z_MPOOL_LVLS(maxsz, minsz)]; \
Z_STRUCT_SECTION_ITERABLE(k_mem_pool, name) = { \
.base = { \
.buf = _mpool_buf_##name, \
.max_sz = WB_UP(maxsz), \
.n_max = nmax, \
.n_levels = Z_MPOOL_LVLS(maxsz, minsz), \
.levels = _mpool_lvls_##name, \
.flags = SYS_MEM_POOL_KERNEL \
} \
}; \
BUILD_ASSERT(WB_UP(maxsz) >= _MPOOL_MINBLK)
#endif /* ZEPHYR_INCLUDE_MEMPOOL_SYS_H_ */
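A worked check of the sizing note in the removed header above (the 16-byte minimum block size is the assumption stated in that comment, and the macro names here are illustrative only; BUILD_ASSERT comes from Zephyr's toolchain headers):

#define LEGACY_POOL_MAX_BLOCKS  (1UL << 20)   /* 20-bit block field */
#define LEGACY_POOL_MIN_BLOCK   16UL          /* assumed minimum block size */
#define LEGACY_POOL_MAX_BYTES   (LEGACY_POOL_MAX_BLOCKS * LEGACY_POOL_MIN_BLOCK)

/* 1048576 blocks * 16 bytes = 16 MiB managed by a single pool, at most. */
BUILD_ASSERT(LEGACY_POOL_MAX_BYTES == 16UL * 1024UL * 1024UL);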


@@ -52,11 +52,6 @@ target_sources_ifdef(CONFIG_POLL kernel PRIVATE poll.c)
if(${CONFIG_KERNEL_MEM_POOL})
target_sources(kernel PRIVATE mempool.c)
if(${CONFIG_MEM_POOL_HEAP_BACKEND})
else()
target_sources(kernel PRIVATE mempool_sys.c)
endif()
endif()
if(NOT CONFIG_MULTITHREADING)


@@ -502,16 +502,6 @@ config KERNEL_MEM_POOL
if KERNEL_MEM_POOL
config MEM_POOL_HEAP_BACKEND
bool "Use k_heap as the backend for k_mem_pool"
default y
help
This selects a backend implementation for k_mem_pool based
on the sys_heap abstraction instead of the legacy
sys_mem_pool. This backend has significantly better
performance and memory utilization for general purpose
workloads.
config HEAP_MEM_POOL_SIZE
int "Heap memory pool size (in bytes)"
default 0 if !POSIX_MQUEUE
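The practical effect on code that selected a backend at compile time is visible in the include hunk earlier in this commit: the guard simply disappears. Illustratively:

/* Before this commit: */
#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
#include <mempool_heap.h>
#else
#include <mempool_sys.h>
#endif

/* After it, the k_heap backend is the only implementation: */
#include <mempool_heap.h>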


@@ -64,7 +64,6 @@ void k_heap_free(struct k_heap *h, void *mem)
}
}
#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
/* Compatibility layer for legacy k_mem_pool code on top of a k_heap
* backend.
*/
@@ -89,5 +88,3 @@ void k_mem_pool_free_id(struct k_mem_block_id *id)
{
k_heap_free(id->heap, id->data);
}
#endif /* CONFIG_MEM_POOL_HEAP_BACKEND */
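A hedged sketch of what the allocation side of that compatibility layer looks like on top of k_heap (the p->heap field is assumed from mempool_heap.h, which this commit does not show; id->heap and id->data match the k_mem_pool_free_id() hunk above, and error handling is simplified):

int k_mem_pool_alloc_sketch(struct k_mem_pool *p, struct k_mem_block *block,
			    size_t size, k_timeout_t timeout)
{
	/* Delegate straight to the heap, reusing its blocking/timeout logic. */
	block->data = k_heap_alloc(p->heap, size, timeout);

	/* Record enough in the block id for k_mem_pool_free_id() to undo it. */
	block->id.heap = p->heap;
	block->id.data = block->data;

	return (block->data != NULL) ? 0 : -ENOMEM;
}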


@@ -1,110 +0,0 @@
/*
* Copyright (c) 2017, 2020 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <ksched.h>
#include <wait_q.h>
#include <init.h>
static struct k_spinlock lock;
static struct k_mem_pool *get_pool(int id)
{
extern struct k_mem_pool _k_mem_pool_list_start[];
return &_k_mem_pool_list_start[id];
}
static int pool_id(struct k_mem_pool *pool)
{
extern struct k_mem_pool _k_mem_pool_list_start[];
return pool - &_k_mem_pool_list_start[0];
}
static void k_mem_pool_init(struct k_mem_pool *p)
{
z_waitq_init(&p->wait_q);
z_sys_mem_pool_base_init(&p->base);
}
int init_static_pools(const struct device *unused)
{
ARG_UNUSED(unused);
Z_STRUCT_SECTION_FOREACH(k_mem_pool, p) {
k_mem_pool_init(p);
}
return 0;
}
SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
size_t size, k_timeout_t timeout)
{
int ret;
uint64_t end = 0;
__ASSERT(!(arch_is_in_isr() && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
end = z_timeout_end_calc(timeout);
while (true) {
uint32_t level_num, block_num;
ret = z_sys_mem_pool_block_alloc(&p->base, size,
&level_num, &block_num,
&block->data);
block->id.pool = pool_id(p);
block->id.level = level_num;
block->id.block = block_num;
if (ret == 0 || K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
ret != -ENOMEM) {
return ret;
}
z_pend_curr_unlocked(&p->wait_q, timeout);
if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
int64_t remaining = end - z_tick_get();
if (remaining <= 0) {
break;
}
timeout = Z_TIMEOUT_TICKS(remaining);
}
}
return -EAGAIN;
}
void k_mem_pool_free_id(struct k_mem_block_id *id)
{
int need_sched = 0;
struct k_mem_pool *p = get_pool(id->pool);
z_sys_mem_pool_block_free(&p->base, id->level, id->block);
/* Wake up anyone blocked on this pool and let them repeat
* their allocation attempts
*
* (Note that this spinlock only exists because z_unpend_all()
* is unsynchronized. Maybe we want to put the lock into the
* wait_q instead and make the API safe?)
*/
k_spinlock_key_t key = k_spin_lock(&lock);
need_sched = z_unpend_all(&p->wait_q);
if (need_sched != 0) {
z_reschedule(&lock, key);
} else {
k_spin_unlock(&lock, key);
}
}


@@ -28,7 +28,6 @@ void test_main(void)
ztest_unit_test(test_mheap_malloc_free),
ztest_unit_test(test_mheap_calloc),
ztest_unit_test(test_mheap_malloc_align4),
ztest_unit_test(test_mheap_min_block_size),
ztest_unit_test(test_mheap_block_desc),
ztest_unit_test(test_mheap_block_release));
ztest_run_test_suite(mheap_api);


@@ -45,57 +45,6 @@ void test_mheap_malloc_align4(void)
}
}
/**
* @brief The test case to ensure heap minimum block size is 64 bytes.
*
* @ingroup kernel_heap_tests
*
* @see k_malloc(), k_free()
*
* @details Heap pool's minimum block size is 64 bytes. The test case tries
* to ensure it by allocating blocks lesser than minimum block size.
* The test allocates 8 blocks of size 0. The algorithm has to allocate 64
* bytes of blocks, this is ensured by allocating one more block of max size
* which results in failure. Finally all the blocks are freed and added back
* to heap memory pool.
*/
void test_mheap_min_block_size(void)
{
void *block[BLK_NUM_MAX], *block_fail;
/* The k_heap backend doesn't have the splitting behavior
* expected here, this test is too specific, and a more
* general version of the same test is available in
* test_mheap_malloc_free()
*/
if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
ztest_test_skip();
}
/**
* TESTPOINT: The heap memory pool also defines a minimum block
* size of 64 bytes.
* Test steps:
* initial memory heap status (F for free, U for used):
* 64F, 64F, 64F, 64F
* 1. request 4 blocks: each 0-byte plus 16-byte block desc,
* indeed 64-byte allocated
* 2. verify no more free blocks, any further allocation failed
*/
for (int i = 0; i < BLK_NUM_MAX; i++) {
block[i] = k_malloc(TEST_SIZE_0);
zassert_not_null(block[i], NULL);
}
/* verify no more free blocks available*/
block_fail = k_malloc(BLK_SIZE_MIN);
zassert_is_null(block_fail, NULL);
/* test case tear down*/
for (int i = 0; i < BLK_NUM_MAX; i++) {
k_free(block[i]);
}
}
/**
* @brief Verify if the block descriptor is included
* in every block which is allocated


@@ -34,12 +34,10 @@
* depends on words size. So we keep separate tables for different
* configs.
*/
#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
# ifdef CONFIG_64BIT
# define HEAP64
# else
# define HEAP32
# endif
#ifdef CONFIG_64BIT
# define HEAP64
#else
# define HEAP32
#endif
/* size of stack area used by each thread */
@@ -317,12 +315,6 @@ void alternate_task(void)
* amount of usable space, due to the hidden block descriptor info the
* kernel adds at the start of any block allocated from this memory pool.)
*
* NOTE: when CONFIG_MEM_POOL_HEAP_BACKEND is in use, the splitting
* algorithm being exercised by this test is not used. In fact the
* k_heap backend is significantly more fragmentation resistant, so
* calls expected to fail here actually succeed. These are disabled
* here.
*
* @see k_malloc(), k_free()
*/
static void test_pool_malloc(void)
@@ -334,12 +326,6 @@ static void test_pool_malloc(void)
block[0] = k_malloc(150);
zassert_not_null(block[0], "150 byte allocation failed");
#ifndef CONFIG_MEM_POOL_HEAP_BACKEND
/* ensure a small block can no longer be allocated */
block[1] = k_malloc(16);
zassert_is_null(block[1], "16 byte allocation did not fail");
#endif
/* return the large block */
k_free(block[0]);
@@ -347,23 +333,12 @@ static void test_pool_malloc(void)
block[0] = k_malloc(16);
zassert_not_null(block[0], "16 byte allocation 0 failed");
#ifndef CONFIG_MEM_POOL_HEAP_BACKEND
/* ensure a large block can no longer be allocated */
block[1] = k_malloc(80);
zassert_is_null(block[1], "80 byte allocation did not fail");
#endif
/* ensure all remaining small blocks can be allocated */
for (j = 1; j < 4; j++) {
block[j] = k_malloc(16);
zassert_not_null(block[j], "16 byte allocation %d failed\n", j);
}
#ifndef CONFIG_MEM_POOL_HEAP_BACKEND
/* ensure a small block can no longer be allocated */
zassert_is_null(k_malloc(8), "8 byte allocation did not fail");
#endif
/* return the small blocks to pool in a "random" order */
k_free(block[2]);
k_free(block[0]);
@@ -374,12 +349,6 @@ static void test_pool_malloc(void)
block[0] = k_malloc(100);
zassert_not_null(block[0], "100 byte allocation failed");
#ifndef CONFIG_MEM_POOL_HEAP_BACKEND
/* ensure a small block can no longer be allocated */
zassert_is_null(k_malloc(32), "32 byte allocation did not fail");
#endif
/* ensure overflow detection is working */
zassert_is_null(k_malloc(0xffffffff), "overflow check failed");
zassert_is_null(k_calloc(0xffffffff, 2), "overflow check failed");


@@ -1,8 +1,3 @@
tests:
kernel.memory_pool:
tags: kernel mem_pool
kernel.memory_pool.legacy:
min_ram: 32
tags: kernel mem_pool
extra_configs:
- CONFIG_MEM_POOL_HEAP_BACKEND=n


@@ -17,7 +17,6 @@
extern void test_mpool_alloc_free_thread(void);
extern void test_mpool_alloc_free_isr(void);
extern void test_mpool_kdefine_extern(void);
extern void test_mpool_alloc_size(void);
extern void test_mpool_alloc_timeout(void);
extern void test_sys_heap_mem_pool_assign(void);
@@ -28,7 +27,6 @@ void test_main(void)
ztest_unit_test(test_mpool_alloc_free_thread),
ztest_unit_test(test_mpool_alloc_free_isr),
ztest_unit_test(test_mpool_kdefine_extern),
ztest_unit_test(test_mpool_alloc_size),
ztest_unit_test(test_mpool_alloc_timeout),
ztest_unit_test(test_sys_heap_mem_pool_assign)
);


@@ -87,64 +87,6 @@ void test_mpool_alloc_free_isr(void)
irq_offload(tmpool_alloc_free, NULL);
}
/**
* @ingroup kernel_memory_pool_tests
* @brief Validates breaking a block into quarters feature
*
* @details The test case validates how a mem_pool provides
* functionality to break a block into quarters and repeatedly
* allocate and free the blocks.
* @see k_mem_pool_alloc(), k_mem_pool_free()
*/
void test_mpool_alloc_size(void)
{
static struct k_mem_block block[BLK_NUM_MIN];
size_t size = BLK_SIZE_MAX;
int i = 0;
/* The sys_heap backend doesn't use the specific block
* breaking algorithm tested here. This is a test of the
* legacy sys_mem_pool allocator only.
*/
if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
ztest_test_skip();
}
/**TESTPOINT: The memory pool allows blocks to be repeatedly partitioned
* into quarters, down to blocks of @a min_size bytes long.
*/
while (size >= BLK_SIZE_MIN) {
zassert_true(k_mem_pool_alloc(&kmpool, &block[i], size,
K_NO_WAIT) == 0, NULL);
zassert_not_null(block[i].data, NULL);
zassert_true((uintptr_t)(block[i].data) % BLK_ALIGN == 0, NULL);
i++;
size = size >> 2;
}
while (i--) {
k_mem_pool_free(&block[i]);
block[i].data = NULL;
}
i = 0;
size = BLK_SIZE_MIN;
/**TESTPOINT: To ensure that all blocks in the buffer are similarly
* aligned to this boundary, min_size must also be a multiple of align.
*/
while (size <= BLK_SIZE_MAX) {
zassert_true(k_mem_pool_alloc(&kmpool, &block[i], size,
K_NO_WAIT) == 0, NULL);
zassert_not_null(block[i].data, NULL);
zassert_true((uintptr_t)(block[i].data) % BLK_ALIGN == 0, NULL);
i++;
size = size << 2;
}
while (i--) {
k_mem_pool_free(&block[i]);
block[i].data = NULL;
}
}
/**
* @see k_mem_pool_alloc(), k_mem_pool_free()
* @brief Verify memory pool allocation with timeouts
@@ -164,16 +106,7 @@ void test_mpool_alloc_timeout(void)
}
}
/* The original mem_pool would always be able to allocate
* exactly "min blocks" before running out of space, the
* heuristics used to size the sys_heap backend are more
* flexible.
*/
#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
zassert_true(nb >= BLK_NUM_MIN, NULL);
#else
zassert_true(nb == BLK_NUM_MIN, NULL);
#endif
/** TESTPOINT: Use K_NO_WAIT to return without waiting*/
/** TESTPOINT: @retval -ENOMEM Returned without waiting*/


@@ -1,7 +1,3 @@
tests:
kernel.memory_pool.api:
tags: kernel mem_pool
kernel.memory_pool.api.legacy:
tags: kernel mem_pool
extra_configs:
- CONFIG_MEM_POOL_HEAP_BACKEND=n


@@ -6,17 +6,11 @@
#include <ztest.h>
extern void test_mpool_alloc_wait_prio(void);
extern void test_mpool_alloc_size_roundup(void);
extern void test_mpool_alloc_merge_failed_diff_size(void);
extern void test_mpool_alloc_merge_failed_diff_parent(void);
/*test case main entry*/
void test_main(void)
{
ztest_test_suite(mpool_concept,
ztest_1cpu_unit_test(test_mpool_alloc_wait_prio),
ztest_unit_test(test_mpool_alloc_size_roundup),
ztest_unit_test(test_mpool_alloc_merge_failed_diff_size),
ztest_unit_test(test_mpool_alloc_merge_failed_diff_parent));
ztest_1cpu_unit_test(test_mpool_alloc_wait_prio));
ztest_run_test_suite(mpool_concept);
}


@@ -1,54 +0,0 @@
/*
* Copyright (c) 2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ztest.h>
#include "test_mpool.h"
#define TEST_SIZE ((BLK_SIZE_MAX >> 2) + 1)
extern struct k_mem_pool mpool1;
/*test cases*/
/**
* @brief Test alloc and free of different blocks sizes
*
* @ingroup kernel_memory_pool_tests
*
* @details The test demonstrates how the request is handled
* to allocate the minimum available size block in memory pool
* to satisfy the requirement of the application.
*/
void test_mpool_alloc_size_roundup(void)
{
struct k_mem_block block[BLK_NUM_MAX], block_fail;
/* This test is written to assume specific heap layout, in
* fact the sys_heap backend can routinely see more than "min
* blocks" allocated.
*/
if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
ztest_test_skip();
}
/**
* TESTPOINT: When an application issues a request for a memory block,
* the memory pool first determines the size of the smallest block that
* will satisfy the request
*/
for (int i = 0; i < BLK_NUM_MAX; i++) {
/*request a size for the mpool to round up to "BLK_SIZE_MAX"*/
zassert_true(k_mem_pool_alloc(&mpool1, &block[i], TEST_SIZE,
K_NO_WAIT) == 0, NULL);
}
/*verify consequently no more blocks available*/
zassert_true(k_mem_pool_alloc(&mpool1, &block_fail, BLK_SIZE_MIN,
K_NO_WAIT) == -ENOMEM, NULL);
/*test case tear down*/
for (int i = 0; i < BLK_NUM_MAX; i++) {
k_mem_pool_free(&block[i]);
}
}
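The round-up rule this removed test relied on can be sketched as follows (an illustration of the legacy quad-block sizing behaviour, not the kernel's actual code; note TEST_SIZE above is one byte more than a quarter of the maximum block, so it always rounds up to a full maximum-size block):

/* Smallest quad-block level size that satisfies a request, assuming each
 * level is four times the size of the one below it.
 */
static size_t quad_pool_round_up(size_t req, size_t min_sz, size_t max_sz)
{
	size_t sz = min_sz;

	while (sz < req && sz < max_sz) {
		sz *= 4;
	}
	return (sz >= req) ? sz : 0;   /* 0: request exceeds the maximum block */
}

/* E.g. with a 16-byte minimum and 256-byte maximum, a ((256 >> 2) + 1) == 65
 * byte request returns 256: exactly the "round up to BLK_SIZE_MAX" case.
 */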


@@ -63,17 +63,7 @@ void test_mpool_alloc_wait_prio(void)
}
}
/* The original mem_pool would always be able to allocate
* exactly "min blocks" before running out of space, the
* heuristics used to size the sys_heap backend are more
* flexible.
*/
#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
zassert_true(nb >= BLK_NUM_MIN, "nb %d want %d", nb, BLK_NUM_MIN);
#else
zassert_true(nb == BLK_NUM_MIN, NULL);
#endif
/**
* TESTPOINT: when a suitable memory block becomes available, it is


@@ -1,57 +0,0 @@
/*
* Copyright (c) 2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ztest.h>
#include "test_mpool.h"
extern struct k_mem_pool mpool1;
/*test cases*/
/**
* @brief Test to verify merge of blocks of different quad-block
*
* @ingroup kernel_memory_pool_tests
*
* @details TESTPOINT: The algo cannot merge adjacent free blocks
* of the same size if they belong to different parent quad-blocks
* Test steps: 1. allocate block [0~7] in minimum block size
* 2. free block [2~5], belong to diff parental quad-blocks
* 3. request a big block
* verify blocks [2, 3] and blocks [4, 5] can't be merged
* 4. tear down, free blocks [0, 1, 6, 7]
*/
void test_mpool_alloc_merge_failed_diff_parent(void)
{
/* The heap backend doesn't use the splitting mechanism tested
* here, and in fact is significantly more fragmentation
* resistant and succeeds at the "failed" allocation desired
* below.
*/
if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
ztest_test_skip();
}
struct k_mem_block block[BLK_NUM_MIN], block_fail;
for (int i = 0; i < BLK_NUM_MIN; i++) {
/* 1. allocated up all blocks*/
zassert_true(k_mem_pool_alloc(&mpool1, &block[i], BLK_SIZE_MIN,
K_NO_WAIT) == 0, NULL);
}
/* 2. free adjacent blocks belong to different parent quad-blocks*/
for (int i = 2; i < 6; i++) {
k_mem_pool_free(&block[i]);
}
/* 3. request a big block, expected failed to merge*/
zassert_true(k_mem_pool_alloc(&mpool1, &block_fail, BLK_SIZE_MAX,
TIMEOUT) == -EAGAIN, NULL);
/* 4. test case tear down*/
k_mem_pool_free(&block[0]);
k_mem_pool_free(&block[1]);
k_mem_pool_free(&block[6]);
k_mem_pool_free(&block[7]);
}
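An illustrative layout for the case this removed test exercised (block numbering follows the test's own comments; the four-way split is the legacy sys_mem_pool rule, sizes are left symbolic):

/*
 * A block splits into exactly four children, and only the four siblings
 * of one parent can be merged back together:
 *
 *   parent A: [0][1][2][3]     parent B: [4][5][6][7]
 *
 * After freeing blocks 2-5, {2,3} belong to parent A and {4,5} to parent
 * B.  Neither parent has all four children free, so nothing larger can be
 * reassembled and the BLK_SIZE_MAX request fails with -EAGAIN once the
 * timeout expires, as asserted above.
 */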


@@ -1,71 +0,0 @@
/*
* Copyright (c) 2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <ztest.h>
#define TIMEOUT K_MSEC(2000)
#define BLK_SIZE_MIN 16
#define BLK_SIZE_MID 32
#define BLK_SIZE_MAX 256
#define BLK_NUM_MIN 32
#define BLK_NUM_MAX 2
#define BLK_ALIGN BLK_SIZE_MIN
K_MEM_POOL_DEFINE(mpool3, BLK_SIZE_MIN, BLK_SIZE_MAX, BLK_NUM_MAX, BLK_ALIGN);
/*test cases*/
/**
* @brief Verify blocks of different sizes cannot be merged.
*
* @ingroup kernel_memory_pool_tests
*
* @details The merging algorithm cannot combine adjacent free blocks
* of different sizes
* Test steps: 1. allocate 14 blocks in different sizes
* 2. free block [2~8], in different sizes
* 3. request a big block verify blocks [2~8] can't be merged
* 4. tear down, free blocks [0, 1, 9~13]
*/
void test_mpool_alloc_merge_failed_diff_size(void)
{
/* The heap backend doesn't use the splitting mechanism tested
* here, and in fact is significantly more fragmentation
* resistant and succeeds at the "failed" allocation desired
* below.
*/
if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
ztest_test_skip();
}
struct k_mem_block block[BLK_NUM_MIN], block_fail;
size_t block_size[] = {
BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN,
BLK_SIZE_MID, BLK_SIZE_MID, BLK_SIZE_MID,
BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN,
BLK_SIZE_MID, BLK_SIZE_MID, BLK_SIZE_MID
};
int block_count = ARRAY_SIZE(block_size);
for (int i = 0; i < block_count; i++) {
/* 1. allocate blocks in different sizes*/
zassert_true(k_mem_pool_alloc(&mpool3, &block[i], block_size[i],
K_NO_WAIT) == 0, NULL);
}
/* 2. free block [2~8], in different sizes*/
for (int i = 2; i < 9; i++) {
k_mem_pool_free(&block[i]);
}
/* 3. request a big block, expected failed to merge*/
zassert_true(k_mem_pool_alloc(&mpool3, &block_fail, BLK_SIZE_MAX,
TIMEOUT) == -EAGAIN, NULL);
/* 4. test case tear down*/
k_mem_pool_free(&block[0]);
k_mem_pool_free(&block[1]);
for (int i = 9; i < block_count; i++) {
k_mem_pool_free(&block[i]);
}
}


@@ -1,7 +1,3 @@
tests:
kernel.memory_pool.concept:
tags: kernel mem_pool
kernel.memory_pool.concept.legacy:
tags: kernel mem_pool
extra_configs:
- CONFIG_MEM_POOL_HEAP_BACKEND=n


@@ -1,7 +1,3 @@
tests:
kernel.memory_pool.threadsafe:
tags: kernel mem_pool
kernel.memory_pool.threadsafe.legacy:
tags: kernel mem_pool
extra_configs:
- CONFIG_MEM_POOL_HEAP_BACKEND=n