tests/kernel/mem_pool: Adjust tests to work with k_heap backend

The original k_mem_pool tests were a mix of code that exercises routine
allocator behavior, code that exercises the synchronization layer above
it, and a significant amount of code that made low-level assumptions
about the specific memory layout of the original allocator; the new
k_heap backend does not run out of memory in exactly the same way.

Adjust the expectations as needed for the new backend.  A few test
cases that were too layout-specific have been skipped.  Most have been
generalized (for example, by iteratively allocating until the pool is
exhausted instead of assuming it has no space left after exactly N
allocations).
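
For reference, a minimal sketch of that generalized pattern against the
legacy k_mem_pool API.  The helper name, the MAX_BLOCKS scan bound, and
the standalone framing are illustrative assumptions, not code from this
commit; only k_mem_pool_alloc()/K_NO_WAIT are the real APIs the tests use.

#include <zephyr.h>

#define MAX_BLOCKS 64   /* upper bound for the scan, not an expected count */

static struct k_mem_block blocks[MAX_BLOCKS];

/* Allocate minimum-size blocks until the pool refuses, and return how
 * many succeeded.  With the k_heap backend the caller then asserts a
 * lower bound (nb >= BLK_NUM_MIN) rather than an exact count.
 */
static int alloc_until_exhausted(struct k_mem_pool *pool, size_t blk_size)
{
        int nb;

        for (nb = 0; nb < MAX_BLOCKS; nb++) {
                if (k_mem_pool_alloc(pool, &blocks[nb], blk_size,
                                     K_NO_WAIT) != 0) {
                        break;  /* pool exhausted */
                }
        }
        return nb;
}

Cleanup then frees only the nb blocks actually handed out, which is the
shape of the loops adjusted in the diffs below.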

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
commit e582bc128a
Authored by Andy Ross on 2020-04-03 16:49:04 -07:00; committed by Andrew Boie
6 changed files with 124 additions and 10 deletions


@@ -27,6 +27,21 @@
 #define NUM_BLOCKS 64

+/* This test is written to a set of known allocation patterns and
+ * their results, making free assumptions about the fragmentation
+ * behavior of the original mem_pool implementation.  The newer k_heap
+ * backend is more flexible, and also has allocation behavior that
+ * depends on word size.  So we keep separate tables for different
+ * configs.
+ */
+#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
+# ifdef CONFIG_64BIT
+#  define HEAP64
+# else
+#  define HEAP32
+# endif
+#endif

 /* size of stack area used by each thread */
 #define STACKSIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)
@@ -63,7 +78,11 @@ static struct TEST_CASE get_set[] = {
        { &block_list[5], &POOL_ID, 256, 0, 0 },
        { &block_list[6], &POOL_ID, 512, 0, 0 },
        { &block_list[7], &POOL_ID, 1024, 0, 0 },
+#if defined(HEAP32) || defined(HEAP64)
+       { &block_list[8], &POOL_ID, 2048, 0, 0 },
+#else
        { &block_list[8], &POOL_ID, 2048, 0, -ENOMEM },
+#endif
        { &block_list[9], &POOL_ID, 4096, 0, -ENOMEM }
 };
@@ -71,16 +90,32 @@ static struct TEST_CASE get_set2[] = {
        { &block_list[0], &POOL_ID, 4096, 0, 0 },
        { &block_list[1], &POOL_ID, 2048, 0, -ENOMEM },
        { &block_list[2], &POOL_ID, 1024, 0, -ENOMEM },
+#if defined(HEAP32)
+       { &block_list[3], &POOL_ID, 512, 0, -ENOMEM },
+       { &block_list[4], &POOL_ID, 256, 0, 0 }
+#elif defined(HEAP64)
+       { &block_list[3], &POOL_ID, 512, 0, 0 },
+       { &block_list[4], &POOL_ID, 256, 0, -ENOMEM }
+#else
        { &block_list[3], &POOL_ID, 512, 0, -ENOMEM },
        { &block_list[4], &POOL_ID, 256, 0, -ENOMEM }
+#endif
 };

 static struct TEST_CASE getwt_set[] = {
        { &block_list[0], &POOL_ID, 4096, TENTH_SECOND, 0 },
        { &block_list[1], &POOL_ID, 2048, TENTH_SECOND, -EAGAIN },
        { &block_list[2], &POOL_ID, 1024, TENTH_SECOND, -EAGAIN },
+#if defined(HEAP32)
+       { &block_list[3], &POOL_ID, 512, TENTH_SECOND, -EAGAIN },
+       { &block_list[4], &POOL_ID, 256, TENTH_SECOND, 0 }
+#elif defined(HEAP64)
+       { &block_list[3], &POOL_ID, 512, TENTH_SECOND, 0 },
+       { &block_list[4], &POOL_ID, 256, TENTH_SECOND, -EAGAIN }
+#else
        { &block_list[3], &POOL_ID, 512, TENTH_SECOND, -EAGAIN },
        { &block_list[4], &POOL_ID, 256, TENTH_SECOND, -EAGAIN }
+#endif
 };
@@ -282,6 +317,11 @@ void alternate_task(void)
  * amount of usable space, due to the hidden block descriptor info the
  * kernel adds at the start of any block allocated from this memory pool.)
  *
+ * NOTE: when CONFIG_MEM_POOL_HEAP_BACKEND is in use, the splitting
+ * algorithm exercised by this test is not used.  In fact, the k_heap
+ * backend is significantly more fragmentation resistant, so calls
+ * expected to fail here actually succeed.  Those checks are disabled
+ * here.
 *
 * @see k_malloc(), k_free()
 */
@@ -294,9 +334,11 @@ static void test_pool_malloc(void)
        block[0] = k_malloc(150);
        zassert_not_null(block[0], "150 byte allocation failed");

+#ifndef CONFIG_MEM_POOL_HEAP_BACKEND
        /* ensure a small block can no longer be allocated */
        block[1] = k_malloc(16);
        zassert_is_null(block[1], "16 byte allocation did not fail");
+#endif

        /* return the large block */
        k_free(block[0]);
@@ -305,9 +347,11 @@ static void test_pool_malloc(void)
        block[0] = k_malloc(16);
        zassert_not_null(block[0], "16 byte allocation 0 failed");

+#ifndef CONFIG_MEM_POOL_HEAP_BACKEND
        /* ensure a large block can no longer be allocated */
        block[1] = k_malloc(80);
        zassert_is_null(block[1], "80 byte allocation did not fail");
+#endif

        /* ensure all remaining small blocks can be allocated */
        for (j = 1; j < 4; j++) {
@@ -315,8 +359,10 @@ static void test_pool_malloc(void)
                zassert_not_null(block[j], "16 byte allocation %d failed\n", j);
        }

+#ifndef CONFIG_MEM_POOL_HEAP_BACKEND
        /* ensure a small block can no longer be allocated */
        zassert_is_null(k_malloc(8), "8 byte allocation did not fail");
+#endif

        /* return the small blocks to pool in a "random" order */
        k_free(block[2]);
@@ -328,8 +374,11 @@ static void test_pool_malloc(void)
        block[0] = k_malloc(100);
        zassert_not_null(block[0], "100 byte allocation failed");

+#ifndef CONFIG_MEM_POOL_HEAP_BACKEND
        /* ensure a small block can no longer be allocated */
        zassert_is_null(k_malloc(32), "32 byte allocation did not fail");
+#endif

        /* ensure overflow detection is working */
        zassert_is_null(k_malloc(0xffffffff), "overflow check failed");


@@ -102,6 +102,14 @@ void test_mpool_alloc_size(void)
        size_t size = BLK_SIZE_MAX;
        int i = 0;

+       /* The sys_heap backend doesn't use the specific block
+        * breaking algorithm tested here.  This is a test of the
+        * legacy sys_mem_pool allocator only.
+        */
+       if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
+               ztest_test_skip();
+       }
+
        /**TESTPOINT: The memory pool allows blocks to be repeatedly partitioned
         * into quarters, down to blocks of @a min_size bytes long.
         */
@@ -144,14 +152,29 @@
  */
 void test_mpool_alloc_timeout(void)
 {
-       static struct k_mem_block block[BLK_NUM_MIN], fblock;
+       static struct k_mem_block block[2 * BLK_NUM_MIN], fblock;
        s64_t tms;
+       int nb;

-       for (int i = 0; i < BLK_NUM_MIN; i++) {
-               zassert_equal(k_mem_pool_alloc(&kmpool, &block[i], BLK_SIZE_MIN,
-                             K_NO_WAIT), 0, NULL);
+       /* allocate all blocks */
+       for (nb = 0; nb < ARRAY_SIZE(block); nb++) {
+               if (k_mem_pool_alloc(&kmpool, &block[nb], BLK_SIZE_MIN,
+                                    K_NO_WAIT) != 0) {
+                       break;
+               }
        }

+       /* The original mem_pool would always be able to allocate
+        * exactly "min blocks" before running out of space; the
+        * heuristics used to size the sys_heap backend are more
+        * flexible.
+        */
+#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
+       zassert_true(nb >= BLK_NUM_MIN, NULL);
+#else
+       zassert_true(nb == BLK_NUM_MIN, NULL);
+#endif
+
        /** TESTPOINT: Use K_NO_WAIT to return without waiting*/
        /** TESTPOINT: @retval -ENOMEM Returned without waiting*/
        zassert_equal(k_mem_pool_alloc(&kmpool, &fblock, BLK_SIZE_MIN,
@@ -166,7 +189,7 @@ void test_mpool_alloc_timeout(void)
         */
        zassert_true(k_uptime_delta(&tms) >= TIMEOUT_MS, NULL);

-       for (int i = 0; i < BLK_NUM_MIN; i++) {
+       for (int i = 0; i < nb; i++) {
                k_mem_pool_free(&block[i]);
                block[i].data = NULL;
        }


@@ -25,6 +25,14 @@ void test_mpool_alloc_size_roundup(void)
 {
        struct k_mem_block block[BLK_NUM_MAX], block_fail;

+       /* This test is written to assume a specific heap layout; in
+        * fact the sys_heap backend can routinely see more than "min
+        * blocks" allocated.
+        */
+       if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
+               ztest_test_skip();
+       }
+
        /**
         * TESTPOINT: When an application issues a request for a memory block,
         * the memory pool first determines the size of the smallest block that


@@ -49,16 +49,32 @@ void tmpool_alloc_wait_ok(void *p1, void *p2, void *p3)
  */
 void test_mpool_alloc_wait_prio(void)
 {
-       struct k_mem_block block[BLK_NUM_MIN];
+       struct k_mem_block block[2 * BLK_NUM_MIN];
        k_tid_t tid[THREAD_NUM];
+       int nb;

        k_sem_init(&sync_sema, 0, THREAD_NUM);

        /*allocated up all blocks*/
-       for (int i = 0; i < BLK_NUM_MIN; i++) {
-               zassert_true(k_mem_pool_alloc(&mpool1, &block[i], BLK_SIZE_MIN,
-                            K_NO_WAIT) == 0, NULL);
+       for (nb = 0; nb < ARRAY_SIZE(block); nb++) {
+               if (k_mem_pool_alloc(&mpool1, &block[nb], BLK_SIZE_MIN,
+                                    K_NO_WAIT) != 0) {
+                       break;
+               }
        }

+       /* The original mem_pool would always be able to allocate
+        * exactly "min blocks" before running out of space; the
+        * heuristics used to size the sys_heap backend are more
+        * flexible.
+        */
+#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
+       zassert_true(nb >= BLK_NUM_MIN, "nb %d want %d", nb, BLK_NUM_MIN);
+#else
+       zassert_true(nb == BLK_NUM_MIN, NULL);
+#endif
+
        /**
         * TESTPOINT: when a suitable memory block becomes available, it is
         * given to the highest-priority thread that has waited the longest
@@ -93,7 +109,7 @@ void test_mpool_alloc_wait_prio(void)
                k_thread_abort(tid[i]);
        }
        k_mem_pool_free(&block_ok);
-       for (int i = 1; i < BLK_NUM_MIN; i++) {
+       for (int i = 1; i < nb; i++) {
                k_mem_pool_free(&block[i]);
        }
 }


@@ -25,6 +25,15 @@ extern struct k_mem_pool mpool1;
  */
 void test_mpool_alloc_merge_failed_diff_parent(void)
 {
+       /* The heap backend doesn't use the splitting mechanism tested
+        * here, and in fact is significantly more fragmentation
+        * resistant, so it succeeds at the allocation this test
+        * expects to fail.
+        */
+       if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
+               ztest_test_skip();
+       }
+
        struct k_mem_block block[BLK_NUM_MIN], block_fail;

        for (int i = 0; i < BLK_NUM_MIN; i++) {


@@ -30,6 +30,15 @@ K_MEM_POOL_DEFINE(mpool3, BLK_SIZE_MIN, BLK_SIZE_MAX, BLK_NUM_MAX, BLK_ALIGN);
  */
 void test_mpool_alloc_merge_failed_diff_size(void)
 {
+       /* The heap backend doesn't use the splitting mechanism tested
+        * here, and in fact is significantly more fragmentation
+        * resistant, so it succeeds at the allocation this test
+        * expects to fail.
+        */
+       if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
+               ztest_test_skip();
+       }
+
        struct k_mem_block block[BLK_NUM_MIN], block_fail;
        size_t block_size[] = {
                BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN, BLK_SIZE_MIN,