tests/kernel: Fill allocation robustly for mpool heap backend

These five tests (mbox_api, mheap_api_concept, msgq_api, pipe_api and
queue) all had test cases where they needed a mem_pool allocation to
FAIL.  And they are all written to assume the behavior of the original
allocator and not the more general k_heap code, which actually
succeeds in a bunch of these cases.

* Even a very small heap reserves enough metadata memory to cover the
  very small minimum block size, and that spare space can be re-used
  to satisfy an allocation.  So you can't assume a small heap is full.

* Calculating the number of blocks as "num_blocks * max size /
  minimum size" and allocating that many does not fill the heap,
  because the conservative metadata reservation leaves some space
  unused.

So these have all been modified to "fill" a heap by iteratively
allocating until failure.

Also, this fixes a benign overrun bug in mbox.  The test code would
insert a "big" message by reading past the end of the small message
buffer.  This didn't fail only because the small buffer happened to be
one element of an array of messages, so the out-of-bounds read landed
in the adjacent, validly defined entries.  It was still an overrun
worth fixing.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
Andy Ross 2020-04-07 10:22:02 -07:00 committed by Andrew Boie
commit 987658dcee
6 changed files with 50 additions and 19 deletions

View file

@ -62,6 +62,9 @@ static char data[MAX_INFO_TYPE][MAIL_LEN] = {
"specify target/source thread, using a memory block"
};
static char big_msg_data[256]
= "Large message buffer, too big for mem_pool to receive";
static void async_put_sema_give(void *p1, void *p2, void *p3)
{
k_sem_give(&sync_sema);
@ -199,8 +202,8 @@ static void tmbox_put(struct k_mbox *pmbox)
* but size is bigger than what the mem_pool can handle at
* that point of time
*/
mmsg.size = sizeof(data[1]) * 2;
mmsg.tx_data = data[1];
mmsg.size = sizeof(big_msg_data);
mmsg.tx_data = big_msg_data;
mmsg.tx_block.data = NULL;
mmsg.tx_target_thread = K_ANY;
zassert_true(k_mbox_put(pmbox, &mmsg, TIMEOUT) == 0, NULL);
@ -414,7 +417,7 @@ static void tmbox_get(struct k_mbox *pmbox)
* pool block via data_block_get
*/
mmsg.rx_source_thread = K_ANY;
mmsg.size = MAIL_LEN * 2;
mmsg.size = sizeof(big_msg_data);
zassert_true(k_mbox_get(pmbox, &mmsg, NULL, K_FOREVER) == 0,
NULL);

View file

@ -24,23 +24,25 @@
*/
void test_mheap_malloc_free(void)
{
void *block[BLK_NUM_MAX], *block_fail;
void *block[2 * BLK_NUM_MAX], *block_fail;
int nb;
for (int i = 0; i < BLK_NUM_MAX; i++) {
for (nb = 0; nb < ARRAY_SIZE(block); nb++) {
/**
* TESTPOINT: This routine provides traditional malloc()
* semantics. Memory is allocated from the heap memory pool.
*/
block[i] = k_malloc(i);
/** TESTPOINT: Address of the allocated memory if successful;*/
zassert_not_null(block[i], NULL);
block[nb] = k_malloc(BLK_SIZE_MIN);
if (block[nb] == NULL) {
break;
}
}
block_fail = k_malloc(BLK_SIZE_MIN);
/** TESTPOINT: Return NULL if fail.*/
zassert_is_null(block_fail, NULL);
for (int i = 0; i < BLK_NUM_MAX; i++) {
for (int i = 0; i < nb; i++) {
/**
* TESTPOINT: This routine provides traditional free()
* semantics. The memory being returned must have been allocated

View file

@ -63,6 +63,15 @@ void test_mheap_min_block_size(void)
{
void *block[BLK_NUM_MAX], *block_fail;
/* The k_heap backend doesn't have the splitting behavior
* expected here, this test is too specific, and a more
* general version of the same test is available in
* test_mheap_malloc_free()
*/
if (IS_ENABLED(CONFIG_MEM_POOL_HEAP_BACKEND)) {
ztest_test_skip();
}
/**
* TESTPOINT: The heap memory pool also defines a minimum block
* size of 64 bytes.

View file

@ -307,7 +307,7 @@ void test_msgq_alloc(void)
k_msgq_cleanup(&kmsgq_test_alloc);
/** Requesting buffer allocation from the test pool.*/
ret = k_msgq_alloc_init(&kmsgq_test_alloc, MSG_SIZE * 64, MSGQ_LEN);
ret = k_msgq_alloc_init(&kmsgq_test_alloc, MSG_SIZE * 128, MSGQ_LEN);
zassert_true(ret == -ENOMEM,
"resource pool is smaller then requested buffer");

View file

@ -312,8 +312,8 @@ void test_half_pipe_get_put(void)
*/
void test_half_pipe_saturating_block_put(void)
{
int r[3];
struct k_mem_block blocks[3];
int nb;
struct k_mem_block blocks[16];
/**TESTPOINT: thread-thread data passing via pipe*/
k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
@ -324,12 +324,19 @@ void test_half_pipe_saturating_block_put(void)
k_msleep(10);
/* Ensure half the mempool is still queued in the pipe */
r[0] = k_mem_pool_alloc(&mpool, &blocks[0], BYTES_TO_WRITE, K_NO_WAIT);
r[1] = k_mem_pool_alloc(&mpool, &blocks[1], BYTES_TO_WRITE, K_NO_WAIT);
r[2] = k_mem_pool_alloc(&mpool, &blocks[2], BYTES_TO_WRITE, K_NO_WAIT);
zassert_true(r[0] == 0 && r[1] == 0 && r[2] == -ENOMEM, NULL);
k_mem_pool_free(&blocks[0]);
k_mem_pool_free(&blocks[1]);
for (nb = 0; nb < ARRAY_SIZE(blocks); nb++) {
if (k_mem_pool_alloc(&mpool, &blocks[nb],
BYTES_TO_WRITE, K_NO_WAIT) != 0) {
break;
}
}
/* Must have allocated two blocks, and pool must be full */
zassert_true(nb >= 2 && nb < ARRAY_SIZE(blocks), NULL);
for (int i = 0; i < nb; i++) {
k_mem_pool_free(&blocks[i]);
}
tpipe_get(&khalfpipe, K_FOREVER);
@ -376,7 +383,7 @@ void test_pipe_alloc(void)
zassert_false(k_pipe_alloc_init(&pipe_test_alloc, 0), NULL);
k_pipe_cleanup(&pipe_test_alloc);
ret = k_pipe_alloc_init(&pipe_test_alloc, 1024);
ret = k_pipe_alloc_init(&pipe_test_alloc, 2048);
zassert_true(ret == -ENOMEM,
"resource pool max block size is not smaller then requested buffer");
}

View file

@ -273,6 +273,16 @@ static void tqueue_alloc(struct k_queue *pqueue)
*/
void test_queue_alloc(void)
{
struct k_mem_block block;
/* The mem_pool_fail pool is supposed to be too small to
* succeed any allocations, but in fact with the heap backend
* there's some base minimal memory in there that can be used.
* Make sure it's really truly full.
*/
while (k_mem_pool_alloc(&mem_pool_fail, &block, 1, K_NO_WAIT) == 0) {
}
k_queue_init(&queue);
tqueue_alloc(&queue);