kernel: Remove deprecated k_mem_pool_defrag code

Remove references to k_mem_pool_defrag and any related bits associated
with mem_pool defrag that don't make sense anymore.

Signed-off-by: Kumar Gala <kumar.gala@linaro.org>
This commit is contained in:
Kumar Gala 2017-11-28 11:41:18 -06:00 committed by Anas Nashif
commit a2caf36103
8 changed files with 0 additions and 147 deletions

View file

@@ -3662,17 +3662,6 @@ extern int k_mem_pool_alloc(struct k_mem_pool *pool, struct k_mem_block *block,
*/
extern void k_mem_pool_free(struct k_mem_block *block);
/**
* @brief Defragment a memory pool.
*
* This is a no-op API preserved for backward compatibility only.
*
* @param pool Unused
*
* @return N/A
*/
static inline void __deprecated k_mem_pool_defrag(struct k_mem_pool *pool) {}
/**
* @} end addtogroup mem_pool_apis
*/

View file

@@ -359,48 +359,6 @@ config NUM_PIPE_ASYNC_MSGS
endmenu
menu "Memory Pool Options"
choice
prompt "Memory pool block allocation policy"
default MEM_POOL_SPLIT_BEFORE_DEFRAG
help
This option specifies how a memory pool reacts if an unused memory
block of the required size is not available.
config MEM_POOL_SPLIT_BEFORE_DEFRAG
bool "Split a larger block before merging smaller blocks"
help
This option instructs a memory pool to try splitting a larger unused
block if an unused block of the required size is not available; only
if no such blocks exist will the memory pool try merging smaller unused
blocks. This policy attempts to limit the cost of performing automatic
partial defragmentation of the memory pool, at the cost of fragmenting
the memory pool's larger blocks.
config MEM_POOL_DEFRAG_BEFORE_SPLIT
bool "Merge smaller blocks before splitting a larger block"
help
This option instructs a memory pool to try merging smaller unused
blocks if an unused block of the required size is not available; only
if this does not generate a sufficiently large block will the memory
pool try splitting a larger unused block. This policy attempts to
preserve the memory pool's larger blocks, at the cost of performing
automatic partial defragmentations more frequently.
config MEM_POOL_SPLIT_ONLY
bool "Split a larger block, but never merge smaller blocks"
help
This option instructs a memory pool to try splitting a larger unused
block if an unused block of the required size is not available; if no
such blocks exist the block allocation operation fails. This policy
attempts to limit the cost of defragmenting the memory pool by avoiding
automatic partial defragmentation, at the cost of requiring the
application to explicitly request a full defragmentation of the memory
pool when an allocation fails. Depending on how a memory pool is used,
it may be more efficient for a memory pool to perform an occasional
full defragmentation than to perform frequent partial defragmentations.
endchoice
config HEAP_MEM_POOL_SIZE
int
prompt "Heap memory pool size (in bytes)"

View file

@@ -107,7 +107,6 @@ static pfunc func_array[] = {
/* mem pools */
(pfunc)k_mem_pool_alloc,
(pfunc)k_mem_pool_free,
(pfunc)k_mem_pool_defrag,
(pfunc)k_malloc,
(pfunc)k_free,

View file

@@ -35,7 +35,6 @@ tc_start() - Test Memory Pool and Heap APIs
Testing k_mem_pool_alloc(K_NO_WAIT) ...
Testing k_mem_pool_alloc(timeout) ...
Testing k_mem_pool_alloc(K_FOREVER) ...
Testing k_mem_pool_defragment() ...
Testing k_malloc() and k_free() ...
===================================================================
PASS - RegressionTask.

View file

@@ -25,13 +25,10 @@
#define NUM_BLOCKS 64
#define DEFRAG_BLK_TEST 2222
/* size of stack area used by each thread */
#define STACKSIZE 512
K_SEM_DEFINE(ALTERNATE_SEM, 0, 1);
K_SEM_DEFINE(DEFRAG_SEM, 0, 1);
K_SEM_DEFINE(REGRESS_SEM, 0, 1);
K_SEM_DEFINE(HELPER_SEM, 0, 1);
@@ -84,19 +81,6 @@ static struct TEST_CASE getwt_set[] = {
{ &block_list[4], &POOL_ID, 256, TENTH_SECOND, -EAGAIN }
};
static struct TEST_CASE defrag[] = {
{ &block_list[0], &POOL_ID, 64, 0, 0 },
{ &block_list[1], &POOL_ID, 64, 0, 0 },
{ &block_list[2], &POOL_ID, 64, 0, 0 },
{ &block_list[3], &POOL_ID, 64, 0, 0 },
{ &block_list[4], &POOL_ID, 256, 0, 0 },
{ &block_list[5], &POOL_ID, 256, 0, 0 },
{ &block_list[6], &POOL_ID, 256, 0, 0 },
{ &block_list[7], &POOL_ID, 1024, 0, 0 },
{ &block_list[8], &POOL_ID, 1024, 0, 0 },
{ &block_list[9], &POOL_ID, 1024, 0, 0 }
};
/**
*
* @brief Compare the two blocks
@@ -357,72 +341,6 @@ int pool_block_get_wait_test(void)
return TC_PASS;
}
/**
*
* @brief Task responsible for defragmenting the pool POOL_ID
*
* @return N/A
*/
void defrag_task(void)
{
k_sem_take(&DEFRAG_SEM, K_FOREVER); /* Wait to be activated */
k_mem_pool_defrag(&POOL_ID);
k_sem_give(&REGRESS_SEM); /* defrag_task is finished */
}
/**
*
* pool_defrag_test -
*
* @return TC_PASS on success, TC_FAIL on failure
*/
int pool_defrag_test(void)
{
int rv;
struct k_mem_block new_block;
/* Get a bunch of blocks */
rv = pool_block_get_work("k_mem_pool_alloc", pool_block_get_func,
defrag, ARRAY_SIZE(defrag));
if (rv != TC_PASS) {
return TC_FAIL;
}
k_sem_give(&DEFRAG_SEM); /* Activate defrag_task */
/*
* Block on getting another block from the pool.
* This will allow defrag_task to execute so that we can get some
* better code coverage. 500 ms is expected to more than sufficient
* time for defrag_task to finish.
*/
rv = k_mem_pool_alloc(&POOL_ID, &new_block, DEFRAG_BLK_TEST, 500);
if (rv != -EAGAIN) {
TC_ERROR("k_mem_pool_alloc() returned %d, not %d\n", rv,
-EAGAIN);
return TC_FAIL;
}
rv = k_sem_take(&REGRESS_SEM, K_NO_WAIT);
if (rv != 0) {
TC_ERROR("defrag_task did not finish in allotted time!\n");
return TC_FAIL;
}
/* Free the allocated blocks */
free_blocks(defrag, ARRAY_SIZE(defrag));
return TC_PASS;
}
/**
*
* @brief Alternate task in the test suite
@@ -557,10 +475,6 @@ void test_mem_pool(void)
tc_rc = pool_block_get_wait_test();
zassert_equal(tc_rc, TC_PASS, "pool block wait failure");
TC_PRINT("Testing k_mem_pool_defragment() ...\n");
tc_rc = pool_defrag_test();
zassert_equal(tc_rc, TC_PASS, "pool defrag failure");
tc_rc = pool_malloc_test();
zassert_equal(tc_rc, TC_PASS, "pool malloc failure");
}
@@ -569,9 +483,6 @@ void test_mem_pool(void)
K_THREAD_DEFINE(t_alternate, STACKSIZE, alternate_task, NULL, NULL, NULL,
6, 0, K_NO_WAIT);
K_THREAD_DEFINE(t_defrag, STACKSIZE, defrag_task, NULL, NULL, NULL,
7, 0, K_NO_WAIT);
K_THREAD_DEFINE(t_helper, STACKSIZE, helper_task, NULL, NULL, NULL,
7, 0, K_NO_WAIT);

View file

@@ -33,7 +33,6 @@ void test_mpool_alloc_merge_failed_diff_parent(void)
k_mem_pool_free(&block[i]);
}
/* 3. request a big block, expected failed to merge*/
k_mem_pool_defrag(&mpool1);
zassert_true(k_mem_pool_alloc(&mpool1, &block_fail, BLK_SIZE_MAX,
TIMEOUT) == -EAGAIN, NULL);

View file

@@ -45,7 +45,6 @@ void test_mpool_alloc_merge_failed_diff_size(void)
k_mem_pool_free(&block[i]);
}
/* 3. request a big block, expected failed to merge*/
k_mem_pool_defrag(&mpool3);
zassert_true(k_mem_pool_alloc(&mpool3, &block_fail, BLK_SIZE_MAX,
TIMEOUT) == -EAGAIN, NULL);

View file

@@ -71,7 +71,6 @@ static void tmpool_api(void *p1, void *p2, void *p3)
k_mem_pool_free(&block[i]);
}
}
k_mem_pool_defrag(pool);
k_sem_give(&sync_sema);
}