unified: Implement memory pools

Due to the memory pool structure, only static declaration of
memory pools is possible.
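
For example (illustrative name and sizes), a pool is declared at build
time with the K_MEMORY_POOL_DEFINE() macro added by this change:

    K_MEMORY_POOL_DEFINE(my_pool, 64, 1024, 4);

i.e. four 1024-byte maximal blocks that can be split down into blocks
as small as 64 bytes.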

Change-Id: I4797ed88fd2ac3b7812ff26e552e1745611c4575
Signed-off-by: Dmitriy Korovkin <dmitriy.korovkin@windriver.com>
Dmitriy Korovkin authored 2016-09-01 18:14:17 -04:00; committed by Benjamin Walsh
commit 3c426888a1
9 changed files with 938 additions and 99 deletions


@@ -245,6 +245,18 @@ SECTIONS
_k_event_list_end = .;
} GROUP_LINK_IN(RAMABLE_REGION)
SECTION_PROLOGUE(_k_memory_pool, (OPTIONAL),)
{
*(._k_memory_pool.struct*)
KEEP(*(SORT_BY_NAME("._k_memory_pool.struct*")))
_k_mem_pool_start = .;
*(._k_memory_pool.*)
KEEP(*(SORT_BY_NAME("._k_memory_pool*")))
_k_mem_pool_end = .;
} GROUP_LINK_IN(RAMABLE_REGION)
__data_ram_end = .;
SECTION_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),)


@@ -206,6 +206,17 @@ SECTIONS
_k_event_list_end = .;
} GROUP_LINK_IN(RAM)
SECTION_PROLOGUE(_k_memory_pool, (OPTIONAL),)
{
*(._k_memory_pool.struct*)
KEEP(*(SORT_BY_NAME("._k_memory_pool.struct*")))
_k_mem_pool_start = .;
*(._k_memory_pool.*)
KEEP(*(SORT_BY_NAME("._k_memory_pool*")))
_k_mem_pool_end = .;
} GROUP_LINK_IN(RAM)
__data_ram_end = .;
SECTION_PROLOGUE(_BSS_SECTION_NAME, (NOLOAD OPTIONAL),)


@@ -1036,26 +1036,207 @@ static inline int k_mem_map_num_used_get(struct k_mem_map *map)
/* memory pools */
struct k_mem_pool {
_wait_q_t wait_q;
int max_block_size;
int num_max_blocks;
/*
* Memory pool requires a buffer and two arrays of structures for the
* memory block accounting:
* A set of arrays of k_mem_pool_quad_block structures where each keeps a
* status of four blocks of memory.
*/
struct k_mem_pool_quad_block {
char *mem_blocks; /* pointer to the first of four memory blocks */
uint32_t mem_status; /* four bits, one per block. A set bit means the
memory block is free */
};
/*
* The memory pool mechanism uses one array of k_mem_pool_quad_block to account
* for blocks of one size. Block sizes go from maximal to minimal. Each successive
* block size is 4 times smaller than the previous one and thus requires a 4
* times larger array of k_mem_pool_quad_block structures to keep track of the
* memory blocks.
*/
/*
* The array of k_mem_pool_block_set keeps the information of each array of
* k_mem_pool_quad_block structures
*/
struct k_mem_pool_block_set {
int block_size; /* memory block size */
int nr_of_entries; /* nr of quad block structures in the array */
struct k_mem_pool_quad_block *quad_block;
int count;
};
/* Memory pool descriptor */
struct k_mem_pool {
int max_block_size;
int min_block_size;
int nr_of_maxblocks;
int nr_of_block_sets;
struct k_mem_pool_block_set *block_set;
char *bufblock;
_wait_q_t wait_q;
_DEBUG_TRACING_KERNEL_OBJECTS_NEXT_PTR(k_mem_pool);
};
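As a minimal sketch of how those status bits are meant to be read (a set bit
marks the corresponding block as free, matching init_one_memory_pool() and
free_existing_block() in mem_pool.c below):

	static inline int quad_block_is_free(struct k_mem_pool_quad_block *qb, int idx)
	{
		/* bit 'idx' of mem_status is set while block 'idx' is free */
		return (qb->mem_status & (1 << idx)) != 0;
	}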
/* cannot initialize pools statically */
#ifdef CONFIG_ARM
#define _SECTION_TYPE_SIGN "%"
#else
#define _SECTION_TYPE_SIGN "@"
#endif
/*
* Static memory pool initialization
*/
/*
* Use .altmacro to be able to recalculate values and pass them as string
* arguments when calling assembler macros recursively
*/
__asm__(".altmacro\n\t");
/*
* Recursively calls a macro
* The following global symbols need to be initialized:
* __memory_pool_max_block_size - maximal size of the memory block
* __memory_pool_min_block_size - minimal size of the memory block
* Notes:
* Global symbols are used due to the fact that the assembler macro allows
* only one argument to be passed with the % conversion
* Some assemblers do not support the division operation ("/"). To avoid it, >> 2
* is used instead of / 4.
* n_max argument needs to go first in the invoked macro, as some
* assemblers concatenate \name and %(\n_max * 4) arguments
* if \name goes first
*/
__asm__(".macro __do_recurse macro_name, name, n_max\n\t"
".ifge __memory_pool_max_block_size >> 2 -"
" __memory_pool_min_block_size\n\t\t"
"__memory_pool_max_block_size = __memory_pool_max_block_size >> 2\n\t\t"
"\\macro_name %(\\n_max * 4) \\name\n\t"
".endif\n\t"
".endm\n");
/*
* Build quad blocks
* Macro allocates space in memory for the array of k_mem_pool_quad_block
* structures and recursively calls itself for the next array, 4 times
* larger.
* The following global symbols need to be initialized:
* __memory_pool_max_block_size - maximal size of the memory block
* __memory_pool_min_block_size - minimal size of the memory block
* __memory_pool_quad_block_size - sizeof(struct k_mem_pool_quad_block)
*/
__asm__(".macro _build_quad_blocks n_max, name\n\t"
"_mem_pool_quad_blocks_\\name\\()_\\n_max:\n\t"
".skip __memory_pool_quad_block_size * \\n_max >> 2\n\t"
".if \\n_max % 4\n\t\t"
".skip __memory_pool_quad_block_size\n\t"
".endif\n\t"
"__do_recurse _build_quad_blocks \\name \\n_max\n\t"
".endm\n");
/*
* Build block sets and initialize them
* Macro initializes the k_mem_pool_block_set structure and
* recursively calls itself for the next one.
* The following global symbols need to be initialized:
* __memory_pool_max_block_size - maximal size of the memory block
* __memory_pool_min_block_size - minimal size of the memory block
* __memory_pool_block_set_count, the number of the elements in the
* block set array must be set to 0. The macro calculates its real
* value.
* Since the macro initializes pointers to an array of k_mem_pool_quad_block
* structures, _build_quad_blocks must be called prior to it.
*/
__asm__(".macro _build_block_set n_max, name\n\t"
".int __memory_pool_max_block_size\n\t" /* block_size */
".if \\n_max % 4\n\t\t"
".int \\n_max >> 2 + 1\n\t" /* nr_of_entries */
".else\n\t\t"
".int \\n_max >> 2\n\t"
".endif\n\t"
".int _mem_pool_quad_blocks_\\name\\()_\\n_max\n\t" /* quad_block */
".int 0\n\t" /* count */
"__memory_pool_block_set_count = __memory_pool_block_set_count + 1\n\t"
"__do_recurse _build_block_set \\name \\n_max\n\t"
".endm\n");
/*
* Build a memory pool structure and initialize it
* Macro uses __memory_pool_block_set_count global symbol,
* block set addresses and buffer address, it may be called only after
* _build_block_set
*/
__asm__(".macro _build_mem_pool name, min_size, max_size, n_max\n\t"
".pushsection ._k_memory_pool,\"aw\","
_SECTION_TYPE_SIGN "progbits\n\t"
".globl \\name\n\t"
"\\name:\n\t"
".int \\max_size\n\t" /* max_block_size */
".int \\min_size\n\t" /* min_block_size */
".int \\n_max\n\t" /* nr_of_maxblocks */
".int __memory_pool_block_set_count\n\t" /* nr_of_block_sets */
".int _mem_pool_block_sets_\\name\n\t" /* block_set */
".int _mem_pool_buffer_\\name\n\t" /* bufblock */
".int 0\n\t" /* wait_q->head */
".int 0\n\t" /* wait_q->next */
".popsection\n\t"
".endm\n");
#define _MEMORY_POOL_QUAD_BLOCK_DEFINE(name, min_size, max_size, n_max) \
__asm__(".pushsection ._k_memory_pool.struct,\"aw\"," \
_SECTION_TYPE_SIGN "progbits\n\t"); \
__asm__("__memory_pool_min_block_size = " STRINGIFY(min_size) "\n\t"); \
__asm__("__memory_pool_max_block_size = " STRINGIFY(max_size) "\n\t"); \
__asm__("_build_quad_blocks " STRINGIFY(n_max) " " \
STRINGIFY(name) "\n\t"); \
__asm__(".popsection\n\t")
#define _MEMORY_POOL_BLOCK_SETS_DEFINE(name, min_size, max_size, n_max) \
__asm__("__memory_pool_block_set_count = 0\n\t"); \
__asm__("__memory_pool_max_block_size = " STRINGIFY(max_size) "\n\t"); \
__asm__(".pushsection ._k_memory_pool.struct,\"aw\"," \
_SECTION_TYPE_SIGN "progbits\n\t"); \
__asm__("_mem_pool_block_sets_" STRINGIFY(name) ":\n\t"); \
__asm__("_build_block_set " STRINGIFY(n_max) " " \
STRINGIFY(name) "\n\t"); \
__asm__("_mem_pool_block_set_count_" STRINGIFY(name) ":\n\t"); \
__asm__(".int __memory_pool_block_set_count\n\t"); \
__asm__(".popsection\n\t"); \
extern uint32_t _mem_pool_block_set_count_##name; \
extern struct k_mem_pool_block_set _mem_pool_block_sets_##name[]
#define _MEMORY_POOL_BUFFER_DEFINE(name, max_size, n_max) \
char __noinit _mem_pool_buffer_##name[(max_size) * (n_max)]
#define K_MEMORY_POOL_DEFINE(name, min_size, max_size, n_max) \
_MEMORY_POOL_QUAD_BLOCK_DEFINE(name, min_size, max_size, n_max); \
_MEMORY_POOL_BLOCK_SETS_DEFINE(name, min_size, max_size, n_max); \
_MEMORY_POOL_BUFFER_DEFINE(name, max_size, n_max); \
__asm__("_build_mem_pool " STRINGIFY(name) " " STRINGIFY(min_size) " " \
STRINGIFY(max_size) " " STRINGIFY(n_max) "\n\t"); \
extern struct k_mem_pool name
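Roughly, a definition such as K_MEMORY_POOL_DEFINE(p, 16, 256, 4) (an
illustrative pool with block sizes 256, 64 and 16) makes these assembler
macros emit the equivalent of:

	/* one quad-block entry per 4 blocks of each size */
	struct k_mem_pool_quad_block _mem_pool_quad_blocks_p_4[1];   /* 4 x 256-byte blocks */
	struct k_mem_pool_quad_block _mem_pool_quad_blocks_p_16[4];  /* 16 x 64-byte blocks */
	struct k_mem_pool_quad_block _mem_pool_quad_blocks_p_64[16]; /* 64 x 16-byte blocks */
	struct k_mem_pool_block_set _mem_pool_block_sets_p[3];
	char __noinit _mem_pool_buffer_p[256 * 4];
	struct k_mem_pool p = { ... };   /* placed in the ._k_memory_pool section */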
/*
* Dummy function that assigns the value of sizeof(struct k_mem_pool_quad_block)
* to the __memory_pool_quad_block_size absolute symbol.
* This function does not get called, but the compiler calculates the value and
* assigns it to the absolute symbol, which, in turn, is used by the assembler macros.
*/
static void __attribute__ ((used)) __k_mem_pool_quad_block_size_define(void)
{
__asm__(".globl __memory_pool_quad_block_size\n\t"
"__memory_pool_quad_block_size = %c0\n\t"
:
: "n"(sizeof(struct k_mem_pool_quad_block)));
}
/* XXX - review this computation */
#define K_MEM_POOL_SIZE(max_block_size, num_max_blocks) \
(sizeof(struct k_mem_pool) + ((max_block_size) * (num_max_blocks)))
extern void k_mem_pool_init(struct k_mem_pool *mem, int max_block_size,
int num_max_blocks);
extern int k_mem_pool_alloc(k_mem_pool_t id, struct k_mem_block *block,
extern int k_mem_pool_alloc(struct k_mem_pool *pool, struct k_mem_block *block,
int size, int32_t timeout);
extern void k_mem_pool_free(struct k_mem_block *block);
extern void k_mem_pool_defrag(k_mem_pool_t id);
extern void k_mem_pool_defrag(struct k_mem_pool *pool);
extern void *k_malloc(uint32_t size);
extern void k_free(void *p);
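A minimal usage sketch of this API (assuming a pool defined as
K_MEMORY_POOL_DEFINE(my_pool, 64, 1024, 4), with illustrative sizes):

	struct k_mem_block block;

	if (k_mem_pool_alloc(&my_pool, &block, 200, TICKS_NONE) == 0) {
		/* block.data points to a 256-byte block holding the
		 * 200 requested bytes
		 */
		k_mem_pool_free(&block);
	}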


@@ -381,9 +381,8 @@ static inline int task_mem_map_alloc(kmemory_map_t map, void **mptr,
/* memory pools */
#define k_block k_mem_block
#define kmemory_pool_t k_mem_pool_t
#if 0 /* unimplemented object */
#define kmemory_pool_t struct k_mem_pool *
#define pool_struct k_mem_pool
static inline int task_mem_pool_alloc(struct k_block *blockptr,
kmemory_pool_t pool_id,
@@ -398,8 +397,6 @@ static inline int task_mem_pool_alloc(struct k_block *blockptr,
#define task_malloc k_malloc
#define task_free k_free
#endif
/* message queues */
#define kfifo_t struct k_msgq *


@@ -287,4 +287,31 @@ config SEMAPHORE_GROUPS
both decrease the footprint as well as improve the performance of
the k_sem_give() routine.
choice
prompt "Memory pools auto-defragmentation policy"
default MEM_POOL_AD_AFTER_SEARCH_FOR_BIGGERBLOCK
help
Memory pool auto-defragmentation is performed if a memory
block of the requested size can not be found. Defragmentation
can be done:
Before trying to find a block in the next largest block set.
This is an attempt to preserve the memory pool's larger blocks
by fragmenting them only when necessary (i.e. at the cost of
doing more frequent auto-defragmentations).
After trying to find a block in the next largest block set.
This is an attempt to limit the cost of doing auto-defragmentations
by doing them only when necessary (i.e. at the cost of fragmenting
the memory pool's larger blocks).
config MEM_POOL_AD_NONE
bool "No auto-defragmentation"
config MEM_POOL_AD_BEFORE_SEARCH_FOR_BIGGERBLOCK
bool "Before trying to find a block in the next largest block set"
config MEM_POOL_AD_AFTER_SEARCH_FOR_BIGGERBLOCK
bool "After trying to find a block in the next largest block set"
endchoice
endmenu


@@ -22,6 +22,7 @@ obj-y += $(strip \
fifo.o \
stack.o \
mem_map.o \
mem_pool.o \
msg_q.o \
mailbox.o \
mem_pool.o \


@@ -21,35 +21,623 @@
#include <kernel.h>
#include <nano_private.h>
#include <misc/debug/object_tracing_common.h>
#include <toolchain.h>
#include <sections.h>
#include <sched.h>
#include <wait_q.h>
#include <misc/dlist.h>
#include <init.h>
void k_mem_pool_init(struct k_mem_pool *mem, int max_block_size,
int num_max_blocks)
#define _QUAD_BLOCK_AVAILABLE 0x0F
#define _QUAD_BLOCK_ALLOCATED 0x0
extern struct k_mem_pool _k_mem_pool_start[];
extern struct k_mem_pool _k_mem_pool_end[];
#if defined _HEAP_MEM_POOL
static struct k_mem_pool *heap_mem_pool = _HEAP_MEM_POOL;
#else
static struct k_mem_pool *heap_mem_pool;
#endif
static void init_one_memory_pool(struct k_mem_pool *pool);
/**
*
* @brief Initialize kernel memory pool subsystem
*
* Perform any initialization of memory pool that wasn't done at build time.
*
* @return N/A
*/
static int init_static_pools(struct device *unused)
{
ARG_UNUSED(unused);
struct k_mem_pool *pool;
/* perform initialization for each memory pool */
for (pool = _k_mem_pool_start;
pool < _k_mem_pool_end;
pool++) {
init_one_memory_pool(pool);
}
int k_mem_pool_alloc(k_mem_pool_t id, struct k_block *block, int size,
int32_t timeout)
{
return 0;
}
void k_mem_pool_free(struct k_block *block)
/**
*
* @brief Initialize the memory pool
*
* Initialize the internal memory accounting structures of the memory pool
*
* @param pool memory pool descriptor
*
* @return N/A
*/
static void init_one_memory_pool(struct k_mem_pool *pool)
{
/*
* mark block set for largest block size
* as owning all of the memory pool buffer space
*/
int remaining_blocks = pool->nr_of_maxblocks;
int j = 0;
char *memptr = pool->bufblock;
while (remaining_blocks >= 4) {
pool->block_set[0].quad_block[j].mem_blocks = memptr;
pool->block_set[0].quad_block[j].mem_status =
_QUAD_BLOCK_AVAILABLE;
j++;
remaining_blocks -= 4;
memptr +=
OCTET_TO_SIZEOFUNIT(pool->block_set[0].block_size)
* 4;
}
void k_mem_pool_defrag(k_mem_pool_t id)
{
if (remaining_blocks != 0) {
pool->block_set[0].quad_block[j].mem_blocks = memptr;
pool->block_set[0].quad_block[j].mem_status =
_QUAD_BLOCK_AVAILABLE >> (4 - remaining_blocks);
/* non-existent blocks are marked as unavailable */
}
void *k_malloc(uint32_t size)
/*
* note: all other block sets own no blocks, since their
* first quad-block has a NULL memory pointer
*/
sys_dlist_init(&pool->wait_q);
SYS_TRACING_OBJ_INIT(memory_pool, pool);
}
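A worked example of the partial quad-block handling above, for
nr_of_maxblocks == 6:

	/* quad_block[0]: mem_blocks = buffer,                  mem_status = 0x0F           */
	/* quad_block[1]: mem_blocks = buffer + 4 * block_size, mem_status = 0x0F >> 2 = 0x03 */
	/* i.e. only the two blocks that actually exist in the partial quad-block
	 * are marked available
	 */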
/**
*
* @brief Determines which block set corresponds to the specified data size
*
* Finds the block set with the smallest blocks that can hold the specified
* amount of data.
*
* @return block set index
*/
static int compute_block_set_index(struct k_mem_pool *pool, int data_size)
{
int block_size = pool->min_block_size;
int offset = pool->nr_of_block_sets - 1;
while (data_size > block_size) {
block_size *= 4;
offset--;
}
return offset;
}
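For example, with min_block_size 16 and max_block_size 256 (three block
sets), a request for 20 bytes of data resolves as:

	/* offset starts at 2 (the 16-byte block set)            */
	/* 20 > 16  -> block_size = 64, offset = 1               */
	/* 20 <= 64 -> return 1, i.e. the 64-byte block set      */
	/* (index 0 always describes the largest, 256-byte, set) */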
/**
*
* @brief Return an allocated block to its block set
*
* @param ptr pointer to start of block
* @param pool memory pool descriptor
* @param index block set identifier
*
* @return N/A
*/
static void free_existing_block(char *ptr, struct k_mem_pool *pool, int index)
{
struct k_mem_pool_quad_block *quad_block =
pool->block_set[index].quad_block;
char *block_ptr;
int i, j;
/*
* search block set's quad-blocks until the block is located,
* then mark it as unused
*
* note: block *must* exist, so no need to do array bounds checking
*/
for (i = 0; ; i++) {
__ASSERT((i < pool->block_set[index].nr_of_entries) &&
(quad_block[i].mem_blocks != NULL),
"Attempt to free unallocated memory pool block\n");
block_ptr = quad_block[i].mem_blocks;
for (j = 0; j < 4; j++) {
if (ptr == block_ptr) {
quad_block[i].mem_status |= (1 << j);
return;
}
block_ptr += OCTET_TO_SIZEOFUNIT(
pool->block_set[index].block_size);
}
}
}
/**
*
* @brief Defragment the specified memory pool block sets
*
* Reassembles any quad-blocks that are entirely unused into larger blocks
* (to the extent permitted).
*
* @param pool memory pool descriptor
* @param start_block_set_index index of smallest block set to defragment
* @param last_block_set_index index of largest block set to defragment
*
* @return N/A
*/
static void defrag(struct k_mem_pool *pool,
int start_block_set_index, int last_block_set_index)
{
int i, j, k;
struct k_mem_pool_quad_block *quad_block;
/* process block sets from smallest to largest permitted sizes */
for (j = start_block_set_index; j > last_block_set_index; j--) {
quad_block = pool->block_set[j].quad_block;
i = 0;
do {
/* block set is done if no more quad-blocks exist */
if (quad_block[i].mem_blocks == NULL) {
break;
}
/* reassemble current quad-block, if possible */
if (quad_block[i].mem_status == _QUAD_BLOCK_AVAILABLE) {
/*
* mark the corresponding block in next larger
* block set as free
*/
free_existing_block(
quad_block[i].mem_blocks, pool, j - 1);
/*
* delete the quad-block from this block set
* by replacing it with the last quad-block
*
* (algorithm works even when the deleted
* quad-block is the last quad_block)
*/
k = i;
while ((k < pool->block_set[j].nr_of_entries) &&
(quad_block[k + 1].mem_blocks != NULL)) {
k++;
}
quad_block[i].mem_blocks =
quad_block[k].mem_blocks;
quad_block[i].mem_status =
quad_block[k].mem_status;
quad_block[k].mem_blocks = NULL;
/* loop & process replacement quad_block[i] */
} else {
i++;
}
/* block set is done if at end of quad-block array */
} while (i < pool->block_set[j].nr_of_entries);
}
}
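One defrag step with the same illustrative 64/16-byte sizes, assuming all four
16-byte blocks of quad_block[i] are free (mem_status == _QUAD_BLOCK_AVAILABLE):

	/* free_existing_block(quad_block[i].mem_blocks, pool, j - 1) marks the
	 * corresponding 64-byte block as free again, then quad_block[i] is
	 * overwritten with the last quad-block of the set and that last entry's
	 * mem_blocks is set to NULL, shrinking the 16-byte set by one entry
	 */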
/**
*
* @brief Allocate block from an existing block set
*
* @param block_set pointer to block set
* @param unused_block_index the index of the first unused quad-block;
* when allocation fails, it is the number of
* quad-blocks in the block set
*
* @return pointer to allocated block, or NULL if none available
*/
static char *get_existing_block(struct k_mem_pool_block_set *block_set,
int *unused_block_index)
{
char *found = NULL;
int i = 0;
int status;
int free_bit;
do {
/* give up if no more quad-blocks exist */
if (block_set->quad_block[i].mem_blocks == NULL) {
break;
}
/* allocate a block from current quad-block, if possible */
status = block_set->quad_block[i].mem_status;
if (status != _QUAD_BLOCK_ALLOCATED) {
/* identify first free block */
free_bit = find_lsb_set(status) - 1;
/* compute address of free block */
found = block_set->quad_block[i].mem_blocks +
(OCTET_TO_SIZEOFUNIT(free_bit *
block_set->block_size));
/* mark block as unavailable (using XOR to invert) */
block_set->quad_block[i].mem_status ^=
1 << free_bit;
#ifdef CONFIG_OBJECT_MONITOR
block_set->count++;
#endif
break;
}
/* move on to next quad-block; give up if at end of array */
} while (++i < block_set->nr_of_entries);
*unused_block_index = i;
return found;
}
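A short trace of the bit handling (illustrative values, ignoring
OCTET_TO_SIZEOFUNIT):

	/* mem_status == 0x06: blocks 1 and 2 of the quad-block are free */
	/* find_lsb_set(0x06) == 2, so free_bit == 1                     */
	/* found = mem_blocks + 1 * block_size                           */
	/* mem_status ^= (1 << 1)  ->  0x04, only block 2 remains free   */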
/**
*
* @brief Allocate a block, recursively fragmenting larger blocks if necessary
*
* @param pool memory pool descriptor
* @param index index of block set currently being examined
* @param start_index index of block set for which allocation is being done
*
* @return pointer to allocated block, or NULL if none available
*/
static char *get_block_recursive(struct k_mem_pool *pool,
int index, int start_index)
{
int i;
char *found, *larger_block;
struct k_mem_pool_block_set *fr_table;
/* give up if we've exhausted the set of maximum size blocks */
if (index < 0) {
return NULL;
}
void k_free(void *p)
{
/* try allocating a block from the current block set */
fr_table = pool->block_set;
i = 0;
found = get_existing_block(&(fr_table[index]), &i);
if (found != NULL) {
return found;
}
#ifdef CONFIG_MEM_POOL_AD_BEFORE_SEARCH_FOR_BIGGERBLOCK
/*
* do a partial defragmentation of memory pool & try allocating again
* - do this on initial invocation only, not recursive ones
* (since there is no benefit in repeating the defrag)
* - defrag only the blocks smaller than the desired size,
* and only until the size needed is reached
*
* note: defragging at this time tries to preserve the memory pool's
* larger blocks by fragmenting them only when necessary
* (i.e. at the cost of doing more frequent auto-defragmentations)
*/
if (index == start_index) {
defrag(pool, pool->nr_of_block_sets - 1, start_index);
found = get_existing_block(&(fr_table[index]), &i);
if (found != NULL) {
return found;
}
}
#endif
/* try allocating a block from the next largest block set */
larger_block = get_block_recursive(pool, index - 1, start_index);
if (larger_block != NULL) {
/*
* add a new quad-block to the current block set,
* then mark one of its 4 blocks as used and return it
*
* note: "i" was earlier set to indicate the first unused
* quad-block entry in the current block set
*/
fr_table[index].quad_block[i].mem_blocks = larger_block;
fr_table[index].quad_block[i].mem_status =
_QUAD_BLOCK_AVAILABLE & (~0x1);
#ifdef CONFIG_OBJECT_MONITOR
fr_table[index].count++;
#endif
return larger_block;
}
#ifdef CONFIG_MEM_POOL_AD_AFTER_SEARCH_FOR_BIGGERBLOCK
/*
* do a partial defragmentation of memory pool & try allocating again
* - do this on initial invocation only, not recursive ones
* (since there is no benefit in repeating the defrag)
* - defrag only the blocks smaller than the desired size,
* and only until the size needed is reached
*
* note: defragging at this time tries to limit the cost of doing
* auto-defragmentations by doing them only when necessary
* (i.e. at the cost of fragmenting the memory pool's larger blocks)
*/
if (index == start_index) {
defrag(pool, pool->nr_of_block_sets - 1, start_index);
found = get_existing_block(&(fr_table[index]), &i);
if (found != NULL) {
return found;
}
}
#endif
return NULL; /* can't find (or create) desired block */
}
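A fragmentation example with the illustrative 256/64/16-byte block sets,
for a 16-byte request when only a 64-byte block is free:

	/* index 2 (16-byte set): get_existing_block() fails, i = first unused entry */
	/* recurse to index 1 (64-byte set): a free 64-byte block is returned        */
	/* back at index 2: quad_block[i].mem_blocks = larger_block                  */
	/*                  quad_block[i].mem_status = 0x0F & ~0x1 = 0x0E            */
	/* the 64-byte block's start address doubles as the new 16-byte block        */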
/**
*
* @brief Examine threads that are waiting for memory pool blocks.
*
* This routine attempts to satisfy any incomplete block allocation requests for
* the specified memory pool. It can be invoked either by the explicit freeing
* of a used block or as a result of defragmenting the pool (which may create
* one or more new, larger blocks).
*
* @return N/A
*/
static void block_waiters_check(struct k_mem_pool *pool)
{
char *found_block;
struct k_thread *waiter;
struct k_thread *next_waiter;
int offset;
unsigned int key = irq_lock();
waiter = (struct k_thread *)sys_dlist_peek_head(&pool->wait_q);
/* loop all waiters */
while (waiter != NULL) {
uint32_t req_size = (uint32_t)(waiter->swap_data);
/* locate block set to try allocating from */
offset = compute_block_set_index(pool, req_size);
/* allocate block (fragmenting a larger block, if needed) */
found_block = get_block_recursive(pool, offset, offset);
next_waiter = (struct k_thread *)sys_dlist_peek_next(
&pool->wait_q, &waiter->k_q_node);
/* if success : remove task from list and reschedule */
if (found_block != NULL) {
/* return found block */
_set_thread_return_value_with_data(waiter, 0,
found_block);
/*
* Schedule the thread. Threads will be rescheduled
* outside the function by k_sched_unlock()
*/
_unpend_thread(waiter);
_timeout_abort(waiter);
_ready_thread(waiter);
}
waiter = next_waiter;
}
irq_unlock(key);
}
/**
*
* @brief Perform defragment memory pool request
*
* @return N/A
*/
void k_mem_pool_defrag(k_mem_pool_t pool)
{
k_sched_lock();
/* do complete defragmentation of memory pool (i.e. all block sets) */
defrag(pool, pool->nr_of_block_sets - 1, 0);
/* reschedule anybody waiting for a block */
block_waiters_check(pool);
k_sched_unlock();
}
/**
*
* @brief Perform allocate memory pool block request
*
* @return 0 on success, or a negative error code on failure
*/
int k_mem_pool_alloc(k_mem_pool_t pool, struct k_mem_block *block,
int size, int32_t timeout)
{
char *found_block;
int offset;
k_sched_lock();
/* locate block set to try allocating from */
offset = compute_block_set_index(pool, size);
/* allocate block (fragmenting a larger block, if needed) */
found_block = get_block_recursive(pool, offset, offset);
if (found_block != NULL) {
k_sched_unlock();
block->pool_id = pool;
block->addr_in_pool = found_block;
block->data = found_block;
block->req_size = size;
return 0;
}
/*
* no suitable block is currently available,
* so either wait for one to appear or indicate failure
*/
if (likely(timeout != TICKS_NONE)) {
int result;
unsigned int key = irq_lock();
_sched_unlock_no_reschedule();
_current->swap_data = (void *)size;
_pend_current_thread(&pool->wait_q, timeout);
result = _Swap(key);
if (result == 0) {
block->pool_id = pool;
block->addr_in_pool = _current->swap_data;
block->data = _current->swap_data;
block->req_size = size;
}
return result;
}
k_sched_unlock();
return -ENOMEM;
}
#define MALLOC_ALIGN (sizeof(uint32_t))
/**
* @brief Allocate memory from heap pool
*
* This routine provides traditional malloc semantics; internally it uses
* the microkernel pool APIs on a dedicated HEAP pool
*
* @param size Size of memory requested by the caller.
*
* @retval address of the block if successful otherwise returns NULL
*/
void *k_malloc(uint32_t size)
{
uint32_t new_size;
uint32_t *aligned_addr;
struct k_mem_block mem_block;
__ASSERT(heap_mem_pool != NULL,
"Try to allocate a block in undefined heap\n");
/* The address the pool returns may not be aligned. Also,
* k_mem_pool_free() requires both the start address and size. So
* we end up needing 2 slots to save the size and
* start address, in addition to padding space
*/
new_size = size + (sizeof(uint32_t) << 1) + MALLOC_ALIGN - 1;
if (k_mem_pool_alloc(heap_mem_pool, &mem_block, new_size,
TICKS_NONE) != 0) {
return NULL;
}
/* Get the next aligned address following the address returned by pool*/
aligned_addr = (uint32_t *) ROUND_UP(mem_block.addr_in_pool,
MALLOC_ALIGN);
/* Save the size requested to the pool API, to be used while freeing */
*aligned_addr = new_size;
/* Save the original unaligned_addr pointer too */
aligned_addr++;
*((void **) aligned_addr) = mem_block.addr_in_pool;
/* return the subsequent address */
return ++aligned_addr;
}
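The resulting layout inside the pool block is roughly (MALLOC_ALIGN == 4):

	/* addr_in_pool ... aligned_addr - 1 : 0..3 bytes of alignment padding    */
	/* aligned_addr[0]                   : new_size, read back by k_free()    */
	/* aligned_addr[1]                   : the original addr_in_pool pointer  */
	/* aligned_addr + 2                  : the pointer returned to the caller */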
/**
*
* @brief Perform return memory pool block request
*
* @param blockptr address of the memory block to be freed
*
* Marks a block belonging to a pool as free; if there are waiters that can use
* the block, it is passed to a waiting task.
*
* @return N/A
*/
void k_mem_pool_free(struct k_mem_block *blockptr)
{
int offset;
struct k_mem_pool *pool = blockptr->pool_id;
k_sched_lock();
/* determine block set that block belongs to */
offset = compute_block_set_index(pool, blockptr->req_size);
/* mark the block as unused */
free_existing_block(blockptr->addr_in_pool, pool, offset);
/* reschedule anybody waiting for a block */
block_waiters_check(pool);
k_sched_unlock();
}
/**
* @brief Free memory allocated through k_malloc
*
* @param ptr pointer to be freed
*
* @return N/A
*/
void k_free(void *ptr)
{
struct k_mem_block mem_block;
__ASSERT(heap_mem_pool != NULL,
"Try to free a block in undefined heap\n");
mem_block.pool_id = heap_mem_pool;
/* Fetch the pointer returned by the pool API */
mem_block.addr_in_pool = *((void **) ((uint32_t *)ptr - 1));
mem_block.data = *((void **) ((uint32_t *)ptr - 1));
/* Further fetch the size asked from pool */
mem_block.req_size = *((uint32_t *)ptr - 2);
k_mem_pool_free(&mem_block);
}
SYS_INIT(init_static_pools, PRIMARY, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);


@@ -393,7 +393,7 @@ class SizeCalculator:
alloc_sections = ["bss", "noinit"]
rw_sections = ["datas", "initlevel", "_k_mem_map_ptr", "_k_pipe_ptr",
"_k_task_ptr", "_k_task_list", "_k_event_list",
"exceptions"]
"_k_memory_pool", "exceptions"]
# These get copied into RAM only on non-XIP
ro_sections = ["text", "ctors", "init_array", "reset",
"rodata", "devconfig"]


@@ -802,6 +802,7 @@ def kernel_main_c_pools():
kernel_main_c_out("\nint _k_mem_pool_count = %d;\n" % (total_pools))
if kernel_type == 'micro':
if (total_pools == 0):
kernel_main_c_out("\nstruct pool_struct * _k_mem_pool_list = NULL;\n")
return
@@ -887,6 +888,16 @@ def kernel_main_c_pools():
# generate memory pool descriptor info
pool_descriptors += "};\n"
elif kernel_type == 'unified':
pool_descriptors = ""
for pool in pool_list:
kernel_main_c_out("\n")
min_block_size = pool[1]
max_block_size = pool[2]
num_maximal_blocks = pool[3]
pool_descriptors += "K_MEMORY_POOL_DEFINE(_k_mem_pool_obj_%s, %d, %d, %d);\n" % \
(pool[0], min_block_size, max_block_size,
num_maximal_blocks)
kernel_main_c_out(pool_descriptors)
@@ -967,11 +978,11 @@ def kernel_main_c_generate():
kernel_main_c_mailboxes()
kernel_main_c_tasks()
kernel_main_c_pipes()
kernel_main_c_pools()
if kernel_type == 'micro':
kernel_main_c_kargs()
kernel_main_c_timers()
kernel_main_c_pools()
kernel_main_c_node_init()
kernel_main_c_priorities()
@@ -1117,6 +1128,7 @@ def generate_sysgen_h_obj_ids():
mbox_struct = 'k_mbox'
mbox_type = 'struct k_mbox *'
event_type = 'struct k_event *'
mem_pool_type = 'struct k_mem_pool'
# add missing object types
# mutex object ids
@@ -1208,12 +1220,22 @@ def generate_sysgen_h_obj_ids():
sysgen_h_data += \
"#define %s (&_k_event_obj_%s)\n\n" % (name, name)
# all other object ids
# memory pool object ids
if kernel_type == 'micro':
obj_types = [
[pool_list, 0],
]
sysgen_h_data += generate_obj_id_lines(obj_types)
elif (kernel_type == 'unified'):
for mem_pool in pool_list:
name = mem_pool[0];
sysgen_h_data += \
"extern %s _k_mem_pool_obj_%s;\n" % (mem_pool_type, name)
sysgen_h_data += \
"#define %s ((%s *)&_k_mem_pool_obj_%s)\n" % (name, mem_pool_type, name)
# all other object ids
sysgen_h_footer_include_guard_str = \