kernel: Add k_mem_pool compatibility layer on top of k_heap
Add a shim layer implementing the legacy k_mem_pool APIs backed by a k_heap instead of the original implementation. Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
parent
0dd83b8c2e
commit
8f0959c7b1
6 changed files with 115 additions and 2 deletions
|
@ -4692,6 +4692,14 @@ void k_heap_free(struct k_heap *h, void *mem);
|
|||
* If the pool is to be accessed outside the module where it is defined, it
|
||||
* can be declared via
|
||||
*
|
||||
 * @note When CONFIG_MEM_POOL_HEAP_BACKEND is enabled, the k_mem_pool
 * API is implemented on top of a k_heap, a more general-purpose
 * allocator that does not make the splitting and alignment promises
 * detailed above.  Blocks will be aligned only to the 8-byte chunk
 * stride of the underlying heap and may point anywhere within the
 * heap; they are not split into four as described.
 *
|
||||
* @code extern struct k_mem_pool <name>; @endcode
|
||||
*
|
||||
* @param name Name of the memory pool.
|
||||
|
|
|
@ -27,7 +27,11 @@
|
|||
#include <sys/util.h>
|
||||
#include <sys/mempool_base.h>
|
||||
#include <kernel_structs.h>
|
||||
#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
|
||||
#include <mempool_heap.h>
|
||||
#else
|
||||
#include <mempool_sys.h>
|
||||
#endif
|
||||
#include <kernel_version.h>
|
||||
#include <random/rand32.h>
|
||||
#include <syscall.h>
|
||||
|
|
55
include/mempool_heap.h
Normal file
55
include/mempool_heap.h
Normal file
|
@ -0,0 +1,55 @@
|
|||
/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#ifndef ZEPHYR_INCLUDE_MEMPOOL_HEAP_H_
/* BUGFIX: the original guard had no matching #define, so the guard
 * was inert and a second inclusion would redefine every struct below.
 */
#define ZEPHYR_INCLUDE_MEMPOOL_HEAP_H_

/* Compatibility implementation of a k_mem_pool backend in terms of a
 * k_heap
 */

/* The "ID" of a k_heap-based mempool is just the tuple of the data
 * block pointer and the heap that allocated it
 */
struct k_mem_block_id {
	void *data;
	struct k_heap *heap;
};

/* Note the data pointer gets unioned with the same value stored in
 * the ID field to save space: block.data and block.id.data alias the
 * same storage.
 */
struct k_mem_block {
	union {
		void *data;
		struct k_mem_block_id id;
	};
};

struct k_mem_pool {
	struct k_heap *heap;
};

/* Sizing is a heuristic, as k_mem_pool made promises about layout
 * that k_heap does not. We make space for the number of maximum
 * objects defined, and include extra so there's enough metadata space
 * available for the maximum number of minimum-sized objects to be
 * stored: 8 bytes for each desired chunk header, and a 24 word block
 * to reserve room for a "typical" set of bucket list heads (this size
 * was picked more to conform with existing test expectations than any
 * rigorous theory -- we have tests that rely on being able to
 * allocate the blocks promised and ones that make assumptions about
 * when memory will run out).
 *
 * NOTE(review): the `align` parameter is accepted for API
 * compatibility but ignored by this backend; blocks are aligned only
 * to the heap's 8-byte chunk stride.
 */
#define Z_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align)	\
	K_HEAP_DEFINE(poolheap_##name,				\
		      ((maxsz) * (nmax))			\
		      + 8 * ((maxsz) * (nmax) / (minsz))	\
		      + 24 * sizeof(void *));			\
	struct k_mem_pool name = {				\
		.heap = &poolheap_##name			\
	}

#endif /* ZEPHYR_INCLUDE_MEMPOOL_HEAP_H_ */
|
|
@ -12,7 +12,6 @@ add_library(kernel
|
|||
mailbox.c
|
||||
mem_slab.c
|
||||
mempool.c
|
||||
mempool_sys.c
|
||||
msg_q.c
|
||||
mutex.c
|
||||
pipes.c
|
||||
|
@ -42,6 +41,11 @@ target_sources_ifdef(CONFIG_SYS_CLOCK_EXISTS kernel PRIVATE timeout.c timer
|
|||
target_sources_ifdef(CONFIG_ATOMIC_OPERATIONS_C kernel PRIVATE atomic_c.c)
|
||||
target_sources_if_kconfig( kernel PRIVATE poll.c)
|
||||
|
||||
# The legacy mempool backend is only compiled when the k_heap-based
# backend is disabled.  Note: the original used an empty then-branch
# (`if(${VAR}) ... else()`), and `${VAR}` double-dereference misfires
# when the variable is unset; test the variable name directly instead.
if(NOT CONFIG_MEM_POOL_HEAP_BACKEND)
  target_sources(kernel PRIVATE mempool_sys.c)
endif()
|
||||
|
||||
# The last 2 files inside the target_sources_ifdef should be
|
||||
# userspace_handler.c and userspace.c. If not the linker would complain.
|
||||
# This order has to be maintained. Any new file should be placed
|
||||
|
|
|
@ -465,6 +465,15 @@ config NUM_PIPE_ASYNC_MSGS
|
|||
Setting this option to 0 disables support for asynchronous
|
||||
pipe messages.
|
||||
|
||||
# Selects which allocator implements the legacy k_mem_pool API; when
# enabled, kernel/mempool_sys.c is not built (see kernel CMakeLists).
config MEM_POOL_HEAP_BACKEND
	bool "Use k_heap as the backend for k_mem_pool"
	help
	  This selects a backend implementation for k_mem_pool based
	  on the sys_heap abstraction instead of the legacy
	  sys_mem_pool.  This backend has significantly better
	  performance and memory utilization for general purpose
	  workloads.
||||
|
||||
config HEAP_MEM_POOL_SIZE
|
||||
int "Heap memory pool size (in bytes)"
|
||||
default 0 if !POSIX_MQUEUE
|
||||
|
|
|
@ -56,5 +56,38 @@ void k_heap_free(struct k_heap *h, void *mem)
|
|||
k_spinlock_key_t key = k_spin_lock(&h->lock);
|
||||
|
||||
sys_heap_free(&h->heap, mem);
|
||||
k_spin_unlock(&h->lock, key);
|
||||
|
||||
if (z_unpend_all(&h->wait_q) != 0) {
|
||||
z_reschedule(&h->lock, key);
|
||||
} else {
|
||||
k_spin_unlock(&h->lock, key);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MEM_POOL_HEAP_BACKEND
|
||||
/* Compatibility layer for legacy k_mem_pool code on top of a k_heap
|
||||
* backend.
|
||||
*/
|
||||
|
||||
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
|
||||
size_t size, k_timeout_t timeout)
|
||||
{
|
||||
block->id.heap = p->heap;
|
||||
block->data = k_heap_alloc(p->heap, size, timeout);
|
||||
|
||||
/* The legacy API returns -EAGAIN on timeout expiration, but
|
||||
* -ENOMEM if the timeout was K_NO_WAIT. Don't ask.
|
||||
*/
|
||||
if (size != 0 && block->data == NULL) {
|
||||
return K_TIMEOUT_EQ(timeout, K_NO_WAIT) ? -ENOMEM : -EAGAIN;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* Release a block previously returned by k_mem_pool_alloc().  The
 * block ID carries both the owning heap and the data pointer, so no
 * separate pool argument is required.
 */
void k_mem_pool_free_id(struct k_mem_block_id *id)
{
	k_heap_free(id->heap, id->data);
}
|
||||
|
||||
#endif /* CONFIG_MEM_POOL_HEAP_BACKEND */
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue