kernel: Refactor k_mem_pool APIs into a base and derived level
Almost all of the k_mem_pool API is implemented in terms of three lower level primitives: K_MEM_POOL_DEFINE(), k_mem_pool_alloc() and k_mem_pool_free_id(). These are themselves implemented on top of the lower level sys_mem_pool abstraction. Make this layering explicit by splitting the low level out into its own files: mempool_sys.c/h.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
parent 41220d2bea
commit e96ac9061f

6 changed files with 169 additions and 155 deletions
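To make the base/derived split concrete, here is a minimal sketch, not the verbatim Zephyr implementation, of how a k_mem_pool_malloc()-style derived API can be written purely on top of two of the primitives named above, k_mem_pool_alloc() and k_mem_pool_free_id(). The example_* names are invented; the real kernel uses the same trick of stashing the block descriptor in a small header ahead of the returned payload.

#include <kernel.h>
#include <string.h>

void *example_pool_malloc(struct k_mem_pool *pool, size_t size)
{
        struct k_mem_block block;

        /* reserve room for the block id ahead of the payload */
        if (k_mem_pool_alloc(pool, &block,
                             size + sizeof(struct k_mem_block_id),
                             K_NO_WAIT) != 0) {
                return NULL;
        }

        /* stash the descriptor so the free path can recover it */
        (void)memcpy(block.data, &block.id, sizeof(struct k_mem_block_id));

        return (char *)block.data + sizeof(struct k_mem_block_id);
}

void example_pool_free(void *ptr)
{
        struct k_mem_block_id id;

        if (ptr == NULL) {
                return;
        }

        /* step back to the header written by example_pool_malloc() */
        ptr = (char *)ptr - sizeof(struct k_mem_block_id);
        (void)memcpy(&id, ptr, sizeof(id));

        k_mem_pool_free_id(&id);
}

K_MEM_POOL_DEFINE(), the third primitive, supplies the statically allocated pool object that wrappers like these operate on.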
@@ -4074,30 +4074,6 @@ static inline u32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)

/** @} */

/**
 * @defgroup mem_pool_apis Memory Pool APIs
 * @ingroup kernel_apis
 * @{
 */

/* Note on sizing: the use of a 20 bit field for block means that,
 * assuming a reasonable minimum block size of 16 bytes, we're limited
 * to 16M of memory managed by a single pool. Long term it would be
 * good to move to a variable bit size based on configuration.
 */
struct k_mem_block_id {
        u32_t pool : 8;
        u32_t level : 4;
        u32_t block : 20;
};

struct k_mem_block {
        void *data;
        struct k_mem_block_id id;
};

/** @} */

/**
 * @defgroup mailbox_apis Mailbox APIs
 * @ingroup kernel_apis
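As a worked check of the sizing comment in the hunk above (the EXAMPLE_* names below are purely illustrative): a 20 bit block index can distinguish at most 2^20 blocks, so with a 16 byte minimum block size a single pool tops out at 2^20 * 16 bytes = 16 MiB.

#include <kernel.h>   /* for BUILD_ASSERT() when built as kernel code */

/* Illustration of the 20-bit "block" field limit described above. */
#define EXAMPLE_MIN_BLOCK_SIZE 16U          /* bytes */
#define EXAMPLE_MAX_BLOCKS     (1UL << 20)  /* count expressible in 20 bits */

BUILD_ASSERT((EXAMPLE_MAX_BLOCKS * EXAMPLE_MIN_BLOCK_SIZE) ==
             (16UL * 1024UL * 1024UL),
             "20-bit block index times 16-byte blocks caps one pool at 16 MiB");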
@@ -4637,19 +4613,6 @@ static inline u32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)

/** @} */

/**
 * @cond INTERNAL_HIDDEN
 */

struct k_mem_pool {
        struct sys_mem_pool_base base;
        _wait_q_t wait_q;
};

/**
 * INTERNAL_HIDDEN @endcond
 */

/**
 * @addtogroup mem_pool_apis
 * @{
@@ -4674,21 +4637,8 @@ struct k_mem_pool {

 * @param nmax Number of maximum sized blocks in the pool.
 * @param align Alignment of the pool's buffer (power of 2).
 */
#define K_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
        char __aligned(WB_UP(align)) _mpool_buf_##name[WB_UP(maxsz) * nmax \
                                + _MPOOL_BITS_SIZE(maxsz, minsz, nmax)]; \
        struct sys_mem_pool_lvl _mpool_lvls_##name[Z_MPOOL_LVLS(maxsz, minsz)]; \
        Z_STRUCT_SECTION_ITERABLE(k_mem_pool, name) = { \
                .base = { \
                        .buf = _mpool_buf_##name, \
                        .max_sz = WB_UP(maxsz), \
                        .n_max = nmax, \
                        .n_levels = Z_MPOOL_LVLS(maxsz, minsz), \
                        .levels = _mpool_lvls_##name, \
                        .flags = SYS_MEM_POOL_KERNEL \
                } \
        }; \
        BUILD_ASSERT(WB_UP(maxsz) >= _MPOOL_MINBLK, "K_MEM_POOL_DEFINE: size of the largest block (parameter maxsz) is too small")
#define K_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
        Z_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align)

/**
 * @brief Allocate memory from a memory pool.
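Note that the hunk above shows both versions of the macro: the first K_MEM_POOL_DEFINE() is the old in-place expansion being removed, and the second is its replacement, which simply delegates to the Z_MEM_POOL_DEFINE() primitive defined in the new include/mempool_sys.h below. A hypothetical use, with the arguments mapping to (name, minsz, maxsz, nmax, align) as documented above:

/* Pool with invented sizes for illustration: blocks can be split from a
 * 1024-byte maximum down to a 64-byte minimum, with room for 4 maximum-sized
 * blocks and a 4-byte-aligned backing buffer.
 */
K_MEM_POOL_DEFINE(example_pool, 64, 1024, 4, 4);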
@@ -27,6 +27,7 @@

#include <sys/util.h>
#include <sys/mempool_base.h>
#include <kernel_structs.h>
#include <mempool_sys.h>
#include <kernel_version.h>
#include <random/rand32.h>
#include <syscall.h>
55  include/mempool_sys.h  (new file)

@@ -0,0 +1,55 @@
/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_MEMPOOL_SYS_H_
#define ZEPHYR_INCLUDE_MEMPOOL_SYS_H_

/**
 * @defgroup mem_pool_apis Memory Pool APIs
 * @ingroup kernel_apis
 * @{
 */

/* Note on sizing: the use of a 20 bit field for block means that,
 * assuming a reasonable minimum block size of 16 bytes, we're limited
 * to 16M of memory managed by a single pool. Long term it would be
 * good to move to a variable bit size based on configuration.
 */
struct k_mem_block_id {
        u32_t pool : 8;
        u32_t level : 4;
        u32_t block : 20;
};

struct k_mem_block {
        void *data;
        struct k_mem_block_id id;
};

/** @} */

struct k_mem_pool {
        struct sys_mem_pool_base base;
        _wait_q_t wait_q;
};

#define Z_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
        char __aligned(WB_UP(align)) _mpool_buf_##name[WB_UP(maxsz) * nmax \
                                + _MPOOL_BITS_SIZE(maxsz, minsz, nmax)]; \
        struct sys_mem_pool_lvl \
                _mpool_lvls_##name[Z_MPOOL_LVLS(maxsz, minsz)]; \
        Z_STRUCT_SECTION_ITERABLE(k_mem_pool, name) = { \
                .base = { \
                        .buf = _mpool_buf_##name, \
                        .max_sz = WB_UP(maxsz), \
                        .n_max = nmax, \
                        .n_levels = Z_MPOOL_LVLS(maxsz, minsz), \
                        .levels = _mpool_lvls_##name, \
                        .flags = SYS_MEM_POOL_KERNEL \
                } \
        }; \
        BUILD_ASSERT(WB_UP(maxsz) >= _MPOOL_MINBLK)

#endif /* ZEPHYR_INCLUDE_MEMPOOL_SYS_H_ */
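For reference, a hand expansion of the new primitive for a hypothetical pool (the name and sizes are invented; WB_UP(), _MPOOL_BITS_SIZE() and Z_MPOOL_LVLS() are left symbolic rather than evaluated) shows the three objects every pool definition creates:

/* Z_MEM_POOL_DEFINE(demo, 64, 1024, 2, 4); expands, roughly, to: */

char __aligned(WB_UP(4)) _mpool_buf_demo[WB_UP(1024) * 2
                + _MPOOL_BITS_SIZE(1024, 64, 2)];                /* backing store + free bitmap */
struct sys_mem_pool_lvl _mpool_lvls_demo[Z_MPOOL_LVLS(1024, 64)]; /* per-level bookkeeping */
Z_STRUCT_SECTION_ITERABLE(k_mem_pool, demo) = {                   /* the kernel pool object */
        .base = {
                .buf = _mpool_buf_demo,
                .max_sz = WB_UP(1024),
                .n_max = 2,
                .n_levels = Z_MPOOL_LVLS(1024, 64),
                .levels = _mpool_lvls_demo,
                .flags = SYS_MEM_POOL_KERNEL
        }
};
BUILD_ASSERT(WB_UP(1024) >= _MPOOL_MINBLK);

The wait_q member is left zero-initialized by the expansion; it is wired up at boot by init_static_pools() in kernel/mempool_sys.c below.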
@@ -11,6 +11,7 @@ add_library(kernel

  mailbox.c
  mem_slab.c
  mempool.c
  mempool_sys.c
  msg_q.c
  mutex.c
  pipes.c
103  kernel/mempool.c

@@ -5,111 +5,8 @@

 */

#include <kernel.h>
#include <ksched.h>
#include <wait_q.h>
#include <init.h>
#include <string.h>
#include <sys/__assert.h>
#include <sys/math_extras.h>
#include <stdbool.h>

static struct k_spinlock lock;

static struct k_mem_pool *get_pool(int id)
{
        extern struct k_mem_pool _k_mem_pool_list_start[];
        return &_k_mem_pool_list_start[id];
}

static int pool_id(struct k_mem_pool *pool)
{
        extern struct k_mem_pool _k_mem_pool_list_start[];
        return pool - &_k_mem_pool_list_start[0];
}

static void k_mem_pool_init(struct k_mem_pool *p)
{
        z_waitq_init(&p->wait_q);
        z_sys_mem_pool_base_init(&p->base);
}

int init_static_pools(struct device *unused)
{
        ARG_UNUSED(unused);

        Z_STRUCT_SECTION_FOREACH(k_mem_pool, p) {
                k_mem_pool_init(p);
        }

        return 0;
}

SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
                     size_t size, k_timeout_t timeout)
{
        int ret;
        u64_t end = 0;

        __ASSERT(!(arch_is_in_isr() && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

        end = z_timeout_end_calc(timeout);

        while (true) {
                u32_t level_num, block_num;

                ret = z_sys_mem_pool_block_alloc(&p->base, size,
                                                 &level_num, &block_num,
                                                 &block->data);
                block->id.pool = pool_id(p);
                block->id.level = level_num;
                block->id.block = block_num;

                if (ret == 0 || K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
                    ret != -ENOMEM) {
                        return ret;
                }

                z_pend_curr_unlocked(&p->wait_q, timeout);

                if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                        s64_t remaining = end - z_tick_get();

                        if (remaining <= 0) {
                                break;
                        }
                        timeout = Z_TIMEOUT_TICKS(remaining);
                }
        }

        return -EAGAIN;
}

void k_mem_pool_free_id(struct k_mem_block_id *id)
{
        int need_sched = 0;
        struct k_mem_pool *p = get_pool(id->pool);

        z_sys_mem_pool_block_free(&p->base, id->level, id->block);

        /* Wake up anyone blocked on this pool and let them repeat
         * their allocation attempts
         *
         * (Note that this spinlock only exists because z_unpend_all()
         * is unsynchronized. Maybe we want to put the lock into the
         * wait_q instead and make the API safe?)
         */
        k_spinlock_key_t key = k_spin_lock(&lock);

        need_sched = z_unpend_all(&p->wait_q);

        if (need_sched != 0) {
                z_reschedule(&lock, key);
        } else {
                k_spin_unlock(&lock, key);
        }
}

void k_mem_pool_free(struct k_mem_block *block)
{
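The retry loop in k_mem_pool_alloc() above deserves a note: the caller's relative timeout is converted once into an absolute tick deadline, and after every wake-up the remaining budget is recomputed so that repeated pends never exceed the original timeout. A standalone sketch of that pattern follows, assuming the same kernel-internal includes as the file above; retry_cb() and wait_cb() stand in for the allocation attempt and the pend-on-wait-queue step, and example_wait_loop() itself is hypothetical.

static int example_wait_loop(int (*retry_cb)(void),
                             void (*wait_cb)(k_timeout_t),
                             k_timeout_t timeout)
{
        u64_t end = z_timeout_end_calc(timeout); /* absolute deadline in ticks */
        int ret;

        while (true) {
                ret = retry_cb();                /* e.g. z_sys_mem_pool_block_alloc() */
                if (ret == 0 || K_TIMEOUT_EQ(timeout, K_NO_WAIT) || ret != -ENOMEM) {
                        return ret;              /* success, non-blocking call, or hard error */
                }

                wait_cb(timeout);                /* e.g. z_pend_curr_unlocked() */

                if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                        s64_t remaining = end - z_tick_get();

                        if (remaining <= 0) {
                                return -EAGAIN;  /* budget used up across retries */
                        }
                        timeout = Z_TIMEOUT_TICKS(remaining); /* shrink for the next wait */
                }
        }
}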
110  kernel/mempool_sys.c  (new file)

@@ -0,0 +1,110 @@
/*
 * Copyright (c) 2017, 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <ksched.h>
#include <wait_q.h>
#include <init.h>

static struct k_spinlock lock;

static struct k_mem_pool *get_pool(int id)
{
        extern struct k_mem_pool _k_mem_pool_list_start[];

        return &_k_mem_pool_list_start[id];
}

static int pool_id(struct k_mem_pool *pool)
{
        extern struct k_mem_pool _k_mem_pool_list_start[];

        return pool - &_k_mem_pool_list_start[0];
}

static void k_mem_pool_init(struct k_mem_pool *p)
{
        z_waitq_init(&p->wait_q);
        z_sys_mem_pool_base_init(&p->base);
}

int init_static_pools(struct device *unused)
{
        ARG_UNUSED(unused);

        Z_STRUCT_SECTION_FOREACH(k_mem_pool, p) {
                k_mem_pool_init(p);
        }

        return 0;
}

SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
                     size_t size, k_timeout_t timeout)
{
        int ret;
        u64_t end = 0;

        __ASSERT(!(arch_is_in_isr() && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");

        end = z_timeout_end_calc(timeout);

        while (true) {
                u32_t level_num, block_num;

                ret = z_sys_mem_pool_block_alloc(&p->base, size,
                                                 &level_num, &block_num,
                                                 &block->data);
                block->id.pool = pool_id(p);
                block->id.level = level_num;
                block->id.block = block_num;

                if (ret == 0 || K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
                    ret != -ENOMEM) {
                        return ret;
                }

                z_pend_curr_unlocked(&p->wait_q, timeout);

                if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
                        s64_t remaining = end - z_tick_get();

                        if (remaining <= 0) {
                                break;
                        }
                        timeout = Z_TIMEOUT_TICKS(remaining);
                }
        }

        return -EAGAIN;
}

void k_mem_pool_free_id(struct k_mem_block_id *id)
{
        int need_sched = 0;
        struct k_mem_pool *p = get_pool(id->pool);

        z_sys_mem_pool_block_free(&p->base, id->level, id->block);

        /* Wake up anyone blocked on this pool and let them repeat
         * their allocation attempts
         *
         * (Note that this spinlock only exists because z_unpend_all()
         * is unsynchronized. Maybe we want to put the lock into the
         * wait_q instead and make the API safe?)
         */
        k_spinlock_key_t key = k_spin_lock(&lock);

        need_sched = z_unpend_all(&p->wait_q);

        if (need_sched != 0) {
                z_reschedule(&lock, key);
        } else {
                k_spin_unlock(&lock, key);
        }
}
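Finally, a hypothetical caller-side view of the blocking behavior implemented above (demo_pool and consumer_thread() are invented for illustration): a thread that pends on the pool is woken by any k_mem_pool_free_id() on that pool, retries its allocation, and gives up with -EAGAIN only once its overall timeout has elapsed.

#include <kernel.h>

K_MEM_POOL_DEFINE(demo_pool, 64, 256, 1, 4);    /* a single maximum-sized block */

void consumer_thread(void)
{
        struct k_mem_block block;
        int ret;

        /* Wait up to 100 ms for a 256-byte block.  While no block is free,
         * the thread pends on demo_pool's wait queue; every free on this
         * pool wakes it to retry the allocation.
         */
        ret = k_mem_pool_alloc(&demo_pool, &block, 256, K_MSEC(100));
        if (ret == 0) {
                /* ... use block.data ... */
                k_mem_pool_free(&block);        /* wakes any other waiters */
        }
}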