kernel: Refactor k_mem_pool APIs into a base and derived level

Almost all of the k_mem_pool API is implemented in terms of three
lower level primitives: K_MEM_POOL_DEFINE(), k_mem_pool_alloc() and
k_mem_pool_free_id().  These are themselves implemented on top of the
lower level sys_mem_pool abstraction.

Make this layering explicit by splitting the low level out into its
own files: mempool_sys.c/h.

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
Andy Ross 2020-03-31 09:23:11 -07:00 committed by Andrew Boie
commit e96ac9061f
6 changed files with 169 additions and 155 deletions

View file

@@ -4074,30 +4074,6 @@ static inline u32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
/** @} */ /** @} */
/**
* @defgroup mem_pool_apis Memory Pool APIs
* @ingroup kernel_apis
* @{
*/
/* Note on sizing: the use of a 20 bit field for block means that,
* assuming a reasonable minimum block size of 16 bytes, we're limited
* to 16M of memory managed by a single pool. Long term it would be
* good to move to a variable bit size based on configuration.
*/
struct k_mem_block_id {
u32_t pool : 8;
u32_t level : 4;
u32_t block : 20;
};
struct k_mem_block {
void *data;
struct k_mem_block_id id;
};
/** @} */
/** /**
* @defgroup mailbox_apis Mailbox APIs * @defgroup mailbox_apis Mailbox APIs
* @ingroup kernel_apis * @ingroup kernel_apis
@@ -4637,19 +4613,6 @@ static inline u32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
/** @} */ /** @} */
/**
* @cond INTERNAL_HIDDEN
*/
struct k_mem_pool {
struct sys_mem_pool_base base;
_wait_q_t wait_q;
};
/**
* INTERNAL_HIDDEN @endcond
*/
/** /**
* @addtogroup mem_pool_apis * @addtogroup mem_pool_apis
* @{ * @{
@@ -4674,21 +4637,8 @@ struct k_mem_pool {
* @param nmax Number of maximum sized blocks in the pool. * @param nmax Number of maximum sized blocks in the pool.
* @param align Alignment of the pool's buffer (power of 2). * @param align Alignment of the pool's buffer (power of 2).
*/ */
#define K_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \ #define K_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
char __aligned(WB_UP(align)) _mpool_buf_##name[WB_UP(maxsz) * nmax \ Z_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align)
+ _MPOOL_BITS_SIZE(maxsz, minsz, nmax)]; \
struct sys_mem_pool_lvl _mpool_lvls_##name[Z_MPOOL_LVLS(maxsz, minsz)]; \
Z_STRUCT_SECTION_ITERABLE(k_mem_pool, name) = { \
.base = { \
.buf = _mpool_buf_##name, \
.max_sz = WB_UP(maxsz), \
.n_max = nmax, \
.n_levels = Z_MPOOL_LVLS(maxsz, minsz), \
.levels = _mpool_lvls_##name, \
.flags = SYS_MEM_POOL_KERNEL \
} \
}; \
BUILD_ASSERT(WB_UP(maxsz) >= _MPOOL_MINBLK, "K_MEM_POOL_DEFINE: size of the largest block (parameter maxsz) is too small")
/** /**
* @brief Allocate memory from a memory pool. * @brief Allocate memory from a memory pool.

View file

@@ -27,6 +27,7 @@
#include <sys/util.h> #include <sys/util.h>
#include <sys/mempool_base.h> #include <sys/mempool_base.h>
#include <kernel_structs.h> #include <kernel_structs.h>
#include <mempool_sys.h>
#include <kernel_version.h> #include <kernel_version.h>
#include <random/rand32.h> #include <random/rand32.h>
#include <syscall.h> #include <syscall.h>

55
include/mempool_sys.h Normal file
View file

@@ -0,0 +1,55 @@
/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_MEMPOOL_SYS_H_
/* BUGFIX: the guard macro was tested but never defined, so the include
 * guard never armed and a second inclusion would redefine the structs
 * and the Z_MEM_POOL_DEFINE() macro below.
 */
#define ZEPHYR_INCLUDE_MEMPOOL_SYS_H_

/**
 * @defgroup mem_pool_apis Memory Pool APIs
 * @ingroup kernel_apis
 * @{
 */

/* Note on sizing: the use of a 20 bit field for block means that,
 * assuming a reasonable minimum block size of 16 bytes, we're limited
 * to 16M of memory managed by a single pool. Long term it would be
 * good to move to a variable bit size based on configuration.
 */

/* Compact handle identifying one block inside one pool: which pool,
 * which buddy-allocator level, and which block index on that level.
 */
struct k_mem_block_id {
	u32_t pool : 8;
	u32_t level : 4;
	u32_t block : 20;
};

/* A block as handed to the user: the data pointer plus the id needed
 * to return the block to its pool on free.
 */
struct k_mem_block {
	void *data;
	struct k_mem_block_id id;
};

/** @} */

/* Kernel-level pool: the underlying sys_mem_pool state plus a wait
 * queue for threads blocked waiting for a block to become available.
 */
struct k_mem_pool {
	struct sys_mem_pool_base base;
	_wait_q_t wait_q;
};

/* Statically define a kernel memory pool: the backing buffer (sized
 * for nmax maximum-size blocks plus the allocator's bookkeeping bits),
 * the per-level metadata array, and the iterable-section pool object
 * picked up by init_static_pools() at boot.
 */
#define Z_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
	char __aligned(WB_UP(align)) _mpool_buf_##name[WB_UP(maxsz) * nmax \
				  + _MPOOL_BITS_SIZE(maxsz, minsz, nmax)]; \
	struct sys_mem_pool_lvl \
		_mpool_lvls_##name[Z_MPOOL_LVLS(maxsz, minsz)]; \
	Z_STRUCT_SECTION_ITERABLE(k_mem_pool, name) = { \
		.base = { \
			.buf = _mpool_buf_##name, \
			.max_sz = WB_UP(maxsz), \
			.n_max = nmax, \
			.n_levels = Z_MPOOL_LVLS(maxsz, minsz), \
			.levels = _mpool_lvls_##name, \
			.flags = SYS_MEM_POOL_KERNEL \
		} \
	}; \
	BUILD_ASSERT(WB_UP(maxsz) >= _MPOOL_MINBLK)

#endif /* ZEPHYR_INCLUDE_MEMPOOL_SYS_H_ */

View file

@@ -11,6 +11,7 @@ add_library(kernel
mailbox.c mailbox.c
mem_slab.c mem_slab.c
mempool.c mempool.c
mempool_sys.c
msg_q.c msg_q.c
mutex.c mutex.c
pipes.c pipes.c

View file

@@ -5,111 +5,8 @@
*/ */
#include <kernel.h> #include <kernel.h>
#include <ksched.h>
#include <wait_q.h>
#include <init.h>
#include <string.h> #include <string.h>
#include <sys/__assert.h>
#include <sys/math_extras.h> #include <sys/math_extras.h>
#include <stdbool.h>
static struct k_spinlock lock;
static struct k_mem_pool *get_pool(int id)
{
extern struct k_mem_pool _k_mem_pool_list_start[];
return &_k_mem_pool_list_start[id];
}
static int pool_id(struct k_mem_pool *pool)
{
extern struct k_mem_pool _k_mem_pool_list_start[];
return pool - &_k_mem_pool_list_start[0];
}
static void k_mem_pool_init(struct k_mem_pool *p)
{
z_waitq_init(&p->wait_q);
z_sys_mem_pool_base_init(&p->base);
}
int init_static_pools(struct device *unused)
{
ARG_UNUSED(unused);
Z_STRUCT_SECTION_FOREACH(k_mem_pool, p) {
k_mem_pool_init(p);
}
return 0;
}
SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
size_t size, k_timeout_t timeout)
{
int ret;
u64_t end = 0;
__ASSERT(!(arch_is_in_isr() && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
end = z_timeout_end_calc(timeout);
while (true) {
u32_t level_num, block_num;
ret = z_sys_mem_pool_block_alloc(&p->base, size,
&level_num, &block_num,
&block->data);
block->id.pool = pool_id(p);
block->id.level = level_num;
block->id.block = block_num;
if (ret == 0 || K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
ret != -ENOMEM) {
return ret;
}
z_pend_curr_unlocked(&p->wait_q, timeout);
if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
s64_t remaining = end - z_tick_get();
if (remaining <= 0) {
break;
}
timeout = Z_TIMEOUT_TICKS(remaining);
}
}
return -EAGAIN;
}
void k_mem_pool_free_id(struct k_mem_block_id *id)
{
int need_sched = 0;
struct k_mem_pool *p = get_pool(id->pool);
z_sys_mem_pool_block_free(&p->base, id->level, id->block);
/* Wake up anyone blocked on this pool and let them repeat
* their allocation attempts
*
* (Note that this spinlock only exists because z_unpend_all()
* is unsynchronized. Maybe we want to put the lock into the
* wait_q instead and make the API safe?)
*/
k_spinlock_key_t key = k_spin_lock(&lock);
need_sched = z_unpend_all(&p->wait_q);
if (need_sched != 0) {
z_reschedule(&lock, key);
} else {
k_spin_unlock(&lock, key);
}
}
void k_mem_pool_free(struct k_mem_block *block) void k_mem_pool_free(struct k_mem_block *block)
{ {

110
kernel/mempool_sys.c Normal file
View file

@@ -0,0 +1,110 @@
/*
* Copyright (c) 2017, 2020 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <ksched.h>
#include <wait_q.h>
#include <init.h>
static struct k_spinlock lock;
/* Map a pool ID (an index stored in a k_mem_block_id) back to the
 * corresponding k_mem_pool object in the linker-generated pool array.
 */
static struct k_mem_pool *get_pool(int id)
{
	extern struct k_mem_pool _k_mem_pool_list_start[];

	return _k_mem_pool_list_start + id;
}
/* Inverse of get_pool(): compute the index of @pool within the
 * linker-generated pool array, for storage in a k_mem_block_id.
 */
static int pool_id(struct k_mem_pool *pool)
{
	extern struct k_mem_pool _k_mem_pool_list_start[];

	return (int)(pool - _k_mem_pool_list_start);
}
/* One-time initialization of a kernel pool: prepare the wait queue
 * used by blocked allocators, then the underlying sys_mem_pool state.
 */
static void k_mem_pool_init(struct k_mem_pool *pool)
{
	z_waitq_init(&pool->wait_q);
	z_sys_mem_pool_base_init(&pool->base);
}
int init_static_pools(struct device *unused)
{
ARG_UNUSED(unused);
Z_STRUCT_SECTION_FOREACH(k_mem_pool, p) {
k_mem_pool_init(p);
}
return 0;
}
SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
/* Allocate a block of at least @size bytes from pool @p, filling in
 * @block (data pointer and id) on success.  May block up to @timeout
 * waiting for a freeing thread to release memory; returns 0 on
 * success, -ENOMEM-derived errors immediately for non-waitable
 * failures, or -EAGAIN if the timeout expires.
 */
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
size_t size, k_timeout_t timeout)
{
int ret;
u64_t end = 0;
/* Blocking allocation is forbidden in ISR context */
__ASSERT(!(arch_is_in_isr() && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
/* Absolute deadline in ticks, so the remaining wait can be
 * recomputed after each wakeup below.
 */
end = z_timeout_end_calc(timeout);
while (true) {
u32_t level_num, block_num;
ret = z_sys_mem_pool_block_alloc(&p->base, size,
&level_num, &block_num,
&block->data);
/* The id is filled in unconditionally; it is only
 * meaningful to the caller when ret == 0.
 */
block->id.pool = pool_id(p);
block->id.level = level_num;
block->id.block = block_num;
/* Return on success, on any failure other than -ENOMEM
 * (waiting cannot help those), or when the caller asked
 * not to wait at all.
 */
if (ret == 0 || K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
ret != -ENOMEM) {
return ret;
}
/* Out of memory: sleep until k_mem_pool_free_id() wakes
 * all waiters, then retry the allocation.
 */
z_pend_curr_unlocked(&p->wait_q, timeout);
if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
/* Shrink the timeout by the time already spent
 * pending so repeated retries honor the original
 * deadline.
 */
s64_t remaining = end - z_tick_get();
if (remaining <= 0) {
break;
}
timeout = Z_TIMEOUT_TICKS(remaining);
}
}
return -EAGAIN;
}
/* Return the block identified by @id to its pool, then wake every
 * thread pending in k_mem_pool_alloc() so they can retry now that
 * memory has been released.
 */
void k_mem_pool_free_id(struct k_mem_block_id *id)
{
int need_sched = 0;
/* The pool is recovered from the 8-bit pool index in the id */
struct k_mem_pool *p = get_pool(id->pool);
z_sys_mem_pool_block_free(&p->base, id->level, id->block);
/* Wake up anyone blocked on this pool and let them repeat
 * their allocation attempts
 *
 * (Note that this spinlock only exists because z_unpend_all()
 * is unsynchronized. Maybe we want to put the lock into the
 * wait_q instead and make the API safe?)
 */
k_spinlock_key_t key = k_spin_lock(&lock);
need_sched = z_unpend_all(&p->wait_q);
/* Only invoke the scheduler if a thread was actually woken;
 * z_reschedule() releases the lock itself on that path.
 */
if (need_sched != 0) {
z_reschedule(&lock, key);
} else {
k_spin_unlock(&lock, key);
}
}