diff --git a/include/kernel.h b/include/kernel.h
index 88775f44125..1abc88a953d 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -4074,30 +4074,6 @@ static inline u32_t z_impl_k_msgq_num_used_get(struct k_msgq *msgq)
 /** @} */
 
-/**
- * @defgroup mem_pool_apis Memory Pool APIs
- * @ingroup kernel_apis
- * @{
- */
-
-/* Note on sizing: the use of a 20 bit field for block means that,
- * assuming a reasonable minimum block size of 16 bytes, we're limited
- * to 16M of memory managed by a single pool. Long term it would be
- * good to move to a variable bit size based on configuration.
- */
-struct k_mem_block_id {
-	u32_t pool : 8;
-	u32_t level : 4;
-	u32_t block : 20;
-};
-
-struct k_mem_block {
-	void *data;
-	struct k_mem_block_id id;
-};
-
-/** @} */
-
 /**
  * @defgroup mailbox_apis Mailbox APIs
  * @ingroup kernel_apis
  * @{
@@ -4637,19 +4613,6 @@ static inline u32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
 
 /** @} */
 
-/**
- * @cond INTERNAL_HIDDEN
- */
-
-struct k_mem_pool {
-	struct sys_mem_pool_base base;
-	_wait_q_t wait_q;
-};
-
-/**
- * INTERNAL_HIDDEN @endcond
- */
-
 /**
  * @addtogroup mem_pool_apis
  * @{
@@ -4674,21 +4637,8 @@ struct k_mem_pool
  * @param nmax Number of maximum sized blocks in the pool.
  * @param align Alignment of the pool's buffer (power of 2).
  */
-#define K_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
-	char __aligned(WB_UP(align)) _mpool_buf_##name[WB_UP(maxsz) * nmax \
-				+ _MPOOL_BITS_SIZE(maxsz, minsz, nmax)]; \
-	struct sys_mem_pool_lvl _mpool_lvls_##name[Z_MPOOL_LVLS(maxsz, minsz)]; \
-	Z_STRUCT_SECTION_ITERABLE(k_mem_pool, name) = { \
-		.base = { \
-			.buf = _mpool_buf_##name, \
-			.max_sz = WB_UP(maxsz), \
-			.n_max = nmax, \
-			.n_levels = Z_MPOOL_LVLS(maxsz, minsz), \
-			.levels = _mpool_lvls_##name, \
-			.flags = SYS_MEM_POOL_KERNEL \
-		} \
-	}; \
-	BUILD_ASSERT(WB_UP(maxsz) >= _MPOOL_MINBLK, "K_MEM_POOL_DEFINE: size of the largest block (parameter maxsz) is too small")
+#define K_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
+	Z_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align)
 
 /**
  * @brief Allocate memory from a memory pool.
diff --git a/include/kernel_includes.h b/include/kernel_includes.h
index 03e39b5791a..a3096dba0a0 100644
--- a/include/kernel_includes.h
+++ b/include/kernel_includes.h
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include <mempool_sys.h>
 #include
 #include
 #include
diff --git a/include/mempool_sys.h b/include/mempool_sys.h
new file mode 100644
index 00000000000..7c158130af7
--- /dev/null
+++ b/include/mempool_sys.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2020 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef ZEPHYR_INCLUDE_MEMPOOL_SYS_H_
+#define ZEPHYR_INCLUDE_MEMPOOL_SYS_H_
+
+/**
+ * @defgroup mem_pool_apis Memory Pool APIs
+ * @ingroup kernel_apis
+ * @{
+ */
+
+/* Note on sizing: the use of a 20 bit field for block means that,
+ * assuming a reasonable minimum block size of 16 bytes, we're limited
+ * to 16M of memory managed by a single pool. Long term it would be
+ * good to move to a variable bit size based on configuration.
+ */
+struct k_mem_block_id {
+	u32_t pool : 8;
+	u32_t level : 4;
+	u32_t block : 20;
+};
+
+struct k_mem_block {
+	void *data;
+	struct k_mem_block_id id;
+};
+
+/** @} */
+
+struct k_mem_pool {
+	struct sys_mem_pool_base base;
+	_wait_q_t wait_q;
+};
+
+#define Z_MEM_POOL_DEFINE(name, minsz, maxsz, nmax, align) \
+	char __aligned(WB_UP(align)) _mpool_buf_##name[WB_UP(maxsz) * nmax \
+				  + _MPOOL_BITS_SIZE(maxsz, minsz, nmax)]; \
+	struct sys_mem_pool_lvl \
+		_mpool_lvls_##name[Z_MPOOL_LVLS(maxsz, minsz)]; \
+	Z_STRUCT_SECTION_ITERABLE(k_mem_pool, name) = { \
+		.base = { \
+			.buf = _mpool_buf_##name, \
+			.max_sz = WB_UP(maxsz), \
+			.n_max = nmax, \
+			.n_levels = Z_MPOOL_LVLS(maxsz, minsz), \
+			.levels = _mpool_lvls_##name, \
+			.flags = SYS_MEM_POOL_KERNEL \
+		} \
+	}; \
+	BUILD_ASSERT(WB_UP(maxsz) >= _MPOOL_MINBLK)
+
+#endif /* ZEPHYR_INCLUDE_MEMPOOL_SYS_H_ */
diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt
index 55e20289abf..0d4995b8ebf 100644
--- a/kernel/CMakeLists.txt
+++ b/kernel/CMakeLists.txt
@@ -11,6 +11,7 @@ add_library(kernel
   mailbox.c
   mem_slab.c
   mempool.c
+  mempool_sys.c
   msg_q.c
   mutex.c
   pipes.c
diff --git a/kernel/mempool.c b/kernel/mempool.c
index 49df124506e..304b39f5683 100644
--- a/kernel/mempool.c
+++ b/kernel/mempool.c
@@ -5,111 +5,8 @@
  */
 
 #include
-#include
-#include
-#include
 #include
-#include
 #include
-#include
-
-static struct k_spinlock lock;
-
-static struct k_mem_pool *get_pool(int id)
-{
-	extern struct k_mem_pool _k_mem_pool_list_start[];
-	return &_k_mem_pool_list_start[id];
-}
-
-static int pool_id(struct k_mem_pool *pool)
-{
-	extern struct k_mem_pool _k_mem_pool_list_start[];
-	return pool - &_k_mem_pool_list_start[0];
-}
-
-static void k_mem_pool_init(struct k_mem_pool *p)
-{
-	z_waitq_init(&p->wait_q);
-	z_sys_mem_pool_base_init(&p->base);
-}
-
-int init_static_pools(struct device *unused)
-{
-	ARG_UNUSED(unused);
-
-	Z_STRUCT_SECTION_FOREACH(k_mem_pool, p) {
-		k_mem_pool_init(p);
-	}
-
-	return 0;
-}
-
-SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
-
-int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
-		     size_t size, k_timeout_t timeout)
-{
-	int ret;
-	u64_t end = 0;
-
-	__ASSERT(!(arch_is_in_isr() && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
-
-	end = z_timeout_end_calc(timeout);
-
-	while (true) {
-		u32_t level_num, block_num;
-
-		ret = z_sys_mem_pool_block_alloc(&p->base, size,
-						 &level_num, &block_num,
-						 &block->data);
-		block->id.pool = pool_id(p);
-		block->id.level = level_num;
-		block->id.block = block_num;
-
-		if (ret == 0 || K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
-		    ret != -ENOMEM) {
-			return ret;
-		}
-
-		z_pend_curr_unlocked(&p->wait_q, timeout);
-
-		if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
-			s64_t remaining = end - z_tick_get();
-
-			if (remaining <= 0) {
-				break;
-			}
-			timeout = Z_TIMEOUT_TICKS(remaining);
-		}
-	}
-
-	return -EAGAIN;
-}
-
-void k_mem_pool_free_id(struct k_mem_block_id *id)
-{
-	int need_sched = 0;
-	struct k_mem_pool *p = get_pool(id->pool);
-
-	z_sys_mem_pool_block_free(&p->base, id->level, id->block);
-
-	/* Wake up anyone blocked on this pool and let them repeat
-	 * their allocation attempts
-	 *
-	 * (Note that this spinlock only exists because z_unpend_all()
-	 * is unsynchronized. Maybe we want to put the lock into the
-	 * wait_q instead and make the API safe?)
-	 */
-	k_spinlock_key_t key = k_spin_lock(&lock);
-
-	need_sched = z_unpend_all(&p->wait_q);
-
-	if (need_sched != 0) {
-		z_reschedule(&lock, key);
-	} else {
-		k_spin_unlock(&lock, key);
-	}
-}
 
 void k_mem_pool_free(struct k_mem_block *block)
 {
diff --git a/kernel/mempool_sys.c b/kernel/mempool_sys.c
new file mode 100644
index 00000000000..da78107ece5
--- /dev/null
+++ b/kernel/mempool_sys.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2017, 2020 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include
+#include
+#include
+#include
+
+static struct k_spinlock lock;
+
+static struct k_mem_pool *get_pool(int id)
+{
+	extern struct k_mem_pool _k_mem_pool_list_start[];
+
+	return &_k_mem_pool_list_start[id];
+}
+
+static int pool_id(struct k_mem_pool *pool)
+{
+	extern struct k_mem_pool _k_mem_pool_list_start[];
+
+	return pool - &_k_mem_pool_list_start[0];
+}
+
+static void k_mem_pool_init(struct k_mem_pool *p)
+{
+	z_waitq_init(&p->wait_q);
+	z_sys_mem_pool_base_init(&p->base);
+}
+
+int init_static_pools(struct device *unused)
+{
+	ARG_UNUSED(unused);
+
+	Z_STRUCT_SECTION_FOREACH(k_mem_pool, p) {
+		k_mem_pool_init(p);
+	}
+
+	return 0;
+}
+
+SYS_INIT(init_static_pools, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
+
+int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
+		     size_t size, k_timeout_t timeout)
+{
+	int ret;
+	u64_t end = 0;
+
+	__ASSERT(!(arch_is_in_isr() && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)), "");
+
+	end = z_timeout_end_calc(timeout);
+
+	while (true) {
+		u32_t level_num, block_num;
+
+		ret = z_sys_mem_pool_block_alloc(&p->base, size,
+						 &level_num, &block_num,
+						 &block->data);
+		block->id.pool = pool_id(p);
+		block->id.level = level_num;
+		block->id.block = block_num;
+
+		if (ret == 0 || K_TIMEOUT_EQ(timeout, K_NO_WAIT) ||
+		    ret != -ENOMEM) {
+			return ret;
+		}
+
+		z_pend_curr_unlocked(&p->wait_q, timeout);
+
+		if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
+			s64_t remaining = end - z_tick_get();
+
+			if (remaining <= 0) {
+				break;
+			}
+			timeout = Z_TIMEOUT_TICKS(remaining);
+		}
+	}
+
+	return -EAGAIN;
+}
+
+void k_mem_pool_free_id(struct k_mem_block_id *id)
+{
+	int need_sched = 0;
+	struct k_mem_pool *p = get_pool(id->pool);
+
+	z_sys_mem_pool_block_free(&p->base, id->level, id->block);
+
+	/* Wake up anyone blocked on this pool and let them repeat
+	 * their allocation attempts
+	 *
+	 * (Note that this spinlock only exists because z_unpend_all()
+	 * is unsynchronized. Maybe we want to put the lock into the
+	 * wait_q instead and make the API safe?)
+	 */
+	k_spinlock_key_t key = k_spin_lock(&lock);
+
+	need_sched = z_unpend_all(&p->wait_q);
+
+	if (need_sched != 0) {
+		z_reschedule(&lock, key);
+	} else {
+		k_spin_unlock(&lock, key);
+	}
+}
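
Note (not part of the patch): the refactor moves the implementation into mempool_sys.{h,c} but leaves the public memory pool API unchanged, since K_MEM_POOL_DEFINE() now simply expands to Z_MEM_POOL_DEFINE(). As a reference point, here is a minimal usage sketch of that API as it exists in this era of the tree (u32_t types, k_timeout_t timeouts); the pool name, block sizes, and timeout value are illustrative only, not taken from the patch.

/* Illustrative sketch only: my_pool, the sizes, and the timeout are hypothetical. */
#include <kernel.h>

/* Buddy pool with minimum block size 64 bytes, maximum 1024 bytes,
 * four maximum-size blocks, buffer aligned to 4 bytes.
 */
K_MEM_POOL_DEFINE(my_pool, 64, 1024, 4, 4);

void demo(void)
{
	struct k_mem_block block;

	/* Request 200 bytes; if the pool is exhausted, k_mem_pool_alloc()
	 * pends on the pool's wait_q for up to 100 ms before returning
	 * -EAGAIN (per the retry loop in k_mem_pool_alloc() above).
	 */
	if (k_mem_pool_alloc(&my_pool, &block, 200, K_MSEC(100)) != 0) {
		return;
	}

	/* ... use block.data ... */

	/* Freeing the block wakes any threads pending on the pool
	 * (k_mem_pool_free_id() calls z_unpend_all() on the wait_q).
	 */
	k_mem_pool_free(&block);
}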