From 0dd83b8c2e6e3a1f7cd79887b20127322980f79e Mon Sep 17 00:00:00 2001
From: Andy Ross
Date: Fri, 3 Apr 2020 10:01:03 -0700
Subject: [PATCH] kernel: Add k_heap synchronized memory allocator

This adds a k_heap data structure, a synchronized wrapper around a
sys_heap memory allocator.  As of this patch, it is an alternative
implementation to k_mem_pool() with somewhat better efficiency and
performance and more conventional (and convenient) behavior.

Note that this commit involves some header motion to break
dependencies.  The declaration for struct k_spinlock moves to
kernel_structs.h, and a bunch of includes were trimmed.

Signed-off-by: Andy Ross
---
 include/kernel.h             | 63 ++++++++++++++++++++++++++++++++++++
 include/kernel_structs.h     | 43 ++++++++++++++++++++++++
 include/linker/common-ram.ld |  7 ++++
 include/spinlock.h           | 32 +-----------------
 include/sys/sys_heap.h       |  4 ++-
 kernel/CMakeLists.txt        |  1 +
 kernel/atomic_c.c            |  3 +-
 kernel/kheap.c               | 60 ++++++++++++++++++++++++++++++++++
 8 files changed, 180 insertions(+), 33 deletions(-)
 create mode 100644 kernel/kheap.c

diff --git a/include/kernel.h b/include/kernel.h
index 1abc88a953d..33349173cfb 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -4618,6 +4618,69 @@ static inline u32_t k_mem_slab_num_free_get(struct k_mem_slab *slab)
  * @{
  */
 
+/**
+ * @brief Initialize a k_heap
+ *
+ * This constructs a synchronized k_heap object over a memory region
+ * specified by the user.  Note that while any alignment and size can
+ * be passed as valid parameters, internal alignment restrictions
+ * inside the inner sys_heap mean that not all bytes may be usable as
+ * allocated memory.
+ *
+ * @param h Heap struct to initialize
+ * @param mem Pointer to memory
+ * @param bytes Size of memory region, in bytes
+ */
+void k_heap_init(struct k_heap *h, void *mem, size_t bytes);
+
+/**
+ * @brief Allocate memory from a k_heap
+ *
+ * Allocates and returns a memory buffer from the memory region owned
+ * by the heap.  If no memory is available immediately, the call will
+ * block for the specified timeout (constructed via the standard
+ * timeout API, or K_NO_WAIT or K_FOREVER) waiting for memory to be
+ * freed.  If the allocation cannot be performed by the expiration of
+ * the timeout, NULL will be returned.
+ *
+ * @param h Heap from which to allocate
+ * @param bytes Desired size of block to allocate
+ * @param timeout How long to wait, or K_NO_WAIT
+ * @return A pointer to valid heap memory, or NULL
+ */
+void *k_heap_alloc(struct k_heap *h, size_t bytes, k_timeout_t timeout);
+
+/**
+ * @brief Free memory allocated by k_heap_alloc()
+ *
+ * Returns the specified memory block, which must have been returned
+ * from k_heap_alloc(), to the heap for use by other callers.  Passing
+ * a NULL block is legal, and has no effect.
+ *
+ * @param h Heap to which to return the memory
+ * @param mem A valid memory block, or NULL
+ */
+void k_heap_free(struct k_heap *h, void *mem);
+
+/**
+ * @brief Define a static k_heap
+ *
+ * This macro defines and initializes a static memory region and
+ * k_heap of the requested size.  After kernel start, &name can be
+ * used as if k_heap_init() had been called.
+ *
+ * @param name Symbol name for the struct k_heap object
+ * @param bytes Size of memory region, in bytes
+ */
+#define K_HEAP_DEFINE(name, bytes)                              \
+	char __aligned(sizeof(void *)) kheap_##name[bytes];     \
+	Z_STRUCT_SECTION_ITERABLE(k_heap, name) = {             \
+		.heap = {                                       \
+			.init_mem = kheap_##name,               \
+			.init_bytes = (bytes),                  \
+		},                                              \
+	}
+
 /**
  * @brief Statically define and initialize a memory pool.
  *
diff --git a/include/kernel_structs.h b/include/kernel_structs.h
index 85ef7f6a625..e067a7f765c 100644
--- a/include/kernel_structs.h
+++ b/include/kernel_structs.h
@@ -21,10 +21,12 @@
 #define ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_
 
 #if !defined(_ASMLANGUAGE)
+#include <sys/atomic.h>
 #include <zephyr/types.h>
 #include <sched_priq.h>
 #include <sys/dlist.h>
 #include <sys/util.h>
+#include <sys/sys_heap.h>
 #endif
 
 #define K_NUM_PRIORITIES \
@@ -240,6 +242,47 @@ struct _timeout {
 	_timeout_func_t fn;
 };
 
+/* kernel spinlock type */
+
+struct k_spinlock {
+#ifdef CONFIG_SMP
+	atomic_t locked;
+#endif
+
+#ifdef CONFIG_SPIN_VALIDATE
+	/* Stores the thread that holds the lock with the locking CPU
+	 * ID in the bottom two bits.
+	 */
+	uintptr_t thread_cpu;
+#endif
+
+#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
+	!defined(CONFIG_SPIN_VALIDATE)
+	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
+	 * the k_spinlock struct will have no members. The result
+	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
+	 *
+	 * This size difference causes problems when the k_spinlock
+	 * is embedded into another struct like k_msgq, because C and
+	 * C++ will have different ideas on the offsets of the members
+	 * that come after the k_spinlock member.
+	 *
+	 * To prevent this we add a 1 byte dummy member to k_spinlock
+	 * when the user selects C++ support and k_spinlock would
+	 * otherwise be empty.
+	 */
+	char dummy;
+#endif
+};
+
+/* kernel synchronized heap struct */
+
+struct k_heap {
+	struct sys_heap heap;
+	_wait_q_t wait_q;
+	struct k_spinlock lock;
+};
+
 #endif /* _ASMLANGUAGE */
 
 #endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_STRUCTS_H_ */
diff --git a/include/linker/common-ram.ld b/include/linker/common-ram.ld
index 56fd2f6ee30..e8ca1992789 100644
--- a/include/linker/common-ram.ld
+++ b/include/linker/common-ram.ld
@@ -76,6 +76,13 @@
 		_k_mem_pool_list_end = .;
 	} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
 
+	SECTION_DATA_PROLOGUE(_k_heap_area,,SUBALIGN(4))
+	{
+		_k_heap_list_start = .;
+		KEEP(*("._k_heap.static.*"))
+		_k_heap_list_end = .;
+	} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
+
 	SECTION_DATA_PROLOGUE(_k_sem_area,,SUBALIGN(4))
 	{
 		_k_sem_list_start = .;
diff --git a/include/spinlock.h b/include/spinlock.h
index a7447b9165e..8bc2bb5b663 100644
--- a/include/spinlock.h
+++ b/include/spinlock.h
@@ -7,6 +7,7 @@
 #define ZEPHYR_INCLUDE_SPINLOCK_H_
 
 #include <sys/atomic.h>
+#include <kernel_structs.h>
 
 /* There's a spinlock validation framework available when asserts are
  * enabled.  It adds a relatively hefty overhead (about 3k or so) to
@@ -28,37 +29,6 @@ struct k_spinlock_key {
 
 typedef struct k_spinlock_key k_spinlock_key_t;
 
-struct k_spinlock {
-#ifdef CONFIG_SMP
-	atomic_t locked;
-#endif
-
-#ifdef CONFIG_SPIN_VALIDATE
-	/* Stores the thread that holds the lock with the locking CPU
-	 * ID in the bottom two bits.
-	 */
-	uintptr_t thread_cpu;
-#endif
-
-#if defined(CONFIG_CPLUSPLUS) && !defined(CONFIG_SMP) && \
-	!defined(CONFIG_SPIN_VALIDATE)
-	/* If CONFIG_SMP and CONFIG_SPIN_VALIDATE are both not defined
-	 * the k_spinlock struct will have no members. The result
-	 * is that in C sizeof(k_spinlock) is 0 and in C++ it is 1.
-	 *
-	 * This size difference causes problems when the k_spinlock
-	 * is embedded into another struct like k_msgq, because C and
-	 * C++ will have different ideas on the offsets of the members
-	 * that come after the k_spinlock member.
-	 *
-	 * To prevent this we add a 1 byte dummy member to k_spinlock
-	 * when the user selects C++ support and k_spinlock would
-	 * otherwise be empty.
-	 */
-	char dummy;
-#endif
-};
-
 static ALWAYS_INLINE k_spinlock_key_t k_spin_lock(struct k_spinlock *l)
 {
 	ARG_UNUSED(l);
diff --git a/include/sys/sys_heap.h b/include/sys/sys_heap.h
index 38c41891db6..829658ca7f8 100644
--- a/include/sys/sys_heap.h
+++ b/include/sys/sys_heap.h
@@ -6,7 +6,9 @@
 #ifndef ZEPHYR_INCLUDE_SYS_SYS_HEAP_H_
 #define ZEPHYR_INCLUDE_SYS_SYS_HEAP_H_
 
-#include <zephyr.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <zephyr/types.h>
 
 /* Simple, fast heap implementation.
  *
diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt
index 0d4995b8ebf..ed0ec4252bc 100644
--- a/kernel/CMakeLists.txt
+++ b/kernel/CMakeLists.txt
@@ -8,6 +8,7 @@ add_library(kernel
   fatal.c
   idle.c
   init.c
+  kheap.c
   mailbox.c
   mem_slab.c
   mempool.c
diff --git a/kernel/atomic_c.c b/kernel/atomic_c.c
index ee3c7889bbf..948f15b1dfb 100644
--- a/kernel/atomic_c.c
+++ b/kernel/atomic_c.c
@@ -18,10 +18,11 @@
  * (originally from x86's atomic.c)
  */
 
-#include <sys/atomic.h>
 #include <toolchain.h>
 #include <arch/cpu.h>
 #include <spinlock.h>
+#include <sys/atomic.h>
+#include <kernel_structs.h>
 
 /* Single global spinlock for atomic operations.  This is fallback
  * code, not performance sensitive.  At least by not using irq_lock()
diff --git a/kernel/kheap.c b/kernel/kheap.c
new file mode 100644
index 00000000000..8b94dc249c3
--- /dev/null
+++ b/kernel/kheap.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2020 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <kernel.h>
+#include <ksched.h>
+#include <wait_q.h>
+#include <init.h>
+
+void k_heap_init(struct k_heap *h, void *mem, size_t bytes)
+{
+	z_waitq_init(&h->wait_q);
+	sys_heap_init(&h->heap, mem, bytes);
+}
+
+static int statics_init(struct device *unused)
+{
+	ARG_UNUSED(unused);
+	Z_STRUCT_SECTION_FOREACH(k_heap, h) {
+		k_heap_init(h, h->heap.init_mem, h->heap.init_bytes);
+	}
+	return 0;
+}
+
+SYS_INIT(statics_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
+
+void *k_heap_alloc(struct k_heap *h, size_t bytes, k_timeout_t timeout)
+{
+	s64_t now, end = z_timeout_end_calc(timeout);
+	void *ret = NULL;
+	k_spinlock_key_t key = k_spin_lock(&h->lock);
+
+	__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");
+
+	while (ret == NULL) {
+		ret = sys_heap_alloc(&h->heap, bytes);
+
+		now = z_tick_get();
+		if ((ret != NULL) || ((end - now) <= 0)) {
+			break;
+		}
+
+		(void) z_pend_curr(&h->lock, key, &h->wait_q,
+				   K_TICKS(end - now));
+		key = k_spin_lock(&h->lock);
+	}
+
+	k_spin_unlock(&h->lock, key);
+	return ret;
+}
+
+void k_heap_free(struct k_heap *h, void *mem)
+{
+	k_spinlock_key_t key = k_spin_lock(&h->lock);
+
+	sys_heap_free(&h->heap, mem);
+	k_spin_unlock(&h->lock, key);
+}
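
Usage sketch (editor's note, not part of the patch): a minimal
consumer of the API added above.  The heap size, block size, timeout
and the my_heap/my_worker names are arbitrary choices for
illustration; only K_HEAP_DEFINE(), k_heap_alloc(), k_heap_free() and
k_heap_init() come from the patch itself.

	#include <kernel.h>

	/* 1 KiB static heap; initialized by statics_init() at
	 * PRE_KERNEL_1, i.e. before application code runs.
	 */
	K_HEAP_DEFINE(my_heap, 1024);

	void my_worker(void)
	{
		/* Block for up to 100ms waiting for a 128 byte chunk */
		void *block = k_heap_alloc(&my_heap, 128, K_MSEC(100));

		if (block != NULL) {
			/* ... use the block ... */
			k_heap_free(&my_heap, block);
		}
	}

A heap over a caller-provided buffer works the same way after an
explicit k_heap_init(&heap, mem, bytes) call at runtime.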