kernel: kheap: decouple simple alloc from aligned_alloc

When k_heap_alloc() is expressed in terms of k_heap_aligned_alloc()
it invokes a longer aligned-allocation code path, incurring extra
runtime overhead even though no alignment is necessary.

Let's reference and invoke the aligned allocation code path only when an
actual aligned allocation is requested. This opens the possibility for
the linker to garbage-collect the aligning code otherwise.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
This commit is contained in:
Nicolas Pitre 2025-03-10 17:26:40 -04:00 committed by Benjamin Cabé
commit 9da06456f2
9 changed files with 50 additions and 18 deletions

View file

@@ -138,6 +138,18 @@ void *sys_heap_alloc(struct sys_heap *heap, size_t bytes);
*/
void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes);
/** @brief Allocate memory from a sys_heap
*
* This is a wrapper for sys_heap_alloc() whose purpose is to provide the same
* function signature as sys_heap_aligned_alloc().
*
* @param heap Heap from which to allocate
* @param align Ignored placeholder
* @param bytes Number of bytes requested
* @return Pointer to memory the caller can now use
*/
void *sys_heap_noalign_alloc(struct sys_heap *heap, size_t align, size_t bytes);
/** @brief Free memory into a sys_heap
*
* De-allocates a pointer to memory previously returned from

View file

@@ -1753,7 +1753,7 @@
* @param h Heap object
* @param timeout Timeout period
*/
#define sys_port_trace_k_heap_aligned_alloc_blocking(h, timeout)
#define sys_port_trace_k_heap_alloc_helper_blocking(h, timeout)
/**
* @brief Trace Heap align alloc attempt outcome

View file

@@ -63,22 +63,23 @@ SYS_INIT_NAMED(statics_init_pre, statics_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_
SYS_INIT_NAMED(statics_init_post, statics_init, POST_KERNEL, 0);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
void *k_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t bytes,
k_timeout_t timeout)
typedef void * (sys_heap_allocator_t)(struct sys_heap *heap, size_t align, size_t bytes);
static void *z_heap_alloc_helper(struct k_heap *heap, size_t align, size_t bytes,
k_timeout_t timeout,
sys_heap_allocator_t *sys_heap_allocator)
{
k_timepoint_t end = sys_timepoint_calc(timeout);
void *ret = NULL;
k_spinlock_key_t key = k_spin_lock(&heap->lock);
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, aligned_alloc, heap, timeout);
__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");
bool blocked_alloc = false;
while (ret == NULL) {
ret = sys_heap_aligned_alloc(&heap->heap, align, bytes);
ret = sys_heap_allocator(&heap->heap, align, bytes);
if (!IS_ENABLED(CONFIG_MULTITHREADING) ||
(ret != NULL) || K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
@@ -88,7 +89,7 @@ void *k_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t bytes,
if (!blocked_alloc) {
blocked_alloc = true;
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_heap, aligned_alloc, heap, timeout);
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_heap, alloc_helper, heap, timeout);
} else {
/**
* @todo Trace attempt to avoid empty trace segments
@@ -100,8 +101,6 @@ void *k_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t bytes,
key = k_spin_lock(&heap->lock);
}
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, aligned_alloc, heap, timeout, ret);
k_spin_unlock(&heap->lock, key);
return ret;
}
@@ -110,13 +109,27 @@ void *k_heap_alloc(struct k_heap *heap, size_t bytes, k_timeout_t timeout)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, alloc, heap, timeout);
void *ret = k_heap_aligned_alloc(heap, sizeof(void *), bytes, timeout);
void *ret = z_heap_alloc_helper(heap, 0, bytes, timeout,
sys_heap_noalign_alloc);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, alloc, heap, timeout, ret);
return ret;
}
/* Allocate @bytes from @heap aligned on an @align byte boundary, waiting
 * up to @timeout for memory to become available.  All of the locking,
 * retry and timeout logic lives in z_heap_alloc_helper(); this wrapper
 * only selects sys_heap_aligned_alloc() as the low-level allocator and
 * emits the aligned_alloc trace events.
 */
void *k_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t bytes,
k_timeout_t timeout)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, aligned_alloc, heap, timeout);
/* Reference the aligned allocator only here so the linker can
 * garbage-collect it when no aligned allocation is ever requested.
 */
void *ret = z_heap_alloc_helper(heap, align, bytes, timeout,
sys_heap_aligned_alloc);
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, aligned_alloc, heap, timeout, ret);
return ret;
}
void *k_heap_calloc(struct k_heap *heap, size_t num, size_t size, k_timeout_t timeout)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, calloc, heap, timeout);
@@ -148,7 +161,7 @@ void *k_heap_realloc(struct k_heap *heap, void *ptr, size_t bytes, k_timeout_t t
__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");
while (ret == NULL) {
ret = sys_heap_aligned_realloc(&heap->heap, ptr, sizeof(void *), bytes);
ret = sys_heap_realloc(&heap->heap, ptr, bytes);
if (!IS_ENABLED(CONFIG_MULTITHREADING) ||
(ret != NULL) || K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {

View file

@@ -298,6 +298,13 @@ void *sys_heap_alloc(struct sys_heap *heap, size_t bytes)
return mem;
}
/* Wrapper for sys_heap_alloc() that matches the sys_heap_aligned_alloc()
 * function signature so both can be passed as a sys_heap_allocator_t.
 * The @align argument is a placeholder and is ignored.
 */
void *sys_heap_noalign_alloc(struct sys_heap *heap, size_t align, size_t bytes)
{
ARG_UNUSED(align);
return sys_heap_alloc(heap, bytes);
}
void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes)
{
struct z_heap *h = heap->heap;

View file

@@ -311,7 +311,7 @@ extern "C" {
#define sys_port_trace_k_heap_init(heap)
#define sys_port_trace_k_heap_aligned_alloc_enter(heap, timeout)
#define sys_port_trace_k_heap_aligned_alloc_blocking(heap, timeout)
#define sys_port_trace_k_heap_alloc_helper_blocking(heap, timeout)
#define sys_port_trace_k_heap_aligned_alloc_exit(heap, timeout, ret)
#define sys_port_trace_k_heap_alloc_enter(heap, timeout)
#define sys_port_trace_k_heap_alloc_exit(heap, timeout, ret)

View file

@@ -573,7 +573,7 @@ void sys_trace_thread_info(struct k_thread *thread);
SEGGER_SYSVIEW_RecordU32x2(TID_HEAP_ALIGNED_ALLOC, (uint32_t)(uintptr_t)heap, \
(uint32_t)timeout.ticks)
#define sys_port_trace_k_heap_aligned_alloc_blocking(heap, timeout)
#define sys_port_trace_k_heap_alloc_helper_blocking(heap, timeout)
#define sys_port_trace_k_heap_aligned_alloc_exit(heap, timeout, ret) \
SEGGER_SYSVIEW_RecordEndCallU32(TID_HEAP_ALIGNED_ALLOC, (uint32_t)ret)

View file

@@ -381,7 +381,7 @@ void sys_trace_k_heap_realloc_exit(struct k_heap *h, void *ptr, size_t bytes, k_
TRACING_STRING("%s: %p\n", __func__, h);
}
void sys_trace_k_heap_aligned_alloc_blocking(struct k_heap *h, size_t bytes, k_timeout_t timeout)
/* Trace hook: a k_heap allocation helper is about to block waiting for
 * memory.  Only the heap pointer is logged; @bytes and @timeout are part
 * of the common trace-hook signature but unused here.
 */
void sys_trace_k_heap_alloc_helper_blocking(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
TRACING_STRING("%s: %p\n", __func__, h);
}

View file

@@ -388,8 +388,8 @@
#define sys_port_trace_k_heap_init(h) sys_trace_k_heap_init(h, mem, bytes)
#define sys_port_trace_k_heap_aligned_alloc_enter(h, timeout) \
sys_trace_k_heap_aligned_alloc_enter(h, bytes, timeout)
#define sys_port_trace_k_heap_aligned_alloc_blocking(h, timeout) \
sys_trace_k_heap_aligned_alloc_blocking(h, bytes, timeout)
#define sys_port_trace_k_heap_alloc_helper_blocking(h, timeout) \
sys_trace_k_heap_alloc_helper_blocking(h, bytes, timeout)
#define sys_port_trace_k_heap_aligned_alloc_exit(h, timeout, ret) \
sys_trace_k_heap_aligned_alloc_exit(h, bytes, timeout, ret)
#define sys_port_trace_k_heap_alloc_enter(h, timeout) \
@@ -697,7 +697,7 @@ void sys_trace_k_heap_calloc_enter(struct k_heap *h, size_t num, size_t size, k_
void sys_trace_k_heap_calloc_exit(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout,
void *ret);
void sys_trace_k_heap_aligned_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout);
void sys_trace_k_heap_aligned_alloc_blocking(struct k_heap *h, size_t bytes, k_timeout_t timeout);
void sys_trace_k_heap_alloc_helper_blocking(struct k_heap *h, size_t bytes, k_timeout_t timeout);
void sys_trace_k_heap_aligned_alloc_exit(struct k_heap *h, size_t bytes, k_timeout_t timeout,
void *ret);
void sys_trace_k_heap_free(struct k_heap *h, void *mem);

View file

@@ -352,7 +352,7 @@ void sys_trace_gpio_fire_callback_user(const struct device *port, struct gpio_ca
#define sys_port_trace_k_heap_init(heap)
#define sys_port_trace_k_heap_aligned_alloc_enter(heap, timeout)
#define sys_port_trace_k_heap_aligned_alloc_blocking(heap, timeout)
#define sys_port_trace_k_heap_alloc_helper_blocking(heap, timeout)
#define sys_port_trace_k_heap_aligned_alloc_exit(heap, timeout, ret)
#define sys_port_trace_k_heap_alloc_enter(heap, timeout)
#define sys_port_trace_k_heap_alloc_exit(heap, timeout, ret)