soc: nordic: common: dmm: Fix memory utilization

DMM was enforcing cache line alignment for all memory regions, including
those which were not cacheable. Fix it by using the memory attribute
from the device tree to determine if alignment needs to be applied.

Because of that, memory usage was significantly increased: even
1-byte buffers (e.g. for uart_poll_out) were consuming 32 bytes
(the cache line size).

Signed-off-by: Krzysztof Chruściński <krzysztof.chruscinski@nordicsemi.no>
This commit is contained in:
Krzysztof Chruściński 2024-06-21 15:31:43 +02:00 committed by Anas Nashif
commit 5f32265459
2 changed files with 20 additions and 8 deletions

View file

@ -23,6 +23,7 @@
{.dt_addr = DT_REG_ADDR(node_id), \
.dt_size = DT_REG_SIZE(node_id), \
.dt_attr = DT_PROP(node_id, zephyr_memory_attr), \
.dt_align = DMM_ALIGN_SIZE(node_id), \
.dt_allc = &_BUILD_LINKER_END_VAR(node_id)},
/* Generate declarations of linker variables used to determine size of preallocated variables
@ -36,6 +37,7 @@ struct dmm_region {
uintptr_t dt_addr;
size_t dt_size;
uint32_t dt_attr;
uint32_t dt_align;
void *dt_allc;
};
@ -91,7 +93,7 @@ static bool is_user_buffer_correctly_preallocated(void const *user_buffer, size_
return true;
}
if (IS_ALIGNED(addr, DMM_DCACHE_LINE_SIZE)) {
if (IS_ALIGNED(addr, region->dt_align)) {
/* If buffer is in cacheable region it must be aligned to data cache line size. */
return true;
}
@ -101,7 +103,7 @@ static bool is_user_buffer_correctly_preallocated(void const *user_buffer, size_
/* Return the start of the dynamically allocatable area of the region:
 * the end of the statically preallocated buffers (dt_allc, presumably the
 * linker-provided end marker - see _BUILD_LINKER_END_VAR above) rounded up
 * to the required alignment.
 *
 * NOTE(review): this span is diff residue - it contains both the removed
 * line (DMM_DCACHE_LINE_SIZE) and the added line (dt_align); the second
 * return is unreachable here. In the patched file only the dt_align
 * variant remains.
 */
static size_t dmm_heap_start_get(struct dmm_heap *dh)
{
return ROUND_UP(dh->region->dt_allc, DMM_DCACHE_LINE_SIZE);
return ROUND_UP(dh->region->dt_allc, dh->region->dt_align);
}
static size_t dmm_heap_size_get(struct dmm_heap *dh)
@ -111,8 +113,8 @@ static size_t dmm_heap_size_get(struct dmm_heap *dh)
/* Allocate a buffer of at least @p length bytes from the region's heap.
 * Both the size and the placement are rounded to the region's alignment
 * unit so that a buffer never shares an alignment unit (e.g. a cache
 * line) with another allocation. Returns NULL on failure (per
 * sys_heap_aligned_alloc semantics - confirm against its documentation).
 *
 * NOTE(review): this span is diff residue - both the pre-change statement
 * pair (DMM_DCACHE_LINE_SIZE) and the post-change pair (dt_align) are
 * present; the second pair is unreachable here. In the patched file only
 * the dt_align pair remains.
 */
static void *dmm_buffer_alloc(struct dmm_heap *dh, size_t length)
{
length = ROUND_UP(length, DMM_DCACHE_LINE_SIZE);
return sys_heap_aligned_alloc(&dh->heap, DMM_DCACHE_LINE_SIZE, length);
length = ROUND_UP(length, dh->region->dt_align);
return sys_heap_aligned_alloc(&dh->heap, dh->region->dt_align, length);
}
static void dmm_buffer_free(struct dmm_heap *dh, void *buffer)

View file

@ -14,6 +14,7 @@
#include <stdint.h>
#include <zephyr/devicetree.h>
#include <zephyr/linker/devicetree_regions.h>
#include <zephyr/mem_mgmt/mem_attr.h>
#include <zephyr/sys/util.h>
#ifdef __cplusplus
@ -22,8 +23,18 @@ extern "C" {
/** @cond INTERNAL_HIDDEN */
/* Data cache line size in bytes; degrades to 1 (sizeof(uint8_t)) when the
 * data cache is disabled, i.e. no special alignment is required.
 */
#define DMM_DCACHE_LINE_SIZE \
COND_CODE_1(IS_ENABLED(CONFIG_DCACHE), (CONFIG_DCACHE_LINE_SIZE), (sizeof(uint8_t)))
/* Determine if memory region for the peripheral is cacheable.
 * Evaluates to the DT_MEM_CACHEABLE bit of the region's zephyr,memory-attr
 * property, or 0 when the data cache is disabled or the property is absent.
 * NOTE(review): this uses COND_CODE_1(CONFIG_DCACHE, ...) while the macro
 * above uses IS_ENABLED(CONFIG_DCACHE) - the bare form only selects the
 * true branch when CONFIG_DCACHE is defined to exactly 1; confirm this is
 * intentional.
 */
#define DMM_IS_REG_CACHEABLE(node_id) \
COND_CODE_1(CONFIG_DCACHE, \
(COND_CODE_1(DT_NODE_HAS_PROP(DT_PHANDLE(node_id, memory_regions), zephyr_memory_attr), \
(DT_PROP(DT_PHANDLE(node_id, memory_regions), zephyr_memory_attr) & DT_MEM_CACHEABLE), \
(0))), (0))
/* Determine required alignment of the static buffers in memory regions.
 * Cache line alignment is required only if the region is cacheable and the
 * data cache is enabled; otherwise byte alignment suffices.
 */
#define DMM_ALIGN_SIZE(node_id) \
(DMM_IS_REG_CACHEABLE(node_id) ? CONFIG_DCACHE_LINE_SIZE : sizeof(uint8_t))
/**
* @brief Get reference to memory region associated with the specified device node
@ -35,7 +46,6 @@ extern "C" {
#define DMM_DEV_TO_REG(node_id) \
COND_CODE_1(DT_NODE_HAS_PROP(node_id, memory_regions), \
((void *)DT_REG_ADDR(DT_PHANDLE(node_id, memory_regions))), (NULL))
/**
* @brief Preallocate buffer in memory region associated with the specified device node
*
@ -45,7 +55,7 @@ extern "C" {
COND_CODE_1(DT_NODE_HAS_PROP(node_id, memory_regions), \
(__attribute__((__section__(LINKER_DT_NODE_REGION_NAME( \
DT_PHANDLE(node_id, memory_regions))))) \
__aligned(DMM_DCACHE_LINE_SIZE)), \
__aligned(DMM_ALIGN_SIZE(node_id))), \
())
#ifdef CONFIG_HAS_NORDIC_DMM