shared_multi_heap: Rework framework
Entirely rework the shared_multi_heap framework. Refer to the documentation
for more information.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
parent a9e935e01b
commit 1dcea253d2

11 changed files with 409 additions and 252 deletions
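For orientation, the reworked API (as exercised by the updated test code in this diff) replaces the old callback-based shared_multi_heap_pool_init(smh_reg_init) flow with an explicit init/add/alloc sequence. Below is a minimal usage sketch based only on the calls visible in this diff; the region values and the function name smh_usage_sketch() are illustrative, and shared_multi_heap_free() is assumed to be the matching release routine.

#include <zephyr.h>
#include <multi_heap/shared_multi_heap.h>

/* Illustrative region descriptor: the .addr/.size/.attr fields mirror the
 * struct shared_multi_heap_region usage in the test below. On MMU platforms
 * .addr must already be a virtual address (the test maps it via z_phys_map()).
 */
static struct shared_multi_heap_region my_region = {
        .addr = 0x42000000,     /* example address, not taken from a real board */
        .size = 0x1000,
        .attr = SMH_REG_ATTR_CACHEABLE,
};

void smh_usage_sketch(void)
{
        void *block;

        /* Init the pool once: 0 on success, -EALREADY on a second call */
        if (shared_multi_heap_pool_init() != 0) {
                return;
        }

        /* Attach the region; the second argument is optional user data */
        shared_multi_heap_add(&my_region, NULL);

        /* Allocate from whatever attached region satisfies the capability */
        block = shared_multi_heap_alloc(SMH_REG_ATTR_CACHEABLE, 0x40);
        if (block != NULL) {
                /* Assumed counterpart for releasing the block */
                shared_multi_heap_free(block);
        }
}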
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2021 Carlo Caione <ccaione@baylibre.com>
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/ {
+        /delete-node/ memory@38000000;
+
+        sram2_3: memory@38000000 {
+                compatible = "zephyr,memory-region", "mmio-sram";
+                reg = <0x38000000 0x100000>;
+                zephyr,memory-region = "SRAM2_3";
+        };
+
+        res0: memory@38100000 {
+                compatible = "zephyr,memory-region", "mmio-sram";
+                reg = <0x38100000 0x1000>;
+                zephyr,memory-region = "RES0";
+                zephyr,memory-region-mpu = "RAM";
+        };
+
+        res1: memory@38200000 {
+                compatible = "zephyr,memory-region", "mmio-sram";
+                reg = <0x38200000 0x2000>;
+                zephyr,memory-region = "RES1";
+                zephyr,memory-region-mpu = "RAM_NOCACHE";
+        };
+
+        res2: memory@38300000 {
+                compatible = "zephyr,memory-region", "mmio-sram";
+                reg = <0x38300000 0x3000>;
+                zephyr,memory-region = "RES2";
+                zephyr,memory-region-mpu = "RAM";
+        };
+};
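The new overlay above only describes candidate regions; the heap itself is populated from them at run time. As a quick illustration of how such zephyr,memory-region nodes can be read back from C with the standard devicetree macros (the node labels res0/res1 are the ones defined above; dump_regions() is just a hypothetical helper added here for illustration):

#include <devicetree.h>
#include <sys/printk.h>

/* Addresses and sizes resolved at build time from the overlay above */
#define RES0_ADDR DT_REG_ADDR(DT_NODELABEL(res0))
#define RES0_SIZE DT_REG_SIZE(DT_NODELABEL(res0))
#define RES1_ADDR DT_REG_ADDR(DT_NODELABEL(res1))
#define RES1_SIZE DT_REG_SIZE(DT_NODELABEL(res1))

static void dump_regions(void)
{
        printk("RES0: addr 0x%lx size 0x%lx\n",
               (unsigned long)RES0_ADDR, (unsigned long)RES0_SIZE);
        printk("RES1: addr 0x%lx size 0x%lx\n",
               (unsigned long)RES1_ADDR, (unsigned long)RES1_SIZE);
}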
@@ -0,0 +1,2 @@
+CONFIG_HAVE_CUSTOM_LINKER_SCRIPT=y
+CONFIG_CUSTOM_LINKER_SCRIPT="linker_arm64_shared_pool.ld"
@@ -5,30 +5,32 @@
  */

 / {
-        reserved-memory {
-                compatible = "reserved-memory";
-                #address-cells = <1>;
-                #size-cells = <1>;
-
-                res0: reserved@42000000 {
-                        compatible = "shared-multi-heap";
-                        reg = <0x42000000 0x1000>;
-                        capability = "cacheable";
-                        label = "res0";
+        soc {
+                res0: memory@42000000 {
+                        compatible = "zephyr,memory-region", "mmio-sram";
+                        reg = <0x0 0x42000000 0x0 0x1000>;
+                        zephyr,memory-region = "RES0";
+                        zephyr,memory-region-mpu = "RAM";
                 };

-                res1: reserved@43000000 {
-                        compatible = "shared-multi-heap";
-                        reg = <0x43000000 0x2000>;
-                        capability = "non-cacheable";
-                        label = "res1";
+                res1: memory@43000000 {
+                        compatible = "zephyr,memory-region", "mmio-sram";
+                        reg = <0x0 0x43000000 0x0 0x2000>;
+                        zephyr,memory-region = "RES1";
+                        zephyr,memory-region-mpu = "RAM_NOCACHE";
                 };

-                res2: reserved2@44000000 {
-                        compatible = "shared-multi-heap";
-                        reg = <0x44000000 0x3000>;
-                        capability = "cacheable";
-                        label = "res2";
+                res_no_mpu: memory@45000000 {
+                        compatible = "zephyr,memory-region", "mmio-sram";
+                        reg = <0x0 0x45000000 0x0 0x1000>;
+                        zephyr,memory-region = "RES_NO_MPU";
+                };
+
+                res2: memory@44000000 {
+                        compatible = "zephyr,memory-region", "mmio-sram";
+                        reg = <0x0 0x44000000 0x0 0x3000>;
+                        zephyr,memory-region = "RES2";
+                        zephyr,memory-region-mpu = "RAM";
                 };
         };
 };
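Note that the res_no_mpu node above deliberately omits zephyr,memory-region-mpu: under the reworked scheme the heap capability is derived from that property rather than from the old capability/label strings of the shared-multi-heap reserved-memory binding. A hedged sketch of how the absence is detected, using the same devicetree macro the test relies on later in this diff (the macro name REGION_MPU_ATTR_OR and its fallback value are illustrative; the test passes SMH_REG_ATTR_NUM as the fallback):

#include <devicetree.h>

#define DT_DRV_COMPAT zephyr_memory_region

/* DT_INST_ENUM_IDX_OR() yields the enum index of zephyr,memory-region-mpu
 * ("RAM" -> 0, "RAM_NOCACHE" -> 1, matching mpu_to_reg_attr() later in this
 * diff) or the fallback when the property is missing, as for res_no_mpu
 * above. Regions without the property can then be skipped when the heap
 * pool is built.
 */
#define REGION_MPU_ATTR_OR(n, fallback) \
        DT_INST_ENUM_IDX_OR(n, zephyr_memory_region_mpu, fallback)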
@@ -6,18 +6,19 @@

 #include <linker/sections.h>
 #include <devicetree.h>
+#include <linker/devicetree_regions.h>

 #include <linker/linker-defs.h>
 #include <linker/linker-tool.h>

 MEMORY
 {
-        LINKER_DT_RESERVED_MEM_REGIONS()
+        LINKER_DT_REGIONS()
 }

 SECTIONS
 {
-        LINKER_DT_RESERVED_MEM_SECTIONS()
+        LINKER_DT_SECTIONS()
 }

 #include <arch/arm64/scripts/linker.ld>
@@ -2,6 +2,4 @@
 # SPDX-License-Identifier: Apache-2.0

 CONFIG_ZTEST=y
-CONFIG_HAVE_CUSTOM_LINKER_SCRIPT=y
-CONFIG_CUSTOM_LINKER_SCRIPT="linker_arm64_shared_pool.ld"
 CONFIG_SHARED_MULTI_HEAP=y
@@ -11,101 +11,194 @@
 #include <multi_heap/shared_multi_heap.h>

-#define MAX_REGIONS (3)
+#define DT_DRV_COMPAT zephyr_memory_region

-static struct {
-        struct shared_multi_heap_region *reg;
-        uint8_t *v_addr;
-} map[MAX_REGIONS];
+#define RES0_CACHE_ADDR         DT_REG_ADDR(DT_NODELABEL(res0))
+#define RES1_NOCACHE_ADDR       DT_REG_ADDR(DT_NODELABEL(res1))
+#define RES2_CACHE_ADDR         DT_REG_ADDR(DT_NODELABEL(res2))

-static bool smh_reg_init(struct shared_multi_heap_region *reg, uint8_t **v_addr, size_t *size)
+struct region_map {
+        struct shared_multi_heap_region region;
+        uintptr_t p_addr;
+};
+
+#define FOREACH_REG(n) \
+        { \
+                .region = { \
+                        .addr = (uintptr_t) DT_INST_REG_ADDR(n), \
+                        .size = DT_INST_REG_SIZE(n), \
+                        .attr = DT_INST_ENUM_IDX_OR(n, zephyr_memory_region_mpu, \
+                                                    SMH_REG_ATTR_NUM), \
+                }, \
+        },
+
+struct region_map map[] = {
+        DT_INST_FOREACH_STATUS_OKAY(FOREACH_REG)
+};
+
+#if defined(CONFIG_MMU)
+static void smh_reg_map(struct shared_multi_heap_region *region)
 {
-        static int reg_idx;
         uint32_t mem_attr;
+        uint8_t *v_addr;

-        mem_attr = (reg->attr == SMH_REG_ATTR_CACHEABLE) ? K_MEM_CACHE_WB : K_MEM_CACHE_NONE;
+        mem_attr = (region->attr == SMH_REG_ATTR_CACHEABLE) ? K_MEM_CACHE_WB : K_MEM_CACHE_NONE;
         mem_attr |= K_MEM_PERM_RW;

-        z_phys_map(v_addr, reg->addr, reg->size, mem_attr);
+        z_phys_map(&v_addr, region->addr, region->size, mem_attr);

-        *size = reg->size;
-
-        /* Save the mapping to retrieve the region from the vaddr */
-        map[reg_idx].reg = reg;
-        map[reg_idx].v_addr = *v_addr;
-
-        reg_idx++;
-
-        return true;
+        region->addr = (uintptr_t) v_addr;
 }
+#endif /* CONFIG_MMU */

-static struct shared_multi_heap_region *get_reg_addr(uint8_t *v_addr)
+/*
+ * Given a virtual address retrieve the original memory region that the mapping
+ * is belonging to.
+ */
+static struct region_map *get_region_map(void *v_addr)
 {
-        for (size_t reg = 0; reg < MAX_REGIONS; reg++) {
-                if (v_addr >= map[reg].v_addr &&
-                    v_addr < map[reg].v_addr + map[reg].reg->size) {
-                        return map[reg].reg;
+        for (size_t reg = 0; reg < ARRAY_SIZE(map); reg++) {
+                if ((uintptr_t) v_addr >= map[reg].region.addr &&
+                    (uintptr_t) v_addr < map[reg].region.addr + map[reg].region.size) {
+                        return &map[reg];
                 }
         }
         return NULL;
 }

+static inline enum smh_reg_attr mpu_to_reg_attr(int mpu_attr)
+{
+        /*
+         * All the memory regions defined in the DT with the MPU property `RAM`
+         * can be accessed and memory can be retrieved from using the attribute
+         * `SMH_REG_ATTR_CACHEABLE`.
+         *
+         * All the memory regions defined in the DT with the MPU property
+         * `RAM_NOCACHE` can be accessed and memory can be retrieved from using
+         * the attribute `SMH_REG_ATTR_NON_CACHEABLE`.
+         *
+         * [MPU attr] -> [SMH attr]
+         *
+         * RAM          -> SMH_REG_ATTR_CACHEABLE
+         * RAM_NOCACHE  -> SMH_REG_ATTR_NON_CACHEABLE
+         */
+        switch (mpu_attr) {
+        case 0: /* RAM */
+                return SMH_REG_ATTR_CACHEABLE;
+        case 1: /* RAM_NOCACHE */
+                return SMH_REG_ATTR_NON_CACHEABLE;
+        default:
+                /* How ? */
+                ztest_test_fail();
+        }
+
+        /* whatever */
+        return 0;
+}
+
+static void fill_multi_heap(void)
+{
+        struct region_map *reg_map;
+
+        for (size_t idx = 0; idx < DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT); idx++) {
+                reg_map = &map[idx];
+
+                /* zephyr,memory-region-mpu property not found. Skip it. */
+                if (reg_map->region.attr == SMH_REG_ATTR_NUM) {
+                        continue;
+                }
+
+                /* Convert MPU attributes to shared-multi-heap capabilities */
+                reg_map->region.attr = mpu_to_reg_attr(reg_map->region.attr);
+
+                /* Assume for now that phys == virt */
+                reg_map->p_addr = reg_map->region.addr;
+
+#if defined(CONFIG_MMU)
+                /*
+                 * For MMU-enabled platform we have to MMU-map the physical
+                 * address retrieved by DT at run-time because the SMH
+                 * framework expects virtual addresses.
+                 *
+                 * For MPU-enabled plaform the code is assuming that the region
+                 * are configured at build-time, so no map is needed.
+                 */
+                smh_reg_map(&reg_map->region);
+#endif /* CONFIG_MMU */
+
+                shared_multi_heap_add(&reg_map->region, NULL);
+        }
+}
+
 void test_shared_multi_heap(void)
 {
-        struct shared_multi_heap_region *reg;
-        uint8_t *block;
+        struct region_map *reg_map;
+        void *block;
+        int ret;

-        shared_multi_heap_pool_init(smh_reg_init);
+        ret = shared_multi_heap_pool_init();
+        zassert_equal(0, ret, "failed initialization");
+
+        /*
+         * Return -EALREADY if already inited
+         */
+        ret = shared_multi_heap_pool_init();
+        zassert_equal(-EALREADY, ret, "second init should fail");
+
+        /*
+         * Fill the buffer pool with the memory heaps coming from DT
+         */
+        fill_multi_heap();

         /*
          * Request a small cacheable chunk. It should be allocated in the
-         * smaller region (@ 0x42000000)
+         * smaller region RES0
          */
         block = shared_multi_heap_alloc(SMH_REG_ATTR_CACHEABLE, 0x40);
-        reg = get_reg_addr(block);
+        reg_map = get_region_map(block);

-        zassert_equal(reg->addr, 0x42000000, "block in the wrong memory region");
-        zassert_equal(reg->attr, SMH_REG_ATTR_CACHEABLE, "wrong memory attribute");
+        zassert_equal(reg_map->p_addr, RES0_CACHE_ADDR, "block in the wrong memory region");
+        zassert_equal(reg_map->region.attr, SMH_REG_ATTR_CACHEABLE, "wrong memory attribute");

         /*
          * Request another small cacheable chunk. It should be allocated in the
-         * smaller cacheable region (@ 0x42000000)
+         * smaller cacheable region RES0
          */
         block = shared_multi_heap_alloc(SMH_REG_ATTR_CACHEABLE, 0x80);
-        reg = get_reg_addr(block);
+        reg_map = get_region_map(block);

-        zassert_equal(reg->addr, 0x42000000, "block in the wrong memory region");
-        zassert_equal(reg->attr, SMH_REG_ATTR_CACHEABLE, "wrong memory attribute");
+        zassert_equal(reg_map->p_addr, RES0_CACHE_ADDR, "block in the wrong memory region");
+        zassert_equal(reg_map->region.attr, SMH_REG_ATTR_CACHEABLE, "wrong memory attribute");

         /*
          * Request a big cacheable chunk. It should be allocated in the
-         * bigger cacheable region (@ 0x44000000)
+         * bigger cacheable region RES2
          */
         block = shared_multi_heap_alloc(SMH_REG_ATTR_CACHEABLE, 0x1200);
-        reg = get_reg_addr(block);
+        reg_map = get_region_map(block);

-        zassert_equal(reg->addr, 0x44000000, "block in the wrong memory region");
-        zassert_equal(reg->attr, SMH_REG_ATTR_CACHEABLE, "wrong memory attribute");
+        zassert_equal(reg_map->p_addr, RES2_CACHE_ADDR, "block in the wrong memory region");
+        zassert_equal(reg_map->region.attr, SMH_REG_ATTR_CACHEABLE, "wrong memory attribute");

         /*
          * Request a non-cacheable chunk. It should be allocated in the
-         * non-cacheable region (@ 0x43000000)
+         * non-cacheable region RES1
          */
         block = shared_multi_heap_alloc(SMH_REG_ATTR_NON_CACHEABLE, 0x100);
-        reg = get_reg_addr(block);
+        reg_map = get_region_map(block);

-        zassert_equal(reg->addr, 0x43000000, "block in the wrong memory region");
-        zassert_equal(reg->attr, SMH_REG_ATTR_NON_CACHEABLE, "wrong memory attribute");
+        zassert_equal(reg_map->p_addr, RES1_NOCACHE_ADDR, "block in the wrong memory region");
+        zassert_equal(reg_map->region.attr, SMH_REG_ATTR_NON_CACHEABLE, "wrong memory attribute");

         /*
          * Request again a non-cacheable chunk. It should be allocated in the
-         * non-cacheable region (@ 0x43000000)
+         * non-cacheable region RES1
          */
         block = shared_multi_heap_alloc(SMH_REG_ATTR_NON_CACHEABLE, 0x100);
-        reg = get_reg_addr(block);
+        reg_map = get_region_map(block);

-        zassert_equal(reg->addr, 0x43000000, "block in the wrong memory region");
-        zassert_equal(reg->attr, SMH_REG_ATTR_NON_CACHEABLE, "wrong memory attribute");
+        zassert_equal(reg_map->p_addr, RES1_NOCACHE_ADDR, "block in the wrong memory region");
+        zassert_equal(reg_map->region.attr, SMH_REG_ATTR_NON_CACHEABLE, "wrong memory attribute");

         /* Request a block too big */
         block = shared_multi_heap_alloc(SMH_REG_ATTR_NON_CACHEABLE, 0x10000);
@@ -116,9 +209,8 @@ void test_shared_multi_heap(void)
         zassert_is_null(block, "0 size accepted as valid");

         /* Request a non-existent attribute */
-        block = shared_multi_heap_alloc(SMH_REG_ATTR_NUM + 1, 0x100);
+        block = shared_multi_heap_alloc(MAX_SHARED_MULTI_HEAP_ATTR, 0x100);
         zassert_is_null(block, "wrong attribute accepted as valid");
-
 }

 void test_main(void)
@@ -3,6 +3,6 @@

 tests:
   kernel.shared_multi_heap:
-    platform_allow: qemu_cortex_a53
+    platform_allow: qemu_cortex_a53 mps2_an521
     tags: board multi_heap
     harness: ztest