aarch64: mmu: Enable CONFIG_MMU

Enable CONFIG_MMU for AArch64 and add the new arch_mem_map() required
function.

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
This commit is contained in:
Carlo Caione 2020-10-26 12:08:37 +01:00 committed by Anas Nashif
commit e710d36f77
5 changed files with 62 additions and 11 deletions

View file

@ -92,6 +92,8 @@ config GEN_IRQ_VECTOR_TABLE
config ARM_MMU
bool "ARM MMU Support"
default y
select MMU
select SRAM_REGION_PERMISSIONS
help
Memory Management Unit support.
@ -106,6 +108,9 @@ config EXCEPTION_DEBUG
if ARM_MMU
config MMU_PAGE_SIZE
default 0x1000
config MAX_XLAT_TABLES
int "Maximum number of translation tables"
default 8

View file

@ -237,8 +237,8 @@ static void add_map(struct arm_mmu_ptables *ptables, const char *name,
name, virt, phys, size);
/* check minimum alignment requirement for given mmap region */
__ASSERT(((virt & (PAGE_SIZE - 1)) == 0) &&
((size & (PAGE_SIZE - 1)) == 0),
__ASSERT(((virt & (CONFIG_MMU_PAGE_SIZE - 1)) == 0) &&
((size & (CONFIG_MMU_PAGE_SIZE - 1)) == 0),
"address/size are not page aligned\n");
desc = get_region_desc(attrs);
@ -407,6 +407,9 @@ static int arm_mmu_init(const struct device *arg)
/* Current MMU code supports only EL1 */
__asm__ volatile("mrs %0, CurrentEL" : "=r" (val));
__ASSERT(CONFIG_MMU_PAGE_SIZE == KB(4),
"Only 4K page size is supported\n");
__ASSERT(GET_EL(val) == MODE_EL1,
"Exception level not EL1, MMU not enabled!\n");
@ -429,3 +432,51 @@ SYS_INIT(arm_mmu_init, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEVICE
#endif
);
int arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
{
	/*
	 * Install a mapping of [phys, phys + size) at virtual address virt.
	 * All mappings are placed in the kernel page tables; user-accessible
	 * mappings are not supported, so K_MEM_PERM_USER is rejected.
	 *
	 * Returns 0 on success, -ENOTSUP for an unsupported cache attribute
	 * or a userspace mapping request.
	 */
	uint32_t attrs = MT_SECURE | MT_P_RX_U_NA;
	uint32_t cache = flags & K_MEM_CACHE_MASK;

	/* Userspace mappings are not implemented yet */
	if ((flags & K_MEM_PERM_USER) != 0U) {
		return -ENOTSUP;
	}

	/*
	 * Translate the generic cache attribute into the matching memory
	 * type understood by the hardware:
	 *   K_MEM_CACHE_NONE => MT_DEVICE_nGnRnE (Device memory nGnRnE)
	 *   K_MEM_CACHE_WT   => MT_NORMAL_WT (Normal, Outer WT + Inner WT)
	 *   K_MEM_CACHE_WB   => MT_NORMAL (Normal, Outer WB + Inner WB)
	 */
	if (cache == K_MEM_CACHE_NONE) {
		attrs |= MT_DEVICE_nGnRnE;
	} else if (cache == K_MEM_CACHE_WT) {
		attrs |= MT_NORMAL_WT;
	} else if (cache == K_MEM_CACHE_WB) {
		attrs |= MT_NORMAL;
	} else {
		/* Unknown/unsupported cache attribute */
		return -ENOTSUP;
	}

	if ((flags & K_MEM_PERM_RW) != 0U) {
		attrs |= MT_RW;
	}

	if ((flags & K_MEM_PERM_EXEC) == 0U) {
		attrs |= MT_P_EXECUTE_NEVER;
	}

	/* Always map in the kernel page tables */
	add_map(&kernel_ptables, "generic", phys, (uintptr_t)virt, size, attrs);

	return 0;
}

View file

@ -42,7 +42,6 @@
/* Only 4K granule is supported */
#define PAGE_SIZE_SHIFT 12U
#define PAGE_SIZE (1U << PAGE_SIZE_SHIFT)
/* 48-bit VA address */
#define VA_SIZE_SHIFT_MAX 48U

View file

@ -19,12 +19,14 @@
#define MT_DEVICE_GRE 2U
#define MT_NORMAL_NC 3U
#define MT_NORMAL 4U
#define MT_NORMAL_WT 5U
#define MEMORY_ATTRIBUTES ((0x00 << (MT_DEVICE_nGnRnE * 8)) | \
(0x04 << (MT_DEVICE_nGnRE * 8)) | \
(0x0c << (MT_DEVICE_GRE * 8)) | \
(0x44 << (MT_NORMAL_NC * 8)) | \
(0xffUL << (MT_NORMAL * 8)))
(0xffUL << (MT_NORMAL * 8)) | \
(0xbbUL << (MT_NORMAL_WT * 8)))
/* More flags from user's perspective are supported using remaining bits
* of "attrs" field, i.e. attrs[31:3], underlying code will take care

View file

@ -39,12 +39,6 @@
#define ROM_ADDR (CONFIG_FLASH_BASE_ADDRESS + CONFIG_FLASH_LOAD_OFFSET)
#endif
/*
* MMU currently supports 4 kB translation granule size,
* so all regions are required to be 4 kB aligned
*/
#define PAGE_SIZE 0x1000
#if CONFIG_FLASH_LOAD_SIZE > 0
#define ROM_SIZE CONFIG_FLASH_LOAD_SIZE
#else
@ -66,7 +60,7 @@
#endif
#if defined(CONFIG_ARM_MMU)
_region_min_align = PAGE_SIZE;
_region_min_align = CONFIG_MMU_PAGE_SIZE;
#else
/* If building without MMU support, use default 4-byte alignment. */
_region_min_align = 4;