diff --git a/arch/arm/core/aarch64/CMakeLists.txt b/arch/arm/core/aarch64/CMakeLists.txt
index 0fa8053c5fb..8b49457d3a5 100644
--- a/arch/arm/core/aarch64/CMakeLists.txt
+++ b/arch/arm/core/aarch64/CMakeLists.txt
@@ -20,3 +20,4 @@ zephyr_library_sources(
 zephyr_library_sources_ifdef(CONFIG_GEN_SW_ISR_TABLE isr_wrapper.S)
 zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD irq_offload.c)
+zephyr_library_sources_ifdef(CONFIG_ARM_MMU arm_mmu.c)
diff --git a/arch/arm/core/aarch64/Kconfig b/arch/arm/core/aarch64/Kconfig
index b6ebf35d5d8..6566f09fac9 100644
--- a/arch/arm/core/aarch64/Kconfig
+++ b/arch/arm/core/aarch64/Kconfig
@@ -90,6 +90,85 @@ config GEN_ISR_TABLES
 config GEN_IRQ_VECTOR_TABLE
         default n
 
+config ARM_MMU
+        bool "ARM MMU Support"
+        default y
+        help
+          Memory Management Unit support.
+
+if ARM_MMU
+
+config MAX_XLAT_TABLES
+        int "Maximum number of translation tables"
+        default 7
+        help
+          This option specifies the maximum number of translation tables,
+          excluding the base translation table. Based on this, translation
+          tables are allocated at compile time and used at runtime as needed.
+          If the runtime need exceeds the number of preallocated translation
+          tables, it results in an assertion failure. The number of
+          translation tables required depends on how many discrete memory
+          regions (both normal and device memory) are present on the given
+          platform and on how much granularity is required while assigning
+          attributes to these memory regions.
+
+choice
+        prompt "Virtual address space size"
+        default ARM64_VA_BITS_32
+        help
+          Allows choosing one of multiple possible virtual address space
+          sizes. The translation table level is determined by the
+          combination of page size and virtual address space size.
+
+config ARM64_VA_BITS_32
+        bool "32-bit"
+
+config ARM64_VA_BITS_36
+        bool "36-bit"
+
+config ARM64_VA_BITS_42
+        bool "42-bit"
+
+config ARM64_VA_BITS_48
+        bool "48-bit"
+endchoice
+
+config ARM64_VA_BITS
+        int
+        default 32 if ARM64_VA_BITS_32
+        default 36 if ARM64_VA_BITS_36
+        default 42 if ARM64_VA_BITS_42
+        default 48 if ARM64_VA_BITS_48
+
+choice
+        prompt "Physical address space size"
+        default ARM64_PA_BITS_32
+        help
+          Choose the maximum physical address range that the kernel will
+          support.
+
+config ARM64_PA_BITS_32
+        bool "32-bit"
+
+config ARM64_PA_BITS_36
+        bool "36-bit"
+
+config ARM64_PA_BITS_42
+        bool "42-bit"
+
+config ARM64_PA_BITS_48
+        bool "48-bit"
+endchoice
+
+config ARM64_PA_BITS
+        int
+        default 32 if ARM64_PA_BITS_32
+        default 36 if ARM64_PA_BITS_36
+        default 42 if ARM64_PA_BITS_42
+        default 48 if ARM64_PA_BITS_48
+
+endif # ARM_MMU
+
 endif # CPU_CORTEX_A
 
 endif # ARM64
diff --git a/arch/arm/core/aarch64/arm_mmu.c b/arch/arm/core/aarch64/arm_mmu.c
new file mode 100644
index 00000000000..a6f8c42c185
--- /dev/null
+++ b/arch/arm/core/aarch64/arm_mmu.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright 2019 Broadcom
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <device.h>
+#include <init.h>
+#include <kernel.h>
+#include <sys/__assert.h>
+#include <sys/util.h>
+#include <arch/cpu.h>
+#include <arch/arm/aarch64/arm_mmu.h>
+
+/* Set the below flag to get debug prints */
+#define MMU_DEBUG_PRINTS 0
+/* To get prints from the MMU driver, it has to be initialized after the
+ * console driver.
+ */
+#define MMU_DEBUG_PRIORITY 70
+
+#if MMU_DEBUG_PRINTS
+/* To dump page table entries while filling them, set the DUMP_PTE macro */
+#define DUMP_PTE 0
+#define MMU_DEBUG(fmt, ...) printk(fmt, ##__VA_ARGS__)
+#else
+#define MMU_DEBUG(...)
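+/* MMU_DEBUG() expands to nothing, so debug prints compile out entirely */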
+#endif
+
+/* We support only a 4 KB translation granule */
+#define PAGE_SIZE_SHIFT 12U
+#define PAGE_SIZE (1U << PAGE_SIZE_SHIFT)
+#define XLAT_TABLE_SIZE_SHIFT PAGE_SIZE_SHIFT /* Size of one complete table */
+#define XLAT_TABLE_SIZE (1U << XLAT_TABLE_SIZE_SHIFT)
+
+#define XLAT_TABLE_ENTRY_SIZE_SHIFT 3U /* Each table entry is 8 bytes */
+#define XLAT_TABLE_LEVEL_MAX 3U
+
+#define XLAT_TABLE_ENTRIES_SHIFT \
+        (XLAT_TABLE_SIZE_SHIFT - XLAT_TABLE_ENTRY_SIZE_SHIFT)
+#define XLAT_TABLE_ENTRIES (1U << XLAT_TABLE_ENTRIES_SHIFT)
+
+/* Address size covered by each entry at given translation table level */
+#define L3_XLAT_VA_SIZE_SHIFT PAGE_SIZE_SHIFT
+#define L2_XLAT_VA_SIZE_SHIFT \
+        (L3_XLAT_VA_SIZE_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L1_XLAT_VA_SIZE_SHIFT \
+        (L2_XLAT_VA_SIZE_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+#define L0_XLAT_VA_SIZE_SHIFT \
+        (L1_XLAT_VA_SIZE_SHIFT + XLAT_TABLE_ENTRIES_SHIFT)
+
+#define LEVEL_TO_VA_SIZE_SHIFT(level) \
+        (PAGE_SIZE_SHIFT + (XLAT_TABLE_ENTRIES_SHIFT * \
+        (XLAT_TABLE_LEVEL_MAX - (level))))
+
+/* Virtual Address Index within given translation table level */
+#define XLAT_TABLE_VA_IDX(va_addr, level) \
+        ((va_addr >> LEVEL_TO_VA_SIZE_SHIFT(level)) & (XLAT_TABLE_ENTRIES - 1))
+
+/*
+ * Calculate the initial translation table level from CONFIG_ARM64_VA_BITS
+ * For a 4 KB page size,
+ * (va_bits <= 21)       - base level 3
+ * (22 <= va_bits <= 30) - base level 2
+ * (31 <= va_bits <= 39) - base level 1
+ * (40 <= va_bits <= 48) - base level 0
+ */
+#define GET_XLAT_TABLE_BASE_LEVEL(va_bits) \
+        ((va_bits > L0_XLAT_VA_SIZE_SHIFT) \
+        ? 0U \
+        : (va_bits > L1_XLAT_VA_SIZE_SHIFT) \
+        ? 1U \
+        : (va_bits > L2_XLAT_VA_SIZE_SHIFT) \
+        ? 2U : 3U)
+
+#define XLAT_TABLE_BASE_LEVEL GET_XLAT_TABLE_BASE_LEVEL(CONFIG_ARM64_VA_BITS)
+
+#define GET_NUM_BASE_LEVEL_ENTRIES(va_bits) \
+        (1U << (va_bits - LEVEL_TO_VA_SIZE_SHIFT(XLAT_TABLE_BASE_LEVEL)))
+
+#define NUM_BASE_LEVEL_ENTRIES GET_NUM_BASE_LEVEL_ENTRIES(CONFIG_ARM64_VA_BITS)
+
+#if DUMP_PTE
+#define L0_SPACE ""
+#define L1_SPACE "  "
+#define L2_SPACE "    "
+#define L3_SPACE "      "
+#define XLAT_TABLE_LEVEL_SPACE(level) \
+        (((level) == 0) ? L0_SPACE : \
+        ((level) == 1) ? L1_SPACE : \
+        ((level) == 2) ? L2_SPACE : L3_SPACE)
+#endif
+
+static u64_t base_xlat_table[NUM_BASE_LEVEL_ENTRIES]
+                __aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(u64_t));
+
+static u64_t xlat_tables[CONFIG_MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
+                __aligned(XLAT_TABLE_ENTRIES * sizeof(u64_t));
+
+/* Translation table control register settings */
+static u64_t get_tcr(int el)
+{
+        u64_t tcr;
+        u64_t pa_bits = CONFIG_ARM64_PA_BITS;
+        u64_t va_bits = CONFIG_ARM64_VA_BITS;
+        u64_t tcr_ps_bits;
+
+        switch (pa_bits) {
+        case 48:
+                tcr_ps_bits = TCR_PS_BITS_256TB;
+                break;
+        case 44:
+                tcr_ps_bits = TCR_PS_BITS_16TB;
+                break;
+        case 42:
+                tcr_ps_bits = TCR_PS_BITS_4TB;
+                break;
+        case 40:
+                tcr_ps_bits = TCR_PS_BITS_1TB;
+                break;
+        case 36:
+                tcr_ps_bits = TCR_PS_BITS_64GB;
+                break;
+        default:
+                tcr_ps_bits = TCR_PS_BITS_4GB;
+                break;
+        }
+
+        if (el == 1) {
+                tcr = (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
+                /*
+                 * TCR_EL1.EPD1: Disable translation table walks for addresses
+                 * that are translated using TTBR1_EL1.
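+                 * Only TTBR0_EL1 is programmed by this driver, so
+                 * TTBR1_EL1 walks are never required.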
+ */ + tcr |= TCR_EPD1_DISABLE; + } else + tcr = (tcr_ps_bits << TCR_EL3_PS_SHIFT); + + tcr |= TCR_T0SZ(va_bits); + /* + * Translation table walk is cacheable, inner/outer WBWA and + * inner shareable + */ + tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA; + + return tcr; +} + +static int pte_desc_type(u64_t *pte) +{ + return *pte & PTE_DESC_TYPE_MASK; +} + +static u64_t *calculate_pte_index(u64_t addr, int level) +{ + int base_level = XLAT_TABLE_BASE_LEVEL; + u64_t *pte; + u64_t idx; + unsigned int i; + + /* Walk through all translation tables to find pte index */ + pte = (u64_t *)base_xlat_table; + for (i = base_level; i <= XLAT_TABLE_LEVEL_MAX; i++) { + idx = XLAT_TABLE_VA_IDX(addr, i); + pte += idx; + + /* Found pte index */ + if (i == level) + return pte; + /* if PTE is not table desc, can't traverse */ + if (pte_desc_type(pte) != PTE_TABLE_DESC) + return NULL; + /* Move to the next translation table level */ + pte = (u64_t *)(*pte & 0x0000fffffffff000ULL); + } + + return NULL; +} + +static void set_pte_table_desc(u64_t *pte, u64_t *table, unsigned int level) +{ +#if DUMP_PTE + MMU_DEBUG("%s", XLAT_TABLE_LEVEL_SPACE(level)); + MMU_DEBUG("%p: [Table] %p\n", pte, table); +#endif + /* Point pte to new table */ + *pte = PTE_TABLE_DESC | (u64_t)table; +} + +static void set_pte_block_desc(u64_t *pte, u64_t addr_pa, + unsigned int attrs, unsigned int level) +{ + u64_t desc = addr_pa; + unsigned int mem_type; + + desc |= (level == 3) ? PTE_PAGE_DESC : PTE_BLOCK_DESC; + + /* NS bit for security memory access from secure state */ + desc |= (attrs & MT_NS) ? PTE_BLOCK_DESC_NS : 0; + + /* AP bits for Data access permission */ + desc |= (attrs & MT_RW) ? PTE_BLOCK_DESC_AP_RW : PTE_BLOCK_DESC_AP_RO; + + /* the access flag */ + desc |= PTE_BLOCK_DESC_AF; + + /* memory attribute index field */ + mem_type = MT_TYPE(attrs); + desc |= PTE_BLOCK_DESC_MEMTYPE(mem_type); + + switch (mem_type) { + case MT_DEVICE_nGnRnE: + case MT_DEVICE_nGnRE: + case MT_DEVICE_GRE: + /* Access to Device memory and non-cacheable memory are coherent + * for all observers in the system and are treated as + * Outer shareable, so, for these 2 types of memory, + * it is not strictly needed to set shareability field + */ + desc |= PTE_BLOCK_DESC_OUTER_SHARE; + /* Map device memory as execute-never */ + desc |= PTE_BLOCK_DESC_PXN; + desc |= PTE_BLOCK_DESC_UXN; + break; + case MT_NORMAL_NC: + case MT_NORMAL: + /* Make Normal RW memory as execute never */ + if ((attrs & MT_RW) || (attrs & MT_EXECUTE_NEVER)) + desc |= PTE_BLOCK_DESC_PXN; + if (mem_type == MT_NORMAL) + desc |= PTE_BLOCK_DESC_INNER_SHARE; + else + desc |= PTE_BLOCK_DESC_OUTER_SHARE; + } + +#if DUMP_PTE + MMU_DEBUG("%s", XLAT_TABLE_LEVEL_SPACE(level)); + MMU_DEBUG("%p: ", pte); + MMU_DEBUG((mem_type == MT_NORMAL) ? "MEM" : + ((mem_type == MT_NORMAL_NC) ? "NC" : "DEV")); + MMU_DEBUG((attrs & MT_RW) ? "-RW" : "-RO"); + MMU_DEBUG((attrs & MT_NS) ? "-NS" : "-S"); + MMU_DEBUG((attrs & MT_EXECUTE_NEVER) ? 
"-XN" : "-EXEC"); + MMU_DEBUG("\n"); +#endif + + *pte = desc; +} + +/* Returns a new reallocated table */ +static u64_t *new_prealloc_table(void) +{ + static unsigned int table_idx; + + __ASSERT(table_idx < CONFIG_MAX_XLAT_TABLES, + "Enough xlat tables not allocated"); + + return (u64_t *)(xlat_tables[table_idx++]); +} + +/* Splits a block into table with entries spanning the old block */ +static void split_pte_block_desc(u64_t *pte, int level) +{ + u64_t old_block_desc = *pte; + u64_t *new_table; + unsigned int i = 0; + /* get address size shift bits for next level */ + int levelshift = LEVEL_TO_VA_SIZE_SHIFT(level + 1); + + MMU_DEBUG("Splitting existing PTE %p(L%d)\n", pte, level); + + new_table = new_prealloc_table(); + + for (i = 0; i < XLAT_TABLE_ENTRIES; i++) { + new_table[i] = old_block_desc | (i << levelshift); + + if ((level + 1) == 3) + new_table[i] |= PTE_PAGE_DESC; + } + + /* Overwrite existing PTE set the new table into effect */ + set_pte_table_desc(pte, new_table, level); +} + +/* Create/Populate translation table(s) for given region */ +static void init_xlat_tables(const struct arm_mmu_region *region) +{ + u64_t *pte; + u64_t virt = region->base_va; + u64_t phys = region->base_pa; + u64_t size = region->size; + u64_t attrs = region->attrs; + u64_t level_size; + u64_t *new_table; + unsigned int level = XLAT_TABLE_BASE_LEVEL; + + MMU_DEBUG("mmap: virt %llx phys %llx size %llx\n", virt, phys, size); + /* check minimum alignment requirement for given mmap region */ + __ASSERT(((virt & (PAGE_SIZE - 1)) == 0) && + ((size & (PAGE_SIZE - 1)) == 0), + "address/size are not page aligned\n"); + + while (size) { + __ASSERT(level <= XLAT_TABLE_LEVEL_MAX, + "max translation table level exceeded\n"); + + /* Locate PTE for given virtual address and page table level */ + pte = calculate_pte_index(virt, level); + __ASSERT(pte != NULL, "pte not found\n"); + + level_size = 1ULL << LEVEL_TO_VA_SIZE_SHIFT(level); + + if (size >= level_size && !(virt & (level_size - 1))) { + /* Given range fits into level size, + * create block/page descriptor + */ + set_pte_block_desc(pte, phys, attrs, level); + virt += level_size; + phys += level_size; + size -= level_size; + /* Range is mapped, start again for next range */ + level = XLAT_TABLE_BASE_LEVEL; + } else if (pte_desc_type(pte) == PTE_INVALID_DESC) { + /* Range doesn't fit, create subtable */ + new_table = new_prealloc_table(); + set_pte_table_desc(pte, new_table, level); + level++; + } else if (pte_desc_type(pte) == PTE_BLOCK_DESC) { + split_pte_block_desc(pte, level); + level++; + } else if (pte_desc_type(pte) == PTE_TABLE_DESC) + level++; + } +} + +static void setup_page_tables(void) +{ + unsigned int index; + const struct arm_mmu_region *region; + u64_t max_va = 0, max_pa = 0; + + for (index = 0; index < mmu_config.num_regions; index++) { + region = &mmu_config.mmu_regions[index]; + max_va = MAX(max_va, region->base_va + region->size); + max_pa = MAX(max_pa, region->base_pa + region->size); + } + + __ASSERT(max_va <= (1ULL << CONFIG_ARM64_VA_BITS), + "Maximum VA not supported\n"); + __ASSERT(max_pa <= (1ULL << CONFIG_ARM64_PA_BITS), + "Maximum PA not supported\n"); + + /* create translation tables for user provided platform regions */ + for (index = 0; index < mmu_config.num_regions; index++) { + region = &mmu_config.mmu_regions[index]; + if (region->size || region->attrs) + init_xlat_tables(region); + } +} + +static void enable_mmu_el1(unsigned int flags) +{ + ARG_UNUSED(flags); + u64_t val; + + /* Set MAIR, TCR and TBBR registers */ + __asm__ 
volatile("msr mair_el1, %0" + : + : "r" (MEMORY_ATTRIBUTES) + : "memory", "cc"); + __asm__ volatile("msr tcr_el1, %0" + : + : "r" (get_tcr(1)) + : "memory", "cc"); + __asm__ volatile("msr ttbr0_el1, %0" + : + : "r" ((u64_t)base_xlat_table) + : "memory", "cc"); + + /* Ensure these changes are seen before MMU is enabled */ + __ISB(); + + /* Enable the MMU and data cache */ + __asm__ volatile("mrs %0, sctlr_el1" : "=r" (val)); + __asm__ volatile("msr sctlr_el1, %0" + : + : "r" (val | SCTLR_M_BIT | SCTLR_C_BIT) + : "memory", "cc"); + + /* Ensure the MMU enable takes effect immediately */ + __ISB(); + + MMU_DEBUG("MMU enabled with dcache\n"); +} + +/* ARM MMU Driver Initial Setup */ + +/* + * @brief MMU default configuration + * + * This function provides the default configuration mechanism for the Memory + * Management Unit (MMU). + */ +static int arm_mmu_init(struct device *arg) +{ + u64_t val; + unsigned int idx, flags = 0; + + /* Current MMU code supports only EL1 */ + __asm__ volatile("mrs %0, CurrentEL" : "=r" (val)); + + __ASSERT(GET_EL(val) == MODE_EL1, + "Exception level not EL1, MMU not enabled!\n"); + + /* Ensure that MMU is already not enabled */ + __asm__ volatile("mrs %0, sctlr_el1" : "=r" (val)); + __ASSERT((val & SCTLR_M_BIT) == 0, "MMU is already enabled\n"); + + MMU_DEBUG("xlat tables:\n"); + MMU_DEBUG("base table(L%d): %p, %d entries\n", XLAT_TABLE_BASE_LEVEL, + (u64_t *)base_xlat_table, NUM_BASE_LEVEL_ENTRIES); + for (idx = 0; idx < CONFIG_MAX_XLAT_TABLES; idx++) + MMU_DEBUG("%d: %p\n", idx, (u64_t *)(xlat_tables + idx)); + + setup_page_tables(); + + /* currently only EL1 is supported */ + enable_mmu_el1(flags); + + return 0; +} + +SYS_INIT(arm_mmu_init, PRE_KERNEL_1, +#if MMU_DEBUG_PRINTS + MMU_DEBUG_PRIORITY +#else + CONFIG_KERNEL_INIT_PRIORITY_DEVICE +#endif +); diff --git a/include/arch/arm/aarch64/arm_mmu.h b/include/arch/arm/aarch64/arm_mmu.h new file mode 100644 index 00000000000..04219579e4b --- /dev/null +++ b/include/arch/arm/aarch64/arm_mmu.h @@ -0,0 +1,166 @@ +/* + * Copyright 2019 Broadcom + * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + * + * SPDX-License-Identifier: Apache-2.0 + */ +#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_MMU_ARM_MMU_H_ +#define ZEPHYR_INCLUDE_ARCH_ARM64_MMU_ARM_MMU_H_ + +/* Following Memory types supported through MAIR encodings can be passed + * by user through "attrs"(attributes) field of specified memory region. + * As MAIR supports such 8 encodings, we will reserve attrs[2:0]; + * so that we can provide encodings upto 7 if needed in future. + */ +#define MT_TYPE_MASK 0x7U +#define MT_TYPE(attr) (attr & MT_TYPE_MASK) +#define MT_DEVICE_nGnRnE 0U +#define MT_DEVICE_nGnRE 1U +#define MT_DEVICE_GRE 2U +#define MT_NORMAL_NC 3U +#define MT_NORMAL 4U + +#define MEMORY_ATTRIBUTES ((0x00 << (MT_DEVICE_nGnRnE * 8)) | \ + (0x04 << (MT_DEVICE_nGnRE * 8)) | \ + (0x0c << (MT_DEVICE_GRE * 8)) | \ + (0x44 << (MT_NORMAL_NC * 8)) | \ + (0xffUL << (MT_NORMAL * 8))) + +/* More flags from user's perpective are supported using remaining bits + * of "attrs" field, i.e. attrs[31:3], underlying code will take care + * of setting PTE fields correctly. 
+
+/* More flags from the user's perspective are supported using the remaining
+ * bits of the "attrs" field, i.e. attrs[31:3]; the underlying code will take
+ * care of setting the PTE fields correctly.
+ *
+ * Current usage of attrs[31:3] is:
+ * attrs[3] : Access Permissions
+ * attrs[4] : Memory access from secure/ns state
+ * attrs[5] : Execute Permissions
+ */
+#define MT_PERM_SHIFT 3U
+#define MT_SEC_SHIFT 4U
+#define MT_EXECUTE_SHIFT 5U
+
+#define MT_RO (0U << MT_PERM_SHIFT)
+#define MT_RW (1U << MT_PERM_SHIFT)
+
+#define MT_SECURE (0U << MT_SEC_SHIFT)
+#define MT_NS (1U << MT_SEC_SHIFT)
+
+#define MT_EXECUTE (0U << MT_EXECUTE_SHIFT)
+#define MT_EXECUTE_NEVER (1U << MT_EXECUTE_SHIFT)
+
+/* Some compound attributes for the most common usages */
+#define MT_CODE (MT_NORMAL | MT_RO | MT_EXECUTE)
+#define MT_RODATA (MT_NORMAL | MT_RO | MT_EXECUTE_NEVER)
+
+/*
+ * A PTE descriptor can be a block descriptor, a table descriptor
+ * or a page descriptor.
+ */
+#define PTE_DESC_TYPE_MASK 3U
+#define PTE_BLOCK_DESC 1U
+#define PTE_TABLE_DESC 3U
+#define PTE_PAGE_DESC 3U
+#define PTE_INVALID_DESC 0U
+
+/*
+ * Block and page descriptor attribute fields
+ */
+#define PTE_BLOCK_DESC_MEMTYPE(x) (x << 2)
+#define PTE_BLOCK_DESC_NS (1ULL << 5)
+#define PTE_BLOCK_DESC_AP_RO (1ULL << 7)
+#define PTE_BLOCK_DESC_AP_RW (0ULL << 7)
+#define PTE_BLOCK_DESC_NON_SHARE (0ULL << 8)
+#define PTE_BLOCK_DESC_OUTER_SHARE (2ULL << 8)
+#define PTE_BLOCK_DESC_INNER_SHARE (3ULL << 8)
+#define PTE_BLOCK_DESC_AF (1ULL << 10)
+#define PTE_BLOCK_DESC_NG (1ULL << 11)
+#define PTE_BLOCK_DESC_PXN (1ULL << 53)
+#define PTE_BLOCK_DESC_UXN (1ULL << 54)
+
+/*
+ * TCR definitions.
+ */
+#define TCR_EL1_IPS_SHIFT 32U
+#define TCR_EL2_PS_SHIFT 16U
+#define TCR_EL3_PS_SHIFT 16U
+
+#define TCR_T0SZ_SHIFT 0U
+#define TCR_T0SZ(x) ((64 - (x)) << TCR_T0SZ_SHIFT)
+
+#define TCR_IRGN_NC (0ULL << 8)
+#define TCR_IRGN_WBWA (1ULL << 8)
+#define TCR_IRGN_WT (2ULL << 8)
+#define TCR_IRGN_WBNWA (3ULL << 8)
+#define TCR_IRGN_MASK (3ULL << 8)
+#define TCR_ORGN_NC (0ULL << 10)
+#define TCR_ORGN_WBWA (1ULL << 10)
+#define TCR_ORGN_WT (2ULL << 10)
+#define TCR_ORGN_WBNWA (3ULL << 10)
+#define TCR_ORGN_MASK (3ULL << 10)
+#define TCR_SHARED_NON (0ULL << 12)
+#define TCR_SHARED_OUTER (2ULL << 12)
+#define TCR_SHARED_INNER (3ULL << 12)
+#define TCR_TG0_4K (0ULL << 14)
+#define TCR_TG0_64K (1ULL << 14)
+#define TCR_TG0_16K (2ULL << 14)
+#define TCR_EPD1_DISABLE (1ULL << 23)
+
+#define TCR_PS_BITS_4GB 0x0ULL
+#define TCR_PS_BITS_64GB 0x1ULL
+#define TCR_PS_BITS_1TB 0x2ULL
+#define TCR_PS_BITS_4TB 0x3ULL
+#define TCR_PS_BITS_16TB 0x4ULL
+#define TCR_PS_BITS_256TB 0x5ULL
+
+#ifndef _ASMLANGUAGE
+/* Region definition data structure */
+struct arm_mmu_region {
+        /* Region Base Physical Address */
+        u64_t base_pa;
+        /* Region Base Virtual Address */
+        u64_t base_va;
+        /* Region size */
+        u64_t size;
+        /* Region Name */
+        const char *name;
+        /* Region Attributes */
+        unsigned int attrs;
+};
+
+/* MMU configuration data structure */
+struct arm_mmu_config {
+        /* Number of regions */
+        u32_t num_regions;
+        /* Regions */
+        const struct arm_mmu_region *mmu_regions;
+};
+
+/* Convenience macros to represent the ARMv8-A-specific
+ * configuration for memory access permission and
+ * cacheability attributes.
+ */
+
+#define MMU_REGION_ENTRY(_name, _base_pa, _base_va, _size, _attrs) \
+        {\
+                .name = _name, \
+                .base_pa = _base_pa, \
+                .base_va = _base_va, \
+                .size = _size, \
+                .attrs = _attrs, \
+        }
+
+#define MMU_REGION_FLAT_ENTRY(name, adr, sz, attrs) \
+        MMU_REGION_ENTRY(name, adr, adr, sz, attrs)
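+
+/*
+ * Example (hypothetical SoC definition, for illustration only):
+ *
+ *      static const struct arm_mmu_region mmu_regions[] = {
+ *              MMU_REGION_FLAT_ENTRY("SRAM", 0x40000000, 0x800000,
+ *                                    MT_NORMAL | MT_RW | MT_SECURE),
+ *              MMU_REGION_FLAT_ENTRY("UART", 0x4c000000, 0x1000,
+ *                                    MT_DEVICE_nGnRnE | MT_RW | MT_SECURE),
+ *      };
+ *
+ *      const struct arm_mmu_config mmu_config = {
+ *              .num_regions = ARRAY_SIZE(mmu_regions),
+ *              .mmu_regions = mmu_regions,
+ *      };
+ */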
+
+/* Reference to the MMU configuration.
+ *
+ * This struct is defined and populated for each SoC (in the SoC definition),
+ * and holds the build-time configuration information for the fixed MMU
+ * regions enabled during kernel initialization.
+ */
+extern const struct arm_mmu_config mmu_config;
+
+#endif /* _ASMLANGUAGE */
+
+#endif /* ZEPHYR_INCLUDE_ARCH_ARM64_MMU_ARM_MMU_H_ */