aarch64: mmu: Do not assume a single set of pagetables is used

The MMU code currently assumes that Zephyr uses a single set of page
tables shared by kernel and user threads. This may no longer be true in
the future, when multiple sets of page tables can be present and
swapped at run-time.

With this patch a new arm_mmu_ptables struct is introduced. It holds a
pointer to the memory region containing the page tables, together with
the helper variables used to manage them. The ARM64 MMU code then
operates on this struct instead of assuming that the kernel page tables
are the only ones present (a short sketch of what this enables follows
the commit metadata below).

Signed-off-by: Carlo Caione <ccaione@baylibre.com>
Author: Carlo Caione <ccaione@baylibre.com>
Date: 2020-10-22 09:39:44 +02:00
Committed by: Anas Nashif
Commit: 0a0061d901
2 changed files with 42 additions and 28 deletions
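To make the new struct concrete, here is a minimal sketch (not part of
this commit) of what it enables: a second, purely hypothetical set of
page tables, e.g. for user threads, declared with its own backing
storage, mirroring the kernel_ptables instance introduced in the diff
below. It assumes the CONFIG_MAX_XLAT_TABLES and Ln_XLAT_NUM_ENTRIES
definitions from arm_mmu.h.

#include "arm_mmu.h"

/* Hypothetical second set of page tables (illustration only): each
 * arm_mmu_ptables instance owns a flat array of translation tables
 * plus its own allocation cursor (next_table, zero-initialized).
 */
static uint64_t user_xlat_tables[CONFIG_MAX_XLAT_TABLES * Ln_XLAT_NUM_ENTRIES]
		__aligned(Ln_XLAT_NUM_ENTRIES * sizeof(uint64_t));

static struct arm_mmu_ptables user_ptables = {
	.xlat_tables = user_xlat_tables,
};

With that in place, the reworked helpers could populate and activate
either set, e.g. setup_page_tables(&user_ptables) followed by the
TTBR0 write that enable_mmu_el1() performs.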


@@ -15,9 +15,13 @@
 #include "arm_mmu.h"
 
-static uint64_t xlat_tables[CONFIG_MAX_XLAT_TABLES][Ln_XLAT_NUM_ENTRIES]
+static uint64_t kernel_xlat_tables[CONFIG_MAX_XLAT_TABLES * Ln_XLAT_NUM_ENTRIES]
 		__aligned(Ln_XLAT_NUM_ENTRIES * sizeof(uint64_t));
 
+static struct arm_mmu_ptables kernel_ptables = {
+	.xlat_tables = kernel_xlat_tables,
+};
+
 /* Translation table control register settings */
 static uint64_t get_tcr(int el)
 {
@@ -52,7 +56,8 @@ static int pte_desc_type(uint64_t *pte)
 	return *pte & PTE_DESC_TYPE_MASK;
 }
 
-static uint64_t *calculate_pte_index(uint64_t addr, int level)
+static uint64_t *calculate_pte_index(struct arm_mmu_ptables *ptables,
+				     uint64_t addr, int level)
 {
 	int base_level = BASE_XLAT_LEVEL;
 	uint64_t *pte;
@@ -60,7 +65,7 @@ static uint64_t *calculate_pte_index(uint64_t addr, int level)
 	unsigned int i;
 
 	/* Walk through all translation tables to find pte index */
-	pte = (uint64_t *)xlat_tables;
+	pte = (uint64_t *)ptables->xlat_tables;
 	for (i = base_level; i < XLAT_LEVEL_MAX; i++) {
 		idx = XLAT_TABLE_VA_IDX(addr, i);
 		pte += idx;
@@ -184,18 +189,20 @@ static void set_pte_block_desc(uint64_t *pte, uint64_t addr_pa,
 }
 
 /* Returns a new reallocated table */
-static uint64_t *new_prealloc_table(void)
+static uint64_t *new_prealloc_table(struct arm_mmu_ptables *ptables)
 {
-	static unsigned int table_idx = 1;
+	ptables->next_table++;
 
-	__ASSERT(table_idx < CONFIG_MAX_XLAT_TABLES,
+	__ASSERT(ptables->next_table < CONFIG_MAX_XLAT_TABLES,
		"Enough xlat tables not allocated");
 
-	return (uint64_t *)(xlat_tables[table_idx++]);
+	return (uint64_t *)(&ptables->xlat_tables[ptables->next_table *
+			    Ln_XLAT_NUM_ENTRIES]);
 }
 
 /* Splits a block into table with entries spanning the old block */
-static void split_pte_block_desc(uint64_t *pte, uint64_t desc, int level)
+static void split_pte_block_desc(struct arm_mmu_ptables *ptables, uint64_t *pte,
+				 uint64_t desc, int level)
 {
 	uint64_t old_block_desc = *pte;
 	uint64_t *new_table;
@@ -205,7 +212,7 @@ static void split_pte_block_desc(uint64_t *pte, uint64_t desc, int level)
 
 	MMU_DEBUG("Splitting existing PTE %p(L%d)\n", pte, level);
 
-	new_table = new_prealloc_table();
+	new_table = new_prealloc_table(ptables);
 
 	for (i = 0; i < Ln_XLAT_NUM_ENTRIES; i++) {
 		new_table[i] = old_block_desc | (i << levelshift);
@@ -218,8 +225,8 @@ static void split_pte_block_desc(uint64_t *pte, uint64_t desc, int level)
 	set_pte_table_desc(pte, new_table, level);
 }
 
-static void add_map(const char *name, uint64_t phys, uint64_t virt,
-		    uint64_t size, uint64_t attrs)
+static void add_map(struct arm_mmu_ptables *ptables, const char *name,
+		    uint64_t phys, uint64_t virt, uint64_t size, uint64_t attrs)
 {
 	uint64_t desc, *pte;
 	uint64_t level_size;
@@ -240,7 +247,7 @@ static void add_map(const char *name, uint64_t phys, uint64_t virt,
			 "max translation table level exceeded\n");
 
 		/* Locate PTE for given virtual address and page table level */
-		pte = calculate_pte_index(virt, level);
+		pte = calculate_pte_index(ptables, virt, level);
 		__ASSERT(pte != NULL, "pte not found\n");
 
 		level_size = 1ULL << LEVEL_TO_VA_SIZE_SHIFT(level);
@@ -257,7 +264,7 @@ static void add_map(const char *name, uint64_t phys, uint64_t virt,
			level = BASE_XLAT_LEVEL;
 		} else if (pte_desc_type(pte) == PTE_INVALID_DESC) {
 			/* Range doesn't fit, create subtable */
-			new_table = new_prealloc_table();
+			new_table = new_prealloc_table(ptables);
 			set_pte_table_desc(pte, new_table, level);
 			level++;
 		} else if (pte_desc_type(pte) == PTE_BLOCK_DESC) {
@@ -266,7 +273,7 @@ static void add_map(const char *name, uint64_t phys, uint64_t virt,
				return;
 
			/* We need to split a new table */
-			split_pte_block_desc(pte, desc, level);
+			split_pte_block_desc(ptables, pte, desc, level);
			level++;
 		} else if (pte_desc_type(pte) == PTE_TABLE_DESC)
			level++;
@@ -304,18 +311,24 @@ static const struct arm_mmu_region mmu_zephyr_regions[] = {
			      MT_NORMAL | MT_P_RO_U_NA | MT_DEFAULT_SECURE_STATE),
 };
 
-static inline void add_arm_mmu_region(const struct arm_mmu_region *region)
+static inline void add_arm_mmu_region(struct arm_mmu_ptables *ptables,
+				      const struct arm_mmu_region *region)
 {
-	add_map(region->name, region->base_pa, region->base_va,
+	add_map(ptables, region->name, region->base_pa, region->base_va,
 		region->size, region->attrs);
 }
 
-static void setup_page_tables(void)
+static void setup_page_tables(struct arm_mmu_ptables *ptables)
 {
 	unsigned int index;
 	const struct arm_mmu_region *region;
 	uint64_t max_va = 0, max_pa = 0;
 
+	MMU_DEBUG("xlat tables:\n");
+	for (index = 0; index < CONFIG_MAX_XLAT_TABLES; index++)
+		MMU_DEBUG("%d: %p\n", index, (uint64_t *)(ptables->xlat_tables +
+			  (index * Ln_XLAT_NUM_ENTRIES)));
+
 	for (index = 0; index < mmu_config.num_regions; index++) {
 		region = &mmu_config.mmu_regions[index];
 		max_va = MAX(max_va, region->base_va + region->size);
@@ -331,18 +344,18 @@ static void setup_page_tables(void)
 	for (index = 0; index < mmu_config.num_regions; index++) {
 		region = &mmu_config.mmu_regions[index];
 		if (region->size || region->attrs)
-			add_arm_mmu_region(region);
+			add_arm_mmu_region(ptables, region);
 	}
 
 	/* setup translation table for zephyr execution regions */
 	for (index = 0; index < ARRAY_SIZE(mmu_zephyr_regions); index++) {
 		region = &mmu_zephyr_regions[index];
 		if (region->size || region->attrs)
-			add_arm_mmu_region(region);
+			add_arm_mmu_region(ptables, region);
 	}
 }
 
-static void enable_mmu_el1(unsigned int flags)
+static void enable_mmu_el1(struct arm_mmu_ptables *ptables, unsigned int flags)
 {
 	ARG_UNUSED(flags);
 	uint64_t val;
@@ -358,7 +371,7 @@ static void enable_mmu_el1(unsigned int flags)
			 : "memory", "cc");
 	__asm__ volatile("msr ttbr0_el1, %0"
			 :
-			 : "r" ((uint64_t)xlat_tables)
+			 : "r" ((uint64_t)ptables->xlat_tables)
			 : "memory", "cc");
 
 	/* Ensure these changes are seen before MMU is enabled */
@@ -388,7 +401,7 @@ static void enable_mmu_el1(unsigned int flags)
 static int arm_mmu_init(const struct device *arg)
 {
 	uint64_t val;
-	unsigned int idx, flags = 0;
+	unsigned int flags = 0;
 
 	/* Current MMU code supports only EL1 */
 	__asm__ volatile("mrs %0, CurrentEL" : "=r" (val));
@@ -400,14 +413,10 @@ static int arm_mmu_init(const struct device *arg)
 	__asm__ volatile("mrs %0, sctlr_el1" : "=r" (val));
 	__ASSERT((val & SCTLR_M) == 0, "MMU is already enabled\n");
 
-	MMU_DEBUG("xlat tables:\n");
-	for (idx = 0; idx < CONFIG_MAX_XLAT_TABLES; idx++)
-		MMU_DEBUG("%d: %p\n", idx, (uint64_t *)(xlat_tables + idx));
-
-	setup_page_tables();
+	setup_page_tables(&kernel_ptables);
 
 	/* currently only EL1 is supported */
-	enable_mmu_el1(flags);
+	enable_mmu_el1(&kernel_ptables, flags);
 
 	return 0;
 }
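One detail worth calling out in the hunks above: the backing storage
changes from a 2-D array (xlat_tables[CONFIG_MAX_XLAT_TABLES][Ln_XLAT_NUM_ENTRIES])
to a flat 1-D array, so new_prealloc_table() now locates the n-th table
by offset arithmetic. A standalone sketch (made-up sizes, not Zephyr
code) checking that the two addressing forms agree:

#include <assert.h>
#include <stdint.h>

#define N_TABLES  4	/* stand-in for CONFIG_MAX_XLAT_TABLES */
#define N_ENTRIES 512	/* stand-in for Ln_XLAT_NUM_ENTRIES */

static uint64_t flat[N_TABLES * N_ENTRIES];

int main(void)
{
	/* View the flat array through the old 2-D layout */
	uint64_t (*as_2d)[N_ENTRIES] = (uint64_t (*)[N_ENTRIES])flat;
	int n;

	for (n = 0; n < N_TABLES; n++) {
		/* Old form: xlat_tables[n]; new form: &xlat_tables[n * N_ENTRIES] */
		assert(&flat[n * N_ENTRIES] == as_2d[n]);
	}
	return 0;
}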


@@ -158,6 +158,11 @@ struct arm_mmu_config {
 	const struct arm_mmu_region *mmu_regions;
 };
 
+struct arm_mmu_ptables {
+	uint64_t *xlat_tables;
+	unsigned char next_table;
+};
+
 /* Convenience macros to represent the ARMv8-A-specific
  * configuration for memory access permission and
  * cache-ability attribution.
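As a closing note, the point of hosting the tables in a struct is that
the active set becomes a run-time choice. A hedged sketch (the
switch_ptables() helper is hypothetical, not part of this commit) of
how a later change might swap sets by reprogramming TTBR0_EL1, using
the same msr sequence enable_mmu_el1() already performs:

/* Hypothetical helper (illustration only): make a different set of
 * page tables active. Assumes the MMU is enabled and that the new
 * tables map all code and data currently in use.
 */
static void switch_ptables(struct arm_mmu_ptables *ptables)
{
	__asm__ volatile("msr ttbr0_el1, %0"
			 :
			 : "r" ((uint64_t)ptables->xlat_tables)
			 : "memory");
	/* Flush stale TLB entries belonging to the old tables */
	__asm__ volatile("isb; tlbi vmalle1; dsb sy; isb" ::: "memory");
}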