xtensa: mmu: rename prefix z_xtensa to xtensa_mmu

This follows the idea of removing the z_ prefix from these symbols.
Since the MMU code has a large number of them, separate these changes
out into their own commit to ease the review effort.

Since these no longer have the z_ prefix, they need proper doxygen
documentation, so add that as well.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Author: Daniel Leung, 2023-12-07 14:54:22 -08:00
Committed by: Carles Cufí
Commit: 8bf20ee975
8 changed files with 446 additions and 223 deletions
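To make the rename concrete, here is a minimal illustrative sketch (not part of the commit) of how a call site that maps a single kernel page reads with the renamed macros. The helper name example_map_kernel_page is hypothetical; it assumes the private MMU header providing these macros is in the include path, that the L2 table for the address already exists, and it omits the cache flush/TLB invalidation the real mapping code performs. The macros themselves (XTENSA_MMU_L1_POS, XTENSA_MMU_L2_POS, XTENSA_MMU_PTE_PPN_MASK, XTENSA_MMU_PTE, XTENSA_MMU_KERNEL_RING, XTENSA_MMU_PERM_W, XTENSA_MMU_CACHED_WB) all appear in the hunks below.

/* Illustration only: maps one 4 KiB page into the kernel ring using
 * the renamed macros (formerly Z_XTENSA_L1_POS, Z_XTENSA_L2_POS,
 * Z_XTENSA_PTE, Z_XTENSA_KERNEL_RING, Z_XTENSA_MMU_W,
 * Z_XTENSA_MMU_CACHED_WB).
 */
static void example_map_kernel_page(uint32_t *l1_table, uint32_t vaddr, uint32_t paddr)
{
	uint32_t l1_pos = XTENSA_MMU_L1_POS(vaddr);   /* which L1 entry */
	uint32_t l2_pos = XTENSA_MMU_L2_POS(vaddr);   /* which L2 entry */
	uint32_t *l2_table = (uint32_t *)(l1_table[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);

	/* Writable, write-back cached page owned by the kernel ring. */
	l2_table[l2_pos] = XTENSA_MMU_PTE(paddr, XTENSA_MMU_KERNEL_RING,
					  XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB);
}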

@@ -26,22 +26,22 @@ static void compute_regs(uint32_t user_asid, uint32_t *l1_page, struct tlb_regs
__ASSERT_NO_MSG((((uint32_t)l1_page) & 0xfff) == 0); __ASSERT_NO_MSG((((uint32_t)l1_page) & 0xfff) == 0);
__ASSERT_NO_MSG((user_asid == 0) || ((user_asid > 2) && __ASSERT_NO_MSG((user_asid == 0) || ((user_asid > 2) &&
(user_asid < Z_XTENSA_MMU_SHARED_ASID))); (user_asid < XTENSA_MMU_SHARED_ASID)));
/* We don't use ring 1, ring 0 ASID must be 1 */ /* We don't use ring 1, ring 0 ASID must be 1 */
regs->rasid = (Z_XTENSA_MMU_SHARED_ASID << 24) | regs->rasid = (XTENSA_MMU_SHARED_ASID << 24) |
(user_asid << 16) | 0x000201; (user_asid << 16) | 0x000201;
/* Derive PTEVADDR from ASID so each domain gets its own PTE area */ /* Derive PTEVADDR from ASID so each domain gets its own PTE area */
regs->ptevaddr = CONFIG_XTENSA_MMU_PTEVADDR + user_asid * 0x400000; regs->ptevaddr = CONFIG_XTENSA_MMU_PTEVADDR + user_asid * 0x400000;
/* The ptables code doesn't add the mapping for the l1 page itself */ /* The ptables code doesn't add the mapping for the l1 page itself */
l1_page[Z_XTENSA_L1_POS(regs->ptevaddr)] = l1_page[XTENSA_MMU_L1_POS(regs->ptevaddr)] =
(uint32_t)l1_page | Z_XTENSA_PAGE_TABLE_ATTR; (uint32_t)l1_page | XTENSA_MMU_PAGE_TABLE_ATTR;
regs->ptepin_at = (uint32_t)l1_page; regs->ptepin_at = (uint32_t)l1_page;
regs->ptepin_as = Z_XTENSA_PTE_ENTRY_VADDR(regs->ptevaddr, regs->ptevaddr) regs->ptepin_as = XTENSA_MMU_PTE_ENTRY_VADDR(regs->ptevaddr, regs->ptevaddr)
| Z_XTENSA_MMU_PTE_WAY; | XTENSA_MMU_PTE_WAY;
/* Pin mapping for refilling the vector address into the ITLB /* Pin mapping for refilling the vector address into the ITLB
* (for handling TLB miss exceptions). Note: this is NOT an * (for handling TLB miss exceptions). Note: this is NOT an
@@ -51,11 +51,11 @@ static void compute_regs(uint32_t user_asid, uint32_t *l1_page, struct tlb_regs
* hardware doesn't have a 4k pinnable instruction TLB way, * hardware doesn't have a 4k pinnable instruction TLB way,
* frustratingly. * frustratingly.
*/ */
uint32_t vb_pte = l1_page[Z_XTENSA_L1_POS(vecbase)]; uint32_t vb_pte = l1_page[XTENSA_MMU_L1_POS(vecbase)];
regs->vecpin_at = vb_pte; regs->vecpin_at = vb_pte;
regs->vecpin_as = Z_XTENSA_PTE_ENTRY_VADDR(regs->ptevaddr, vecbase) regs->vecpin_as = XTENSA_MMU_PTE_ENTRY_VADDR(regs->ptevaddr, vecbase)
| Z_XTENSA_MMU_VECBASE_WAY; | XTENSA_MMU_VECBASE_WAY;
} }
/* Switch to a new page table. There are four items we have to set in /* Switch to a new page table. There are four items we have to set in

@@ -127,9 +127,9 @@ static const struct xtensa_mmu_range mmu_zephyr_ranges[] = {
.start = (uint32_t)_image_ram_start, .start = (uint32_t)_image_ram_start,
.end = (uint32_t)_image_ram_end, .end = (uint32_t)_image_ram_end,
#ifdef CONFIG_XTENSA_RPO_CACHE #ifdef CONFIG_XTENSA_RPO_CACHE
.attrs = Z_XTENSA_MMU_W, .attrs = XTENSA_MMU_PERM_W,
#else #else
.attrs = Z_XTENSA_MMU_W | Z_XTENSA_MMU_CACHED_WB, .attrs = XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB,
#endif #endif
.name = "data", .name = "data",
}, },
@@ -139,9 +139,9 @@ static const struct xtensa_mmu_range mmu_zephyr_ranges[] = {
.start = (uint32_t)_heap_start, .start = (uint32_t)_heap_start,
.end = (uint32_t)_heap_end, .end = (uint32_t)_heap_end,
#ifdef CONFIG_XTENSA_RPO_CACHE #ifdef CONFIG_XTENSA_RPO_CACHE
.attrs = Z_XTENSA_MMU_W, .attrs = XTENSA_MMU_PERM_W,
#else #else
.attrs = Z_XTENSA_MMU_W | Z_XTENSA_MMU_CACHED_WB, .attrs = XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB,
#endif #endif
.name = "heap", .name = "heap",
}, },
@@ -150,14 +150,14 @@ static const struct xtensa_mmu_range mmu_zephyr_ranges[] = {
{ {
.start = (uint32_t)__text_region_start, .start = (uint32_t)__text_region_start,
.end = (uint32_t)__text_region_end, .end = (uint32_t)__text_region_end,
.attrs = Z_XTENSA_MMU_X | Z_XTENSA_MMU_CACHED_WB | Z_XTENSA_MMU_MAP_SHARED, .attrs = XTENSA_MMU_PERM_X | XTENSA_MMU_CACHED_WB | XTENSA_MMU_MAP_SHARED,
.name = "text", .name = "text",
}, },
/* Mark rodata segment cacheable, read only and non-executable */ /* Mark rodata segment cacheable, read only and non-executable */
{ {
.start = (uint32_t)__rodata_region_start, .start = (uint32_t)__rodata_region_start,
.end = (uint32_t)__rodata_region_end, .end = (uint32_t)__rodata_region_end,
.attrs = Z_XTENSA_MMU_CACHED_WB | Z_XTENSA_MMU_MAP_SHARED, .attrs = XTENSA_MMU_CACHED_WB | XTENSA_MMU_MAP_SHARED,
.name = "rodata", .name = "rodata",
}, },
}; };
@@ -180,7 +180,7 @@ static inline uint32_t *thread_page_tables_get(const struct k_thread *thread)
*/ */
static inline bool is_pte_illegal(uint32_t pte) static inline bool is_pte_illegal(uint32_t pte)
{ {
uint32_t attr = pte & Z_XTENSA_PTE_ATTR_MASK; uint32_t attr = pte & XTENSA_MMU_PTE_ATTR_MASK;
/* /*
* The ISA manual states only 12 and 14 are illegal values. * The ISA manual states only 12 and 14 are illegal values.
@@ -201,7 +201,7 @@ static void init_page_table(uint32_t *ptable, size_t num_entries)
int i; int i;
for (i = 0; i < num_entries; i++) { for (i = 0; i < num_entries; i++) {
ptable[i] = Z_XTENSA_MMU_ILLEGAL; ptable[i] = XTENSA_MMU_PTE_ILLEGAL;
} }
} }
@@ -224,11 +224,12 @@ static void map_memory_range(const uint32_t start, const uint32_t end,
uint32_t page, *table; uint32_t page, *table;
for (page = start; page < end; page += CONFIG_MMU_PAGE_SIZE) { for (page = start; page < end; page += CONFIG_MMU_PAGE_SIZE) {
uint32_t pte = Z_XTENSA_PTE(page, uint32_t pte = XTENSA_MMU_PTE(page,
shared ? Z_XTENSA_SHARED_RING : Z_XTENSA_KERNEL_RING, shared ? XTENSA_MMU_SHARED_RING :
attrs); XTENSA_MMU_KERNEL_RING,
uint32_t l2_pos = Z_XTENSA_L2_POS(page); attrs);
uint32_t l1_pos = Z_XTENSA_L1_POS(page); uint32_t l2_pos = XTENSA_MMU_L2_POS(page);
uint32_t l1_pos = XTENSA_MMU_L1_POS(page);
if (is_pte_illegal(z_xtensa_kernel_ptables[l1_pos])) { if (is_pte_illegal(z_xtensa_kernel_ptables[l1_pos])) {
table = alloc_l2_table(); table = alloc_l2_table();
@@ -239,11 +240,11 @@ static void map_memory_range(const uint32_t start, const uint32_t end,
init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES); init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES);
z_xtensa_kernel_ptables[l1_pos] = z_xtensa_kernel_ptables[l1_pos] =
Z_XTENSA_PTE((uint32_t)table, Z_XTENSA_KERNEL_RING, XTENSA_MMU_PTE((uint32_t)table, XTENSA_MMU_KERNEL_RING,
Z_XTENSA_PAGE_TABLE_ATTR); XTENSA_MMU_PAGE_TABLE_ATTR);
} }
table = (uint32_t *)(z_xtensa_kernel_ptables[l1_pos] & Z_XTENSA_PTE_PPN_MASK); table = (uint32_t *)(z_xtensa_kernel_ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
table[l2_pos] = pte; table[l2_pos] = pte;
} }
} }
@@ -257,7 +258,7 @@ static void map_memory(const uint32_t start, const uint32_t end,
if (arch_xtensa_is_ptr_uncached((void *)start)) { if (arch_xtensa_is_ptr_uncached((void *)start)) {
map_memory_range(POINTER_TO_UINT(z_soc_cached_ptr((void *)start)), map_memory_range(POINTER_TO_UINT(z_soc_cached_ptr((void *)start)),
POINTER_TO_UINT(z_soc_cached_ptr((void *)end)), POINTER_TO_UINT(z_soc_cached_ptr((void *)end)),
attrs | Z_XTENSA_MMU_CACHED_WB, shared); attrs | XTENSA_MMU_CACHED_WB, shared);
} else if (arch_xtensa_is_ptr_cached((void *)start)) { } else if (arch_xtensa_is_ptr_cached((void *)start)) {
map_memory_range(POINTER_TO_UINT(z_soc_uncached_ptr((void *)start)), map_memory_range(POINTER_TO_UINT(z_soc_uncached_ptr((void *)start)),
POINTER_TO_UINT(z_soc_uncached_ptr((void *)end)), attrs, shared); POINTER_TO_UINT(z_soc_uncached_ptr((void *)end)), attrs, shared);
@@ -277,8 +278,8 @@ static void xtensa_init_page_tables(void)
bool shared; bool shared;
uint32_t attrs; uint32_t attrs;
shared = !!(range->attrs & Z_XTENSA_MMU_MAP_SHARED); shared = !!(range->attrs & XTENSA_MMU_MAP_SHARED);
attrs = range->attrs & ~Z_XTENSA_MMU_MAP_SHARED; attrs = range->attrs & ~XTENSA_MMU_MAP_SHARED;
map_memory(range->start, range->end, attrs, shared); map_memory(range->start, range->end, attrs, shared);
} }
@@ -301,8 +302,8 @@ static void xtensa_init_page_tables(void)
bool shared; bool shared;
uint32_t attrs; uint32_t attrs;
shared = !!(range->attrs & Z_XTENSA_MMU_MAP_SHARED); shared = !!(range->attrs & XTENSA_MMU_MAP_SHARED);
attrs = range->attrs & ~Z_XTENSA_MMU_MAP_SHARED; attrs = range->attrs & ~XTENSA_MMU_MAP_SHARED;
map_memory(range->start, range->end, attrs, shared); map_memory(range->start, range->end, attrs, shared);
} }
@@ -316,10 +317,10 @@ static void xtensa_init_page_tables(void)
*/ */
map_memory_range((uint32_t) &l1_page_table[0], map_memory_range((uint32_t) &l1_page_table[0],
(uint32_t) &l1_page_table[CONFIG_XTENSA_MMU_NUM_L1_TABLES], (uint32_t) &l1_page_table[CONFIG_XTENSA_MMU_NUM_L1_TABLES],
Z_XTENSA_PAGE_TABLE_ATTR | Z_XTENSA_MMU_W, false); XTENSA_MMU_PAGE_TABLE_ATTR | XTENSA_MMU_PERM_W, false);
map_memory_range((uint32_t) &l2_page_tables[0], map_memory_range((uint32_t) &l2_page_tables[0],
(uint32_t) &l2_page_tables[CONFIG_XTENSA_MMU_NUM_L2_TABLES], (uint32_t) &l2_page_tables[CONFIG_XTENSA_MMU_NUM_L2_TABLES],
Z_XTENSA_PAGE_TABLE_ATTR | Z_XTENSA_MMU_W, false); XTENSA_MMU_PAGE_TABLE_ATTR | XTENSA_MMU_PERM_W, false);
sys_cache_data_flush_all(); sys_cache_data_flush_all();
} }
@@ -329,7 +330,7 @@ __weak void arch_xtensa_mmu_post_init(bool is_core0)
ARG_UNUSED(is_core0); ARG_UNUSED(is_core0);
} }
void z_xtensa_mmu_init(void) void xtensa_mmu_init(void)
{ {
if (_current_cpu->id == 0) { if (_current_cpu->id == 0) {
/* This is normally done via arch_kernel_init() inside z_cstart(). /* This is normally done via arch_kernel_init() inside z_cstart().
@@ -372,8 +373,8 @@ __weak void arch_reserved_pages_update(void)
static bool l2_page_table_map(uint32_t *l1_table, void *vaddr, uintptr_t phys, static bool l2_page_table_map(uint32_t *l1_table, void *vaddr, uintptr_t phys,
uint32_t flags, bool is_user) uint32_t flags, bool is_user)
{ {
uint32_t l1_pos = Z_XTENSA_L1_POS((uint32_t)vaddr); uint32_t l1_pos = XTENSA_MMU_L1_POS((uint32_t)vaddr);
uint32_t l2_pos = Z_XTENSA_L2_POS((uint32_t)vaddr); uint32_t l2_pos = XTENSA_MMU_L2_POS((uint32_t)vaddr);
uint32_t *table; uint32_t *table;
sys_cache_data_invd_range((void *)&l1_table[l1_pos], sizeof(l1_table[0])); sys_cache_data_invd_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
@@ -387,15 +388,16 @@ static bool l2_page_table_map(uint32_t *l1_table, void *vaddr, uintptr_t phys,
init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES); init_page_table(table, XTENSA_L2_PAGE_TABLE_ENTRIES);
l1_table[l1_pos] = Z_XTENSA_PTE((uint32_t)table, Z_XTENSA_KERNEL_RING, l1_table[l1_pos] = XTENSA_MMU_PTE((uint32_t)table, XTENSA_MMU_KERNEL_RING,
Z_XTENSA_PAGE_TABLE_ATTR); XTENSA_MMU_PAGE_TABLE_ATTR);
sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0])); sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
} }
table = (uint32_t *)(l1_table[l1_pos] & Z_XTENSA_PTE_PPN_MASK); table = (uint32_t *)(l1_table[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
table[l2_pos] = Z_XTENSA_PTE(phys, is_user ? Z_XTENSA_USER_RING : Z_XTENSA_KERNEL_RING, table[l2_pos] = XTENSA_MMU_PTE(phys, is_user ? XTENSA_MMU_USER_RING :
flags); XTENSA_MMU_KERNEL_RING,
flags);
sys_cache_data_flush_range((void *)&table[l2_pos], sizeof(table[0])); sys_cache_data_flush_range((void *)&table[l2_pos], sizeof(table[0]));
xtensa_tlb_autorefill_invalidate(); xtensa_tlb_autorefill_invalidate();
@@ -427,8 +429,8 @@ static inline void __arch_mem_map(void *va, uintptr_t pa, uint32_t xtensa_flags,
paddr_uc = pa; paddr_uc = pa;
} }
flags_uc = (xtensa_flags & ~Z_XTENSA_PTE_ATTR_CACHED_MASK); flags_uc = (xtensa_flags & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK);
flags = flags_uc | Z_XTENSA_MMU_CACHED_WB; flags = flags_uc | XTENSA_MMU_CACHED_WB;
} else { } else {
vaddr = va; vaddr = va;
paddr = pa; paddr = pa;
@@ -493,10 +495,10 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
switch (flags & K_MEM_CACHE_MASK) { switch (flags & K_MEM_CACHE_MASK) {
case K_MEM_CACHE_WB: case K_MEM_CACHE_WB:
xtensa_flags |= Z_XTENSA_MMU_CACHED_WB; xtensa_flags |= XTENSA_MMU_CACHED_WB;
break; break;
case K_MEM_CACHE_WT: case K_MEM_CACHE_WT:
xtensa_flags |= Z_XTENSA_MMU_CACHED_WT; xtensa_flags |= XTENSA_MMU_CACHED_WT;
break; break;
case K_MEM_CACHE_NONE: case K_MEM_CACHE_NONE:
__fallthrough; __fallthrough;
@@ -505,10 +507,10 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
} }
if ((flags & K_MEM_PERM_RW) == K_MEM_PERM_RW) { if ((flags & K_MEM_PERM_RW) == K_MEM_PERM_RW) {
xtensa_flags |= Z_XTENSA_MMU_W; xtensa_flags |= XTENSA_MMU_PERM_W;
} }
if ((flags & K_MEM_PERM_EXEC) == K_MEM_PERM_EXEC) { if ((flags & K_MEM_PERM_EXEC) == K_MEM_PERM_EXEC) {
xtensa_flags |= Z_XTENSA_MMU_X; xtensa_flags |= XTENSA_MMU_PERM_X;
} }
is_user = (flags & K_MEM_PERM_USER) == K_MEM_PERM_USER; is_user = (flags & K_MEM_PERM_USER) == K_MEM_PERM_USER;
@@ -524,7 +526,7 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
} }
#if CONFIG_MP_MAX_NUM_CPUS > 1 #if CONFIG_MP_MAX_NUM_CPUS > 1
z_xtensa_mmu_tlb_ipi(); xtensa_mmu_tlb_ipi();
#endif #endif
sys_cache_data_flush_and_invd_all(); sys_cache_data_flush_and_invd_all();
@@ -537,8 +539,8 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
*/ */
static bool l2_page_table_unmap(uint32_t *l1_table, void *vaddr) static bool l2_page_table_unmap(uint32_t *l1_table, void *vaddr)
{ {
uint32_t l1_pos = Z_XTENSA_L1_POS((uint32_t)vaddr); uint32_t l1_pos = XTENSA_MMU_L1_POS((uint32_t)vaddr);
uint32_t l2_pos = Z_XTENSA_L2_POS((uint32_t)vaddr); uint32_t l2_pos = XTENSA_MMU_L2_POS((uint32_t)vaddr);
uint32_t *l2_table; uint32_t *l2_table;
uint32_t table_pos; uint32_t table_pos;
bool exec; bool exec;
@@ -552,13 +554,13 @@ static bool l2_page_table_unmap(uint32_t *l1_table, void *vaddr)
return true; return true;
} }
exec = l1_table[l1_pos] & Z_XTENSA_MMU_X; exec = l1_table[l1_pos] & XTENSA_MMU_PERM_X;
l2_table = (uint32_t *)(l1_table[l1_pos] & Z_XTENSA_PTE_PPN_MASK); l2_table = (uint32_t *)(l1_table[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0])); sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));
l2_table[l2_pos] = Z_XTENSA_MMU_ILLEGAL; l2_table[l2_pos] = XTENSA_MMU_PTE_ILLEGAL;
sys_cache_data_flush_range((void *)&l2_table[l2_pos], sizeof(l2_table[0])); sys_cache_data_flush_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));
@@ -568,7 +570,7 @@ static bool l2_page_table_unmap(uint32_t *l1_table, void *vaddr)
} }
} }
l1_table[l1_pos] = Z_XTENSA_MMU_ILLEGAL; l1_table[l1_pos] = XTENSA_MMU_PTE_ILLEGAL;
sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0])); sys_cache_data_flush_range((void *)&l1_table[l1_pos], sizeof(l1_table[0]));
table_pos = (l2_table - (uint32_t *)l2_page_tables) / (XTENSA_L2_PAGE_TABLE_ENTRIES); table_pos = (l2_table - (uint32_t *)l2_page_tables) / (XTENSA_L2_PAGE_TABLE_ENTRIES);
@@ -648,7 +650,7 @@ void arch_mem_unmap(void *addr, size_t size)
} }
#if CONFIG_MP_MAX_NUM_CPUS > 1 #if CONFIG_MP_MAX_NUM_CPUS > 1
z_xtensa_mmu_tlb_ipi(); xtensa_mmu_tlb_ipi();
#endif #endif
sys_cache_data_flush_and_invd_all(); sys_cache_data_flush_and_invd_all();
@@ -658,11 +660,11 @@ void arch_mem_unmap(void *addr, size_t size)
/* This should be implemented in the SoC layer. /* This should be implemented in the SoC layer.
* This weak version is here to avoid build errors. * This weak version is here to avoid build errors.
*/ */
void __weak z_xtensa_mmu_tlb_ipi(void) void __weak xtensa_mmu_tlb_ipi(void)
{ {
} }
void z_xtensa_mmu_tlb_shootdown(void) void xtensa_mmu_tlb_shootdown(void)
{ {
unsigned int key; unsigned int key;
@@ -699,8 +701,8 @@ void z_xtensa_mmu_tlb_shootdown(void)
* generating the query entry directly. * generating the query entry directly.
*/ */
ptevaddr = (uint32_t)xtensa_ptevaddr_get(); ptevaddr = (uint32_t)xtensa_ptevaddr_get();
ptevaddr_entry = Z_XTENSA_PTE_ENTRY_VADDR(ptevaddr, ptevaddr) ptevaddr_entry = XTENSA_MMU_PTE_ENTRY_VADDR(ptevaddr, ptevaddr)
| Z_XTENSA_MMU_PTE_WAY; | XTENSA_MMU_PTE_WAY;
current_ptables = xtensa_dtlb_paddr_read(ptevaddr_entry); current_ptables = xtensa_dtlb_paddr_read(ptevaddr_entry);
thread_ptables = (uint32_t)thread->arch.ptables; thread_ptables = (uint32_t)thread->arch.ptables;
@@ -756,11 +758,11 @@ static uint32_t *dup_table(uint32_t *source_table)
uint32_t *l2_table, *src_l2_table; uint32_t *l2_table, *src_l2_table;
if (is_pte_illegal(source_table[i])) { if (is_pte_illegal(source_table[i])) {
dst_table[i] = Z_XTENSA_MMU_ILLEGAL; dst_table[i] = XTENSA_MMU_PTE_ILLEGAL;
continue; continue;
} }
src_l2_table = (uint32_t *)(source_table[i] & Z_XTENSA_PTE_PPN_MASK); src_l2_table = (uint32_t *)(source_table[i] & XTENSA_MMU_PTE_PPN_MASK);
l2_table = alloc_l2_table(); l2_table = alloc_l2_table();
if (l2_table == NULL) { if (l2_table == NULL) {
goto err; goto err;
@@ -773,8 +775,8 @@ static uint32_t *dup_table(uint32_t *source_table)
/* The page table is using kernel ASID because we don't /* The page table is using kernel ASID because we don't
* user thread manipulate it. * user thread manipulate it.
*/ */
dst_table[i] = Z_XTENSA_PTE((uint32_t)l2_table, Z_XTENSA_KERNEL_RING, dst_table[i] = XTENSA_MMU_PTE((uint32_t)l2_table, XTENSA_MMU_KERNEL_RING,
Z_XTENSA_PAGE_TABLE_ATTR); XTENSA_MMU_PAGE_TABLE_ATTR);
sys_cache_data_flush_range((void *)l2_table, XTENSA_L2_PAGE_TABLE_SIZE); sys_cache_data_flush_range((void *)l2_table, XTENSA_L2_PAGE_TABLE_SIZE);
} }
@@ -798,7 +800,7 @@ int arch_mem_domain_init(struct k_mem_domain *domain)
* For now, lets just assert if we have reached the maximum number * For now, lets just assert if we have reached the maximum number
* of asid we assert. * of asid we assert.
*/ */
__ASSERT(asid_count < (Z_XTENSA_MMU_SHARED_ASID), "Reached maximum of ASID available"); __ASSERT(asid_count < (XTENSA_MMU_SHARED_ASID), "Reached maximum of ASID available");
key = k_spin_lock(&xtensa_mmu_lock); key = k_spin_lock(&xtensa_mmu_lock);
ptables = dup_table(z_xtensa_kernel_ptables); ptables = dup_table(z_xtensa_kernel_ptables);
@@ -829,17 +831,17 @@ static int region_map_update(uint32_t *ptables, uintptr_t start,
for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) { for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) {
uint32_t *l2_table, pte; uint32_t *l2_table, pte;
uint32_t page = start + offset; uint32_t page = start + offset;
uint32_t l1_pos = Z_XTENSA_L1_POS(page); uint32_t l1_pos = XTENSA_MMU_L1_POS(page);
uint32_t l2_pos = Z_XTENSA_L2_POS(page); uint32_t l2_pos = XTENSA_MMU_L2_POS(page);
/* Make sure we grab a fresh copy of L1 page table */ /* Make sure we grab a fresh copy of L1 page table */
sys_cache_data_invd_range((void *)&ptables[l1_pos], sizeof(ptables[0])); sys_cache_data_invd_range((void *)&ptables[l1_pos], sizeof(ptables[0]));
l2_table = (uint32_t *)(ptables[l1_pos] & Z_XTENSA_PTE_PPN_MASK); l2_table = (uint32_t *)(ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0])); sys_cache_data_invd_range((void *)&l2_table[l2_pos], sizeof(l2_table[0]));
pte = Z_XTENSA_PTE_RING_SET(l2_table[l2_pos], ring); pte = XTENSA_MMU_PTE_RING_SET(l2_table[l2_pos], ring);
pte = Z_XTENSA_PTE_ATTR_SET(pte, flags); pte = XTENSA_MMU_PTE_ATTR_SET(pte, flags);
l2_table[l2_pos] = pte; l2_table[l2_pos] = pte;
@@ -872,8 +874,8 @@ static inline int update_region(uint32_t *ptables, uintptr_t start,
va_uc = start; va_uc = start;
} }
new_flags_uc = (flags & ~Z_XTENSA_PTE_ATTR_CACHED_MASK); new_flags_uc = (flags & ~XTENSA_MMU_PTE_ATTR_CACHED_MASK);
new_flags = new_flags_uc | Z_XTENSA_MMU_CACHED_WB; new_flags = new_flags_uc | XTENSA_MMU_CACHED_WB;
ret = region_map_update(ptables, va, size, ring, new_flags); ret = region_map_update(ptables, va, size, ring, new_flags);
@@ -886,7 +888,7 @@ static inline int update_region(uint32_t *ptables, uintptr_t start,
#if CONFIG_MP_MAX_NUM_CPUS > 1 #if CONFIG_MP_MAX_NUM_CPUS > 1
if ((option & OPTION_NO_TLB_IPI) != OPTION_NO_TLB_IPI) { if ((option & OPTION_NO_TLB_IPI) != OPTION_NO_TLB_IPI) {
z_xtensa_mmu_tlb_ipi(); xtensa_mmu_tlb_ipi();
} }
#endif #endif
@@ -898,7 +900,8 @@ static inline int update_region(uint32_t *ptables, uintptr_t start,
static inline int reset_region(uint32_t *ptables, uintptr_t start, size_t size, uint32_t option) static inline int reset_region(uint32_t *ptables, uintptr_t start, size_t size, uint32_t option)
{ {
return update_region(ptables, start, size, Z_XTENSA_KERNEL_RING, Z_XTENSA_MMU_W, option); return update_region(ptables, start, size,
XTENSA_MMU_KERNEL_RING, XTENSA_MMU_PERM_W, option);
} }
void xtensa_user_stack_perms(struct k_thread *thread) void xtensa_user_stack_perms(struct k_thread *thread)
@@ -909,7 +912,7 @@ void xtensa_user_stack_perms(struct k_thread *thread)
update_region(thread_page_tables_get(thread), update_region(thread_page_tables_get(thread),
thread->stack_info.start, thread->stack_info.size, thread->stack_info.start, thread->stack_info.size,
Z_XTENSA_USER_RING, Z_XTENSA_MMU_W | Z_XTENSA_MMU_CACHED_WB, 0); XTENSA_MMU_USER_RING, XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB, 0);
} }
int arch_mem_domain_max_partitions_get(void) int arch_mem_domain_max_partitions_get(void)
@@ -931,8 +934,8 @@ int arch_mem_domain_partition_add(struct k_mem_domain *domain,
uint32_t partition_id) uint32_t partition_id)
{ {
struct k_mem_partition *partition = &domain->partitions[partition_id]; struct k_mem_partition *partition = &domain->partitions[partition_id];
uint32_t ring = K_MEM_PARTITION_IS_USER(partition->attr) ? Z_XTENSA_USER_RING : uint32_t ring = K_MEM_PARTITION_IS_USER(partition->attr) ? XTENSA_MMU_USER_RING :
Z_XTENSA_KERNEL_RING; XTENSA_MMU_KERNEL_RING;
return update_region(domain->arch.ptables, partition->start, return update_region(domain->arch.ptables, partition->start,
partition->size, ring, partition->attr, 0); partition->size, ring, partition->attr, 0);
@@ -959,8 +962,8 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
*/ */
update_region(thread_page_tables_get(thread), update_region(thread_page_tables_get(thread),
thread->stack_info.start, thread->stack_info.size, thread->stack_info.start, thread->stack_info.size,
Z_XTENSA_USER_RING, XTENSA_MMU_USER_RING,
Z_XTENSA_MMU_W | Z_XTENSA_MMU_CACHED_WB, XTENSA_MMU_PERM_W | XTENSA_MMU_CACHED_WB,
OPTION_NO_TLB_IPI); OPTION_NO_TLB_IPI);
/* and reset thread's stack permission in /* and reset thread's stack permission in
* the old page tables. * the old page tables.
@@ -985,7 +988,7 @@ int arch_mem_domain_thread_add(struct k_thread *thread)
* migration as it was sent above during reset_region(). * migration as it was sent above during reset_region().
*/ */
if ((thread != _current_cpu->current) && !is_migration) { if ((thread != _current_cpu->current) && !is_migration) {
z_xtensa_mmu_tlb_ipi(); xtensa_mmu_tlb_ipi();
} }
#endif #endif
@@ -1026,14 +1029,14 @@ static bool page_validate(uint32_t *ptables, uint32_t page, uint8_t ring, bool w
{ {
uint8_t asid_ring; uint8_t asid_ring;
uint32_t rasid, pte, *l2_table; uint32_t rasid, pte, *l2_table;
uint32_t l1_pos = Z_XTENSA_L1_POS(page); uint32_t l1_pos = XTENSA_MMU_L1_POS(page);
uint32_t l2_pos = Z_XTENSA_L2_POS(page); uint32_t l2_pos = XTENSA_MMU_L2_POS(page);
if (is_pte_illegal(ptables[l1_pos])) { if (is_pte_illegal(ptables[l1_pos])) {
return false; return false;
} }
l2_table = (uint32_t *)(ptables[l1_pos] & Z_XTENSA_PTE_PPN_MASK); l2_table = (uint32_t *)(ptables[l1_pos] & XTENSA_MMU_PTE_PPN_MASK);
pte = l2_table[l2_pos]; pte = l2_table[l2_pos];
if (is_pte_illegal(pte)) { if (is_pte_illegal(pte)) {
@@ -1043,8 +1046,7 @@ static bool page_validate(uint32_t *ptables, uint32_t page, uint8_t ring, bool w
asid_ring = 0; asid_ring = 0;
rasid = xtensa_rasid_get(); rasid = xtensa_rasid_get();
for (uint32_t i = 0; i < 4; i++) { for (uint32_t i = 0; i < 4; i++) {
if (Z_XTENSA_PTE_ASID_GET(pte, rasid) == if (XTENSA_MMU_PTE_ASID_GET(pte, rasid) == XTENSA_MMU_RASID_ASID_GET(rasid, i)) {
Z_XTENSA_RASID_ASID_GET(rasid, i)) {
asid_ring = i; asid_ring = i;
break; break;
} }
@@ -1055,7 +1057,7 @@ static bool page_validate(uint32_t *ptables, uint32_t page, uint8_t ring, bool w
} }
if (write) { if (write) {
return (Z_XTENSA_PTE_ATTR_GET((pte)) & Z_XTENSA_MMU_W) != 0; return (XTENSA_MMU_PTE_ATTR_GET((pte)) & XTENSA_MMU_PERM_W) != 0;
} }
return true; return true;
@@ -1069,7 +1071,7 @@ int arch_buffer_validate(void *addr, size_t size, int write)
const struct k_thread *thread = _current; const struct k_thread *thread = _current;
uint32_t *ptables = thread_page_tables_get(thread); uint32_t *ptables = thread_page_tables_get(thread);
uint8_t ring = ((thread->base.user_options & K_USER) != 0) ? uint8_t ring = ((thread->base.user_options & K_USER) != 0) ?
Z_XTENSA_USER_RING : Z_XTENSA_KERNEL_RING; XTENSA_MMU_USER_RING : XTENSA_MMU_KERNEL_RING;
/* addr/size arbitrary, fix this up into an aligned region */ /* addr/size arbitrary, fix this up into an aligned region */
k_mem_region_align((uintptr_t *)&virt, &aligned_size, k_mem_region_align((uintptr_t *)&virt, &aligned_size,

@@ -434,7 +434,7 @@ _KernelExceptionVector:
j _Level1Vector j _Level1Vector
#ifdef CONFIG_XTENSA_MMU #ifdef CONFIG_XTENSA_MMU
_handle_tlb_miss_kernel: _handle_tlb_miss_kernel:
/* The TLB miss handling is used only during z_xtensa_mmu_init() /* The TLB miss handling is used only during xtensa_mmu_init()
* where vecbase is at a different address, as the offset used * where vecbase is at a different address, as the offset used
* in the jump ('j') instruction will not jump to correct * in the jump ('j') instruction will not jump to correct
* address (... remember the vecbase is moved). * address (... remember the vecbase is moved).

@@ -72,7 +72,7 @@ static ALWAYS_INLINE void arch_kernel_init(void)
#endif #endif
#ifdef CONFIG_XTENSA_MMU #ifdef CONFIG_XTENSA_MMU
z_xtensa_mmu_init(); xtensa_mmu_init();
#endif #endif
} }

@@ -15,115 +15,152 @@
#include <zephyr/toolchain.h> #include <zephyr/toolchain.h>
#include <zephyr/sys/util_macro.h> #include <zephyr/sys/util_macro.h>
#define Z_XTENSA_PTE_VPN_MASK 0xFFFFF000U /**
#define Z_XTENSA_PTE_PPN_MASK 0xFFFFF000U * @defgroup xtensa_mmu_internal_apis Xtensa Memory Management Unit (MMU) Internal APIs
#define Z_XTENSA_PTE_ATTR_MASK 0x0000000FU * @ingroup xtensa_mmu_apis
#define Z_XTENSA_PTE_ATTR_CACHED_MASK 0x0000000CU * @{
#define Z_XTENSA_L1_MASK 0x3FF00000U */
#define Z_XTENSA_L2_MASK 0x3FFFFFU
#define Z_XTENSA_PPN_SHIFT 12U /** Mask for VPN in PTE */
#define XTENSA_MMU_PTE_VPN_MASK 0xFFFFF000U
#define Z_XTENSA_PTE_RING_MASK 0x00000030U /** Mask for PPN in PTE */
#define Z_XTENSA_PTE_RING_SHIFT 4U #define XTENSA_MMU_PTE_PPN_MASK 0xFFFFF000U
#define Z_XTENSA_PTEBASE_MASK 0xFFC00000 /** Mask for attributes in PTE */
#define XTENSA_MMU_PTE_ATTR_MASK 0x0000000FU
#define Z_XTENSA_PTE(paddr, ring, attr) \ /** Mask for cache mode in PTE */
(((paddr) & Z_XTENSA_PTE_PPN_MASK) | \ #define XTENSA_MMU_PTE_ATTR_CACHED_MASK 0x0000000CU
(((ring) << Z_XTENSA_PTE_RING_SHIFT) & Z_XTENSA_PTE_RING_MASK) | \
((attr) & Z_XTENSA_PTE_ATTR_MASK))
#define Z_XTENSA_PTE_ATTR_GET(pte) \ /** Mask used to figure out which L1 page table to use */
(pte) & Z_XTENSA_PTE_ATTR_MASK #define XTENSA_MMU_L1_MASK 0x3FF00000U
#define Z_XTENSA_PTE_ATTR_SET(pte, attr) \ /** Mask used to figure out which L2 page table to use */
(((pte) & ~Z_XTENSA_PTE_ATTR_MASK) | (attr)) #define XTENSA_MMU_L2_MASK 0x3FFFFFU
#define Z_XTENSA_PTE_RING_SET(pte, ring) \ #define XTENSA_MMU_PTEBASE_MASK 0xFFC00000
(((pte) & ~Z_XTENSA_PTE_RING_MASK) | \
((ring) << Z_XTENSA_PTE_RING_SHIFT))
#define Z_XTENSA_PTE_RING_GET(pte) \ #define XTENSA_MMU_PTE(paddr, ring, attr) \
(((pte) & ~Z_XTENSA_PTE_RING_MASK) >> Z_XTENSA_PTE_RING_SHIFT) (((paddr) & XTENSA_MMU_PTE_PPN_MASK) | \
(((ring) << XTENSA_MMU_PTE_RING_SHIFT) & XTENSA_MMU_PTE_RING_MASK) | \
((attr) & XTENSA_MMU_PTE_ATTR_MASK))
#define Z_XTENSA_PTE_ASID_GET(pte, rasid) \ /** Number of bits to shift for PPN in PTE */
(((rasid) >> ((((pte) & Z_XTENSA_PTE_RING_MASK) \ #define XTENSA_MMU_PTE_PPN_SHIFT 12U
>> Z_XTENSA_PTE_RING_SHIFT) * 8)) & 0xFF)
#define Z_XTENSA_TLB_ENTRY(vaddr, way) \ /** Mask for ring in PTE */
(((vaddr) & Z_XTENSA_PTE_PPN_MASK) | (way)) #define XTENSA_MMU_PTE_RING_MASK 0x00000030U
#define Z_XTENSA_AUTOFILL_TLB_ENTRY(vaddr) \ /** Number of bits to shift for ring in PTE */
(((vaddr) & Z_XTENSA_PTE_PPN_MASK) | \ #define XTENSA_MMU_PTE_RING_SHIFT 4U
(((vaddr) >> Z_XTENSA_PPN_SHIFT) & 0x03U))
#define Z_XTENSA_L2_POS(vaddr) \ /** Construct a page table entry (PTE) */
(((vaddr) & Z_XTENSA_L2_MASK) >> 12U) #define XTENSA_MMU_PTE(paddr, ring, attr) \
(((paddr) & XTENSA_MMU_PTE_PPN_MASK) | \
(((ring) << XTENSA_MMU_PTE_RING_SHIFT) & XTENSA_MMU_PTE_RING_MASK) | \
((attr) & XTENSA_MMU_PTE_ATTR_MASK))
#define Z_XTENSA_L1_POS(vaddr) \ /** Get the attributes from a PTE */
#define XTENSA_MMU_PTE_ATTR_GET(pte) \
((pte) & XTENSA_MMU_PTE_ATTR_MASK)
/** Set the attributes in a PTE */
#define XTENSA_MMU_PTE_ATTR_SET(pte, attr) \
(((pte) & ~XTENSA_MMU_PTE_ATTR_MASK) | (attr))
/** Set the ring in a PTE */
#define XTENSA_MMU_PTE_RING_SET(pte, ring) \
(((pte) & ~XTENSA_MMU_PTE_RING_MASK) | \
((ring) << XTENSA_MMU_PTE_RING_SHIFT))
/** Get the ring from a PTE */
#define XTENSA_MMU_PTE_RING_GET(pte) \
(((pte) & ~XTENSA_MMU_PTE_RING_MASK) >> XTENSA_MMU_PTE_RING_SHIFT)
/** Get the ASID from the RASID register corresponding to the ring in a PTE */
#define XTENSA_MMU_PTE_ASID_GET(pte, rasid) \
(((rasid) >> ((((pte) & XTENSA_MMU_PTE_RING_MASK) \
>> XTENSA_MMU_PTE_RING_SHIFT) * 8)) & 0xFF)
/** Calculate the L2 page table position from a virtual address */
#define XTENSA_MMU_L2_POS(vaddr) \
(((vaddr) & XTENSA_MMU_L2_MASK) >> 12U)
/** Calculate the L1 page table position from a virtual address */
#define XTENSA_MMU_L1_POS(vaddr) \
((vaddr) >> 22U) ((vaddr) >> 22U)
/* PTE attributes for entries in the L1 page table. Should never be /**
* @def XTENSA_MMU_PAGE_TABLE_ATTR
*
* PTE attributes for entries in the L1 page table. Should never be
* writable, may be cached in non-SMP contexts only * writable, may be cached in non-SMP contexts only
*/ */
#if CONFIG_MP_MAX_NUM_CPUS == 1 #if CONFIG_MP_MAX_NUM_CPUS == 1
#define Z_XTENSA_PAGE_TABLE_ATTR Z_XTENSA_MMU_CACHED_WB #define XTENSA_MMU_PAGE_TABLE_ATTR XTENSA_MMU_CACHED_WB
#else #else
#define Z_XTENSA_PAGE_TABLE_ATTR 0 #define XTENSA_MMU_PAGE_TABLE_ATTR 0
#endif #endif
/* This ASID is shared between all domains and kernel. */ /** This ASID is shared between all domains and kernel. */
#define Z_XTENSA_MMU_SHARED_ASID 255 #define XTENSA_MMU_SHARED_ASID 255
/* Fixed data TLB way to map the page table */ /** Fixed data TLB way to map the page table */
#define Z_XTENSA_MMU_PTE_WAY 7 #define XTENSA_MMU_PTE_WAY 7
/* Fixed data TLB way to map the vecbase */ /** Fixed data TLB way to map the vecbase */
#define Z_XTENSA_MMU_VECBASE_WAY 8 #define XTENSA_MMU_VECBASE_WAY 8
/* Kernel specific ASID. Ring field in the PTE */ /** Kernel specific ASID. Ring field in the PTE */
#define Z_XTENSA_KERNEL_RING 0 #define XTENSA_MMU_KERNEL_RING 0
/* User specific ASID. Ring field in the PTE */ /** User specific ASID. Ring field in the PTE */
#define Z_XTENSA_USER_RING 2 #define XTENSA_MMU_USER_RING 2
/* Ring value for MMU_SHARED_ASID */ /** Ring value for MMU_SHARED_ASID */
#define Z_XTENSA_SHARED_RING 3 #define XTENSA_MMU_SHARED_RING 3
/* Number of data TLB ways [0-9] */ /** Number of data TLB ways [0-9] */
#define Z_XTENSA_DTLB_WAYS 10 #define XTENSA_MMU_NUM_DTLB_WAYS 10
/* Number of instruction TLB ways [0-6] */ /** Number of instruction TLB ways [0-6] */
#define Z_XTENSA_ITLB_WAYS 7 #define XTENSA_MMU_NUM_ITLB_WAYS 7
/* Number of auto-refill ways */ /** Number of auto-refill ways */
#define Z_XTENSA_TLB_AUTOREFILL_WAYS 4 #define XTENSA_MMU_NUM_TLB_AUTOREFILL_WAYS 4
/** Indicate PTE is illegal. */
#define XTENSA_MMU_PTE_ILLEGAL (BIT(3) | BIT(2))
/* PITLB HIT bit. For more information see /**
* PITLB HIT bit.
*
* For more information see
* Xtensa Instruction Set Architecture (ISA) Reference Manual * Xtensa Instruction Set Architecture (ISA) Reference Manual
* 4.6.5.7 Formats for Probing MMU Option TLB Entries * 4.6.5.7 Formats for Probing MMU Option TLB Entries
*/ */
#define Z_XTENSA_PITLB_HIT BIT(3) #define XTENSA_MMU_PITLB_HIT BIT(3)
/* PDTLB HIT bit. For more information see /**
* PDTLB HIT bit.
*
* For more information see
* Xtensa Instruction Set Architecture (ISA) Reference Manual * Xtensa Instruction Set Architecture (ISA) Reference Manual
* 4.6.5.7 Formats for Probing MMU Option TLB Entries * 4.6.5.7 Formats for Probing MMU Option TLB Entries
*/ */
#define Z_XTENSA_PDTLB_HIT BIT(4) #define XTENSA_MMU_PDTLB_HIT BIT(4)
/* /**
* Virtual address where the page table is mapped * Virtual address where the page table is mapped
*/ */
#define Z_XTENSA_PTEVADDR CONFIG_XTENSA_MMU_PTEVADDR #define XTENSA_MMU_PTEVADDR CONFIG_XTENSA_MMU_PTEVADDR
/* /**
* Find the pte entry address of a given vaddr. * Find the PTE entry address of a given vaddr.
* *
* For example, assuming PTEVADDR in 0xE0000000, * For example, assuming PTEVADDR in 0xE0000000,
* the page spans from 0xE0000000 - 0xE03FFFFF * the page spans from 0xE0000000 - 0xE03FFFFF
* *
* address 0x00 is in 0xE0000000 * address 0x00 is in 0xE0000000
* address 0x1000 is in 0xE0000004 * address 0x1000 is in 0xE0000004
@@ -134,23 +171,33 @@
* *
* PTE_ENTRY_ADDRESS = PTEVADDR + ((VADDR / 4096) * 4) * PTE_ENTRY_ADDRESS = PTEVADDR + ((VADDR / 4096) * 4)
*/ */
#define Z_XTENSA_PTE_ENTRY_VADDR(base, vaddr) \ #define XTENSA_MMU_PTE_ENTRY_VADDR(base, vaddr) \
((base) + (((vaddr) / KB(4)) * 4)) ((base) + (((vaddr) / KB(4)) * 4))
/* /**
* Get asid for a given ring from rasid register. * Get ASID for a given ring from RASID register.
* rasid contains four asid, one per ring. *
* RASID contains four 8-bit ASIDs, one per ring.
*/ */
#define XTENSA_MMU_RASID_ASID_GET(rasid, ring) \
#define Z_XTENSA_RASID_ASID_GET(rasid, ring) \
(((rasid) >> ((ring) * 8)) & 0xff) (((rasid) >> ((ring) * 8)) & 0xff)
/**
* @brief Set RASID register.
*
* @param rasid Value to be set.
*/
static ALWAYS_INLINE void xtensa_rasid_set(uint32_t rasid) static ALWAYS_INLINE void xtensa_rasid_set(uint32_t rasid)
{ {
__asm__ volatile("wsr %0, rasid\n\t" __asm__ volatile("wsr %0, rasid\n\t"
"isync\n" : : "a"(rasid)); "isync\n" : : "a"(rasid));
} }
/**
* @brief Get RASID register.
*
* @return Register value.
*/
static ALWAYS_INLINE uint32_t xtensa_rasid_get(void) static ALWAYS_INLINE uint32_t xtensa_rasid_get(void)
{ {
uint32_t rasid; uint32_t rasid;
@@ -159,22 +206,37 @@ static ALWAYS_INLINE uint32_t xtensa_rasid_get(void)
return rasid; return rasid;
} }
static ALWAYS_INLINE void xtensa_rasid_asid_set(uint8_t asid, uint8_t pos) /**
* @brief Set a ring in RASID register to be particular value.
*
* @param asid ASID to be set.
* @param ring ASID of which ring to be manipulated.
*/
static ALWAYS_INLINE void xtensa_rasid_asid_set(uint8_t asid, uint8_t ring)
{ {
uint32_t rasid = xtensa_rasid_get(); uint32_t rasid = xtensa_rasid_get();
rasid = (rasid & ~(0xff << (pos * 8))) | ((uint32_t)asid << (pos * 8)); rasid = (rasid & ~(0xff << (ring * 8))) | ((uint32_t)asid << (ring * 8));
xtensa_rasid_set(rasid); xtensa_rasid_set(rasid);
} }
/**
* @brief Invalidate a particular instruction TLB entry.
*
* @param entry Entry to be invalidated.
*/
static ALWAYS_INLINE void xtensa_itlb_entry_invalidate(uint32_t entry) static ALWAYS_INLINE void xtensa_itlb_entry_invalidate(uint32_t entry)
{ {
__asm__ volatile("iitlb %0\n\t" __asm__ volatile("iitlb %0\n\t"
: : "a" (entry)); : : "a" (entry));
} }
/**
* @brief Synchronously invalidate of a particular instruction TLB entry.
*
* @param entry Entry to be invalidated.
*/
static ALWAYS_INLINE void xtensa_itlb_entry_invalidate_sync(uint32_t entry) static ALWAYS_INLINE void xtensa_itlb_entry_invalidate_sync(uint32_t entry)
{ {
__asm__ volatile("iitlb %0\n\t" __asm__ volatile("iitlb %0\n\t"
@@ -182,6 +244,11 @@ static ALWAYS_INLINE void xtensa_itlb_entry_invalidate_sync(uint32_t entry)
: : "a" (entry)); : : "a" (entry));
} }
/**
* @brief Synchronously invalidate of a particular data TLB entry.
*
* @param entry Entry to be invalidated.
*/
static ALWAYS_INLINE void xtensa_dtlb_entry_invalidate_sync(uint32_t entry) static ALWAYS_INLINE void xtensa_dtlb_entry_invalidate_sync(uint32_t entry)
{ {
__asm__ volatile("idtlb %0\n\t" __asm__ volatile("idtlb %0\n\t"
@@ -189,12 +256,23 @@ static ALWAYS_INLINE void xtensa_dtlb_entry_invalidate_sync(uint32_t entry)
: : "a" (entry)); : : "a" (entry));
} }
/**
* @brief Invalidate a particular data TLB entry.
*
* @param entry Entry to be invalidated.
*/
static ALWAYS_INLINE void xtensa_dtlb_entry_invalidate(uint32_t entry) static ALWAYS_INLINE void xtensa_dtlb_entry_invalidate(uint32_t entry)
{ {
__asm__ volatile("idtlb %0\n\t" __asm__ volatile("idtlb %0\n\t"
: : "a" (entry)); : : "a" (entry));
} }
/**
* @brief Synchronously write to a particular data TLB entry.
*
* @param pte Value to be written.
* @param entry Entry to be written.
*/
static ALWAYS_INLINE void xtensa_dtlb_entry_write_sync(uint32_t pte, uint32_t entry) static ALWAYS_INLINE void xtensa_dtlb_entry_write_sync(uint32_t pte, uint32_t entry)
{ {
__asm__ volatile("wdtlb %0, %1\n\t" __asm__ volatile("wdtlb %0, %1\n\t"
@@ -202,18 +280,36 @@ static ALWAYS_INLINE void xtensa_dtlb_entry_write_sync(uint32_t pte, uint32_t en
: : "a" (pte), "a"(entry)); : : "a" (pte), "a"(entry));
} }
/**
* @brief Write to a particular data TLB entry.
*
* @param pte Value to be written.
* @param entry Entry to be written.
*/
static ALWAYS_INLINE void xtensa_dtlb_entry_write(uint32_t pte, uint32_t entry) static ALWAYS_INLINE void xtensa_dtlb_entry_write(uint32_t pte, uint32_t entry)
{ {
__asm__ volatile("wdtlb %0, %1\n\t" __asm__ volatile("wdtlb %0, %1\n\t"
: : "a" (pte), "a"(entry)); : : "a" (pte), "a"(entry));
} }
/**
* @brief Synchronously write to a particular instruction TLB entry.
*
* @param pte Value to be written.
* @param entry Entry to be written.
*/
static ALWAYS_INLINE void xtensa_itlb_entry_write(uint32_t pte, uint32_t entry) static ALWAYS_INLINE void xtensa_itlb_entry_write(uint32_t pte, uint32_t entry)
{ {
__asm__ volatile("witlb %0, %1\n\t" __asm__ volatile("witlb %0, %1\n\t"
: : "a" (pte), "a"(entry)); : : "a" (pte), "a"(entry));
} }
/**
* @brief Synchronously write to a particular instruction TLB entry.
*
* @param pte Value to be written.
* @param entry Entry to be written.
*/
static ALWAYS_INLINE void xtensa_itlb_entry_write_sync(uint32_t pte, uint32_t entry) static ALWAYS_INLINE void xtensa_itlb_entry_write_sync(uint32_t pte, uint32_t entry)
{ {
__asm__ volatile("witlb %0, %1\n\t" __asm__ volatile("witlb %0, %1\n\t"
@@ -239,9 +335,10 @@ static inline void xtensa_tlb_autorefill_invalidate(void)
entries = BIT(MAX(XCHAL_ITLB_ARF_ENTRIES_LOG2, entries = BIT(MAX(XCHAL_ITLB_ARF_ENTRIES_LOG2,
XCHAL_DTLB_ARF_ENTRIES_LOG2)); XCHAL_DTLB_ARF_ENTRIES_LOG2));
for (way = 0; way < Z_XTENSA_TLB_AUTOREFILL_WAYS; way++) { for (way = 0; way < XTENSA_MMU_NUM_TLB_AUTOREFILL_WAYS; way++) {
for (i = 0; i < entries; i++) { for (i = 0; i < entries; i++) {
uint32_t entry = way + (i << Z_XTENSA_PPN_SHIFT); uint32_t entry = way + (i << XTENSA_MMU_PTE_PPN_SHIFT);
xtensa_dtlb_entry_invalidate_sync(entry); xtensa_dtlb_entry_invalidate_sync(entry);
xtensa_itlb_entry_invalidate_sync(entry); xtensa_itlb_entry_invalidate_sync(entry);
} }
@@ -273,43 +370,68 @@ static ALWAYS_INLINE void *xtensa_ptevaddr_get(void)
__asm__ volatile("rsr.ptevaddr %0" : "=a" (ptables)); __asm__ volatile("rsr.ptevaddr %0" : "=a" (ptables));
return (void *)(ptables & Z_XTENSA_PTEBASE_MASK); return (void *)(ptables & XTENSA_MMU_PTEBASE_MASK);
} }
/*
* The following functions are helpful when debugging. /**
* @brief Get the virtual address associated with a particular data TLB entry.
*
* @param entry TLB entry to be queried.
*/ */
static ALWAYS_INLINE void *xtensa_dtlb_vaddr_read(uint32_t entry) static ALWAYS_INLINE void *xtensa_dtlb_vaddr_read(uint32_t entry)
{ {
uint32_t vaddr; uint32_t vaddr;
__asm__ volatile("rdtlb0 %0, %1\n\t" : "=a" (vaddr) : "a" (entry)); __asm__ volatile("rdtlb0 %0, %1\n\t" : "=a" (vaddr) : "a" (entry));
return (void *)(vaddr & Z_XTENSA_PTE_VPN_MASK); return (void *)(vaddr & XTENSA_MMU_PTE_VPN_MASK);
} }
/**
* @brief Get the physical address associated with a particular data TLB entry.
*
* @param entry TLB entry to be queried.
*/
static ALWAYS_INLINE uint32_t xtensa_dtlb_paddr_read(uint32_t entry) static ALWAYS_INLINE uint32_t xtensa_dtlb_paddr_read(uint32_t entry)
{ {
uint32_t paddr; uint32_t paddr;
__asm__ volatile("rdtlb1 %0, %1\n\t" : "=a" (paddr) : "a" (entry)); __asm__ volatile("rdtlb1 %0, %1\n\t" : "=a" (paddr) : "a" (entry));
return (paddr & Z_XTENSA_PTE_PPN_MASK); return (paddr & XTENSA_MMU_PTE_PPN_MASK);
} }
/**
* @brief Get the virtual address associated with a particular instruction TLB entry.
*
* @param entry TLB entry to be queried.
*/
static ALWAYS_INLINE void *xtensa_itlb_vaddr_read(uint32_t entry) static ALWAYS_INLINE void *xtensa_itlb_vaddr_read(uint32_t entry)
{ {
uint32_t vaddr; uint32_t vaddr;
__asm__ volatile("ritlb0 %0, %1\n\t" : "=a" (vaddr), "+a" (entry)); __asm__ volatile("ritlb0 %0, %1\n\t" : "=a" (vaddr), "+a" (entry));
return (void *)(vaddr & Z_XTENSA_PTE_VPN_MASK); return (void *)(vaddr & XTENSA_MMU_PTE_VPN_MASK);
} }
/**
* @brief Get the physical address associated with a particular instruction TLB entry.
*
* @param entry TLB entry to be queried.
*/
static ALWAYS_INLINE uint32_t xtensa_itlb_paddr_read(uint32_t entry) static ALWAYS_INLINE uint32_t xtensa_itlb_paddr_read(uint32_t entry)
{ {
uint32_t paddr; uint32_t paddr;
__asm__ volatile("ritlb1 %0, %1\n\t" : "=a" (paddr), "+a" (entry)); __asm__ volatile("ritlb1 %0, %1\n\t" : "=a" (paddr), "+a" (entry));
return (paddr & Z_XTENSA_PTE_PPN_MASK); return (paddr & XTENSA_MMU_PTE_PPN_MASK);
} }
/**
* @brief Probe for instruction TLB entry from a virtual address.
*
* @param vaddr Virtual address.
*
* @return Return of the PITLB instruction.
*/
static ALWAYS_INLINE uint32_t xtensa_itlb_probe(void *vaddr) static ALWAYS_INLINE uint32_t xtensa_itlb_probe(void *vaddr)
{ {
uint32_t ret; uint32_t ret;
@@ -318,6 +440,13 @@ static ALWAYS_INLINE uint32_t xtensa_itlb_probe(void *vaddr)
return ret; return ret;
} }
/**
* @brief Probe for data TLB entry from a virtual address.
*
* @param vaddr Virtual address.
*
* @return Return of the PDTLB instruction.
*/
static ALWAYS_INLINE uint32_t xtensa_dtlb_probe(void *vaddr) static ALWAYS_INLINE uint32_t xtensa_dtlb_probe(void *vaddr)
{ {
uint32_t ret; uint32_t ret;
@@ -326,26 +455,57 @@ static ALWAYS_INLINE uint32_t xtensa_dtlb_probe(void *vaddr)
return ret; return ret;
} }
/**
* @brief Invalidate an instruction TLB entry associated with a virtual address.
*
* This invalidated an instruction TLB entry associated with a virtual address
* if such TLB entry exists. Otherwise, do nothing.
*
* @param vaddr Virtual address.
*/
static inline void xtensa_itlb_vaddr_invalidate(void *vaddr) static inline void xtensa_itlb_vaddr_invalidate(void *vaddr)
{ {
uint32_t entry = xtensa_itlb_probe(vaddr); uint32_t entry = xtensa_itlb_probe(vaddr);
if (entry & Z_XTENSA_PITLB_HIT) { if (entry & XTENSA_MMU_PITLB_HIT) {
xtensa_itlb_entry_invalidate_sync(entry); xtensa_itlb_entry_invalidate_sync(entry);
} }
} }
/**
* @brief Invalidate a data TLB entry associated with a virtual address.
*
* This invalidated a data TLB entry associated with a virtual address
* if such TLB entry exists. Otherwise, do nothing.
*
* @param vaddr Virtual address.
*/
static inline void xtensa_dtlb_vaddr_invalidate(void *vaddr) static inline void xtensa_dtlb_vaddr_invalidate(void *vaddr)
{ {
uint32_t entry = xtensa_dtlb_probe(vaddr); uint32_t entry = xtensa_dtlb_probe(vaddr);
if (entry & Z_XTENSA_PDTLB_HIT) { if (entry & XTENSA_MMU_PDTLB_HIT) {
xtensa_dtlb_entry_invalidate_sync(entry); xtensa_dtlb_entry_invalidate_sync(entry);
} }
} }
/**
* @brief Tell hardware to use a page table very first time after boot.
*
* @param l1_page Pointer to the page table to be used.
*/
void xtensa_init_paging(uint32_t *l1_page); void xtensa_init_paging(uint32_t *l1_page);
/**
* @brief Switch to a new page table.
*
* @param asid The ASID of the memory domain associated with the incoming page table.
* @param l1_page Page table to be switched to.
*/
void xtensa_set_paging(uint32_t asid, uint32_t *l1_page); void xtensa_set_paging(uint32_t asid, uint32_t *l1_page);
/**
* @}
*/
#endif /* ZEPHYR_ARCH_XTENSA_XTENSA_MMU_PRIV_H_ */ #endif /* ZEPHYR_ARCH_XTENSA_XTENSA_MMU_PRIV_H_ */

@@ -4,67 +4,124 @@
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
#include <stdint.h>
#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MMU_H #ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MMU_H
#define ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MMU_H #define ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MMU_H
#define Z_XTENSA_MMU_X BIT(0) /**
#define Z_XTENSA_MMU_W BIT(1) * @defgroup xtensa_mmu_apis Xtensa Memory Management Unit (MMU) APIs
#define Z_XTENSA_MMU_XW (BIT(1) | BIT(0)) * @ingroup xtensa_apis
* @{
#define Z_XTENSA_MMU_CACHED_WB BIT(2)
#define Z_XTENSA_MMU_CACHED_WT BIT(3)
/* This bit is used in the HW. We just use it to know
* which ring pte entries should use.
*/ */
#define Z_XTENSA_MMU_USER BIT(4)
#define K_MEM_PARTITION_IS_EXECUTABLE(attr) (((attr) & Z_XTENSA_MMU_X) != 0) /**
#define K_MEM_PARTITION_IS_WRITABLE(attr) (((attr) & Z_XENSA_MMU_W) != 0) * @name Memory region permission and caching mode.
#define K_MEM_PARTITION_IS_USER(attr) (((attr) & Z_XTENSA_MMU_USER) != 0) * @{
/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW ((k_mem_partition_attr_t) \
{Z_XTENSA_MMU_W | Z_XTENSA_MMU_USER})
#define K_MEM_PARTITION_P_RW_U_NA ((k_mem_partition_attr_t) \
{0})
#define K_MEM_PARTITION_P_RO_U_RO ((k_mem_partition_attr_t) \
{Z_XTENSA_MMU_USER})
#define K_MEM_PARTITION_P_RO_U_NA ((k_mem_partition_attr_t) \
{0})
#define K_MEM_PARTITION_P_NA_U_NA ((k_mem_partition_attr_t) \
{0})
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RX_U_RX ((k_mem_partition_attr_t) \
{Z_XTENSA_MMU_X})
/*
* This BIT tells the mapping code whether the uncached pointer should
* be shared between all threads. That is not used in the HW, it is
* just for the implementation.
*
* The pte mapping this memory will use an ASID that is set in the
* ring 4 spot in RASID.
*/ */
#define Z_XTENSA_MMU_MAP_SHARED BIT(30)
#define Z_XTENSA_MMU_ILLEGAL (BIT(3) | BIT(2)) /** Memory region is executable. */
#define XTENSA_MMU_PERM_X BIT(0)
/* Struct used to map a memory region */ /** Memory region is writable. */
struct xtensa_mmu_range { #define XTENSA_MMU_PERM_W BIT(1)
const char *name;
const uint32_t start; /** Memory region is both executable and writable */
const uint32_t end; #define XTENSA_MMU_PERM_WX (XTENSA_MMU_PERM_W | XTENSA_MMU_PERM_X)
const uint32_t attrs;
}; /** Memory region has write-back cache. */
#define XTENSA_MMU_CACHED_WB BIT(2)
/** Memory region has write-through cache. */
#define XTENSA_MMU_CACHED_WT BIT(3)
/**
* @}
*/
/**
* @name Memory domain and partitions
* @{
*/
typedef uint32_t k_mem_partition_attr_t; typedef uint32_t k_mem_partition_attr_t;
#define K_MEM_PARTITION_IS_EXECUTABLE(attr) (((attr) & XTENSA_MMU_PERM_X) != 0)
#define K_MEM_PARTITION_IS_WRITABLE(attr) (((attr) & XTENSA_MMU_PERM_W) != 0)
#define K_MEM_PARTITION_IS_USER(attr) (((attr) & XTENSA_MMU_MAP_USER) != 0)
/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW \
((k_mem_partition_attr_t) {XTENSA_MMU_PERM_W | XTENSA_MMU_MAP_USER})
#define K_MEM_PARTITION_P_RW_U_NA \
((k_mem_partition_attr_t) {0})
#define K_MEM_PARTITION_P_RO_U_RO \
((k_mem_partition_attr_t) {XTENSA_MMU_MAP_USER})
#define K_MEM_PARTITION_P_RO_U_NA \
((k_mem_partition_attr_t) {0})
#define K_MEM_PARTITION_P_NA_U_NA \
((k_mem_partition_attr_t) {0})
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RX_U_RX \
((k_mem_partition_attr_t) {XTENSA_MMU_PERM_X})
/**
* @}
*/
/**
* @brief Software only bit to indicate a memory region can be accessed by user thread(s).
*
* This BIT tells the mapping code which ring PTE entries to use.
*/
#define XTENSA_MMU_MAP_USER BIT(4)
/**
* @brief Software only bit to indicate a memory region is shared by all threads.
*
* This BIT tells the mapping code whether the memory region should
* be shared between all threads. That is not used in the HW, it is
* just for the implementation.
*
* The PTE mapping this memory will use an ASID that is set in the
* ring 4 spot in RASID.
*/
#define XTENSA_MMU_MAP_SHARED BIT(30)
/**
* Struct used to map a memory region.
*/
struct xtensa_mmu_range {
/** Name of the memory region. */
const char *name;
/** Start address of the memory region. */
const uint32_t start;
/** End address of the memory region. */
const uint32_t end;
/** Attributes for the memory region. */
const uint32_t attrs;
};
/**
* @brief Additional memory regions required by SoC.
*
* These memory regions will be setup by MMU initialization code at boot.
*/
extern const struct xtensa_mmu_range xtensa_soc_mmu_ranges[]; extern const struct xtensa_mmu_range xtensa_soc_mmu_ranges[];
/** Number of SoC additional memory regions. */
extern int xtensa_soc_mmu_ranges_num; extern int xtensa_soc_mmu_ranges_num;
void z_xtensa_mmu_init(void); /**
* @brief Initialize hardware MMU.
*
* This initializes the MMU hardware and setup the memory regions at boot.
*/
void xtensa_mmu_init(void);
/** /**
* @brief Tell other processors to flush TLBs. * @brief Tell other processors to flush TLBs.
@@ -76,7 +133,7 @@ void z_xtensa_mmu_init(void);
* *
* @note This needs to be implemented in the SoC layer. * @note This needs to be implemented in the SoC layer.
*/ */
void z_xtensa_mmu_tlb_ipi(void); void xtensa_mmu_tlb_ipi(void);
/** /**
* @brief Invalidate cache to page tables and flush TLBs. * @brief Invalidate cache to page tables and flush TLBs.
@@ -84,6 +141,10 @@ void z_xtensa_mmu_tlb_ipi(void);
* This invalidates cache to all page tables and flush TLBs * This invalidates cache to all page tables and flush TLBs
* as they may have been modified by other processors. * as they may have been modified by other processors.
*/ */
void z_xtensa_mmu_tlb_shootdown(void); void xtensa_mmu_tlb_shootdown(void);
/**
* @}
*/
#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MMU_H */ #endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MMU_H */

@@ -16,7 +16,7 @@ const struct xtensa_mmu_range xtensa_soc_mmu_ranges[] = {
{ {
.start = (uint32_t)XCHAL_VECBASE_RESET_VADDR, .start = (uint32_t)XCHAL_VECBASE_RESET_VADDR,
.end = (uint32_t)CONFIG_SRAM_OFFSET, .end = (uint32_t)CONFIG_SRAM_OFFSET,
.attrs = Z_XTENSA_MMU_X | Z_XTENSA_MMU_CACHED_WB | Z_XTENSA_MMU_MAP_SHARED, .attrs = XTENSA_MMU_PERM_X | XTENSA_MMU_CACHED_WB | XTENSA_MMU_MAP_SHARED,
.name = "vecbase", .name = "vecbase",
}, },
{ {
@@ -27,7 +27,7 @@ const struct xtensa_mmu_range xtensa_soc_mmu_ranges[] = {
.start = (uint32_t)DT_REG_ADDR(DT_NODELABEL(rom0)), .start = (uint32_t)DT_REG_ADDR(DT_NODELABEL(rom0)),
.end = (uint32_t)DT_REG_ADDR(DT_NODELABEL(rom0)) + .end = (uint32_t)DT_REG_ADDR(DT_NODELABEL(rom0)) +
(uint32_t)DT_REG_SIZE(DT_NODELABEL(rom0)), (uint32_t)DT_REG_SIZE(DT_NODELABEL(rom0)),
.attrs = Z_XTENSA_MMU_X | Z_XTENSA_MMU_CACHED_WB, .attrs = XTENSA_MMU_PERM_X | XTENSA_MMU_CACHED_WB,
.name = "rom", .name = "rom",
}, },
}; };

@@ -267,7 +267,7 @@ ZTEST_USER(userspace, test_disable_mmu_mpu)
uint32_t addr = 0U; uint32_t addr = 0U;
for (int i = 0; i < 8; i++) { for (int i = 0; i < 8; i++) {
uint32_t attr = addr | Z_XTENSA_MMU_XW; uint32_t attr = addr | XTENSA_MMU_PERM_WX;
__asm__ volatile("wdtlb %0, %1; witlb %0, %1" __asm__ volatile("wdtlb %0, %1; witlb %0, %1"
:: "r"(attr), "r"(addr)); :: "r"(attr), "r"(addr));