From 299a2cf62e6f7b9071c680fd66079e9d2d014325 Mon Sep 17 00:00:00 2001
From: Andrew Boie
Date: Fri, 18 Dec 2020 12:01:31 -0800
Subject: [PATCH] mmu: arch_mem_map() may no longer fail

Pre-allocation of paging structures is now required, such that no
allocations are ever needed when mapping memory.

Instantiation of new memory domains may still require allocations
unless a common page table is used.

Signed-off-by: Andrew Boie
---
 arch/x86/core/x86_mmu.c                |  6 ++----
 kernel/include/kernel_arch_interface.h | 13 ++++++++-----
 kernel/mmu.c                           | 18 ++++--------------
 3 files changed, 14 insertions(+), 23 deletions(-)

diff --git a/arch/x86/core/x86_mmu.c b/arch/x86/core/x86_mmu.c
index df00e57746e..4d1de69cf52 100644
--- a/arch/x86/core/x86_mmu.c
+++ b/arch/x86/core/x86_mmu.c
@@ -1084,7 +1084,7 @@ static pentry_t flags_to_entry(uint32_t flags)
 	case K_MEM_CACHE_WB:
 		break;
 	default:
-		return -ENOTSUP;
+		__ASSERT(false, "bad memory mapping flags 0x%x", flags);
 	}

 	if ((flags & K_MEM_PERM_RW) != 0U) {
@@ -1103,12 +1103,10 @@ static pentry_t flags_to_entry(uint32_t flags)
 }

 /* map new region virt..virt+size to phys with provided arch-neutral flags */
-int arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
+void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 {
 	range_map_unlocked(virt, phys, size, flags_to_entry(flags),
 			   MASK_ALL, 0);
-
-	return 0;
 }

 #if CONFIG_X86_STACK_PROTECTION
diff --git a/kernel/include/kernel_arch_interface.h b/kernel/include/kernel_arch_interface.h
index 4e014d05619..2cf4ac9925a 100644
--- a/kernel/include/kernel_arch_interface.h
+++ b/kernel/include/kernel_arch_interface.h
@@ -241,6 +241,13 @@ static inline bool arch_is_in_isr(void);
  * to this API are assumed to be serialized, and indeed all usage will
  * originate from kernel/mm.c which handles virtual memory management.
  *
+ * Architectures are expected to pre-allocate page tables for the entire
+ * address space, as defined by CONFIG_KERNEL_VM_BASE and
+ * CONFIG_KERNEL_VM_SIZE. This operation should never require any kind of
+ * allocation for paging structures.
+ *
+ * Validation of arguments should be done via assertions.
+ *
  * This API is part of infrastructure still under development and may
  * change.
  *
@@ -248,12 +255,8 @@ static inline bool arch_is_in_isr(void);
  * @param addr Page-aligned Source physical address to map
  * @param size Page-aligned size of the mapped memory region in bytes
  * @param flags Caching, access and control flags, see K_MAP_* macros
- * @retval 0 Success
- * @retval -ENOTSUP Unsupported cache mode with no suitable fallback, or
- *         unsupported flags
- * @retval -ENOMEM Memory for additional paging structures unavailable
  */
-int arch_mem_map(void *dest, uintptr_t addr, size_t size, uint32_t flags);
+void arch_mem_map(void *dest, uintptr_t addr, size_t size, uint32_t flags);

 /**
  * Remove mappings for a provided virtual address range
diff --git a/kernel/mmu.c b/kernel/mmu.c
index 7cf128f077a..15d984cade0 100644
--- a/kernel/mmu.c
+++ b/kernel/mmu.c
@@ -276,7 +276,6 @@ static void frame_mapped_set(struct z_page_frame *pf, void *addr)
  */
 static int map_anon_page(void *addr, uint32_t flags)
 {
-	int ret;
 	struct z_page_frame *pf;
 	uintptr_t phys;
 	bool lock = (flags & K_MEM_MAP_LOCK) != 0;
@@ -285,14 +284,10 @@ static int map_anon_page(void *addr, uint32_t flags)
 	if (pf == NULL) {
 		return -ENOMEM;
 	}
-	phys = z_page_frame_to_phys(pf);
-	ret = arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE,
-			   flags | K_MEM_CACHE_WB);
-	if (ret != 0) {
-		free_page_frame_list_put(pf);
-		return -ENOMEM;
-	}
+	phys = z_page_frame_to_phys(pf);
+	arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE, flags | K_MEM_CACHE_WB);
+
 	if (lock) {
 		pf->flags |= Z_PAGE_FRAME_PINNED;
 	}

@@ -385,7 +380,6 @@ void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
 {
 	uintptr_t aligned_phys, addr_offset;
 	size_t aligned_size;
-	int ret;
 	k_spinlock_key_t key;
 	uint8_t *dest_addr;

@@ -413,11 +407,7 @@ void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
 	LOG_DBG("arch_mem_map(%p, 0x%lx, %zu, %x) offset %lu", dest_addr,
 		aligned_phys, aligned_size, flags, addr_offset);

-	ret = arch_mem_map(dest_addr, aligned_phys, aligned_size, flags);
-	if (ret != 0) {
-		LOG_ERR("arch_mem_map() failed with %d", ret);
-		goto fail;
-	}
+	arch_mem_map(dest_addr, aligned_phys, aligned_size, flags);

 	k_spin_unlock(&z_mm_lock, key);
 	*virt_ptr = dest_addr + addr_offset;
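
Note (not part of the patch): the sketch below illustrates the contract the updated
kernel_arch_interface.h documentation describes, i.e. an architecture port that reserves
all leaf page tables for the CONFIG_KERNEL_VM_BASE..+CONFIG_KERNEL_VM_SIZE window at
build time, so arch_mem_map() never allocates, never fails, and validates its arguments
with assertions. ARCH_PTES_PER_TABLE, leaf_tables[], arch_flags_to_pte() and the flat
indexing scheme are hypothetical names for a made-up single-level MMU, not code from the
tree; the real x86 implementation is range_map_unlocked()/flags_to_entry() above.

/* Sketch only: hypothetical single-level MMU.  Assumes the usual Zephyr
 * headers for __ASSERT() and the K_MEM_* flag definitions, plus the
 * CONFIG_KERNEL_VM_BASE/_SIZE and CONFIG_MMU_PAGE_SIZE Kconfig symbols.
 */
#include <stdint.h>
#include <stddef.h>
#include <sys/__assert.h>
#include <sys/mem_manage.h>

#define ARCH_PTES_PER_TABLE	512U
#define ARCH_TABLE_SPAN		((size_t)ARCH_PTES_PER_TABLE * CONFIG_MMU_PAGE_SIZE)
#define ARCH_NUM_TABLES \
	((CONFIG_KERNEL_VM_SIZE + ARCH_TABLE_SPAN - 1) / ARCH_TABLE_SPAN)

/* Hypothetical PTE bits for this sketch */
#define ARCH_PTE_PRESENT	0x1ULL
#define ARCH_PTE_RW		0x2ULL

/* Every leaf table the kernel VM window can ever need, reserved at build
 * time so that mapping never has to allocate paging structures.
 */
static uint64_t leaf_tables[ARCH_NUM_TABLES][ARCH_PTES_PER_TABLE];

static uint64_t arch_flags_to_pte(uint32_t flags)
{
	uint32_t cache = flags & K_MEM_CACHE_MASK;

	/* Like the x86 change above: assert instead of returning -ENOTSUP */
	__ASSERT(cache == K_MEM_CACHE_NONE || cache == K_MEM_CACHE_WT ||
		 cache == K_MEM_CACHE_WB,
		 "bad memory mapping flags 0x%x", flags);

	return ARCH_PTE_PRESENT |
	       (((flags & K_MEM_PERM_RW) != 0U) ? ARCH_PTE_RW : 0ULL);
}

void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
{
	uintptr_t va = (uintptr_t)virt;
	uint64_t pte_flags = arch_flags_to_pte(flags);

	/* Argument validation is done with assertions, not error codes */
	__ASSERT((va % CONFIG_MMU_PAGE_SIZE) == 0U, "unaligned virt %p", virt);
	__ASSERT((phys % CONFIG_MMU_PAGE_SIZE) == 0U, "unaligned phys");
	__ASSERT((size % CONFIG_MMU_PAGE_SIZE) == 0U, "unaligned size %zu", size);
	__ASSERT(va >= CONFIG_KERNEL_VM_BASE &&
		 (va - CONFIG_KERNEL_VM_BASE) + size <= CONFIG_KERNEL_VM_SIZE,
		 "mapping outside the kernel virtual region");

	for (size_t off = 0; off < size; off += CONFIG_MMU_PAGE_SIZE) {
		size_t page = (va + off - CONFIG_KERNEL_VM_BASE) /
			      CONFIG_MMU_PAGE_SIZE;

		/* Storage for the entry already exists; just write it */
		leaf_tables[page / ARCH_PTES_PER_TABLE][page % ARCH_PTES_PER_TABLE] =
			(uint64_t)(phys + off) | pte_flags;
	}
	/* void return: with tables pre-allocated, nothing here can fail */
}

The trade-off in such a scheme is a fixed RAM cost proportional to CONFIG_KERNEL_VM_SIZE,
which is what the commit message points at: mapping can no longer fail at run time, but
the paging structures must be budgeted up front.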