From c692136f2147092e8182bc2e4e9c9f373474f294 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Thu, 29 Aug 2024 14:18:20 -0400
Subject: [PATCH] mmu: introduce k_mem_update_flags()

It is sometimes necessary to modify/update memory permissions on some
pages, especially with LLEXT where some allocated segments have to be
executable.

Signed-off-by: Nicolas Pitre
---
 include/zephyr/kernel/mm.h | 17 +++++++++++++++++
 kernel/mmu.c               | 30 ++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+)

diff --git a/include/zephyr/kernel/mm.h b/include/zephyr/kernel/mm.h
index 443de47d3bc..79c3dc4815f 100644
--- a/include/zephyr/kernel/mm.h
+++ b/include/zephyr/kernel/mm.h
@@ -258,6 +258,23 @@ static inline void k_mem_unmap(void *addr, size_t size)
 	k_mem_unmap_phys_guard(addr, size, true);
 }
 
+/**
+ * Modify memory mapping attribute flags
+ *
+ * This updates caching, access and control flags for the provided
+ * page-aligned memory region.
+ *
+ * Calling this function on a region which was not mapped to begin with is
+ * undefined behavior. However, system memory implicitly mapped at boot time
+ * is supported.
+ *
+ * @param addr Page-aligned memory region base virtual address
+ * @param size Page-aligned memory region size
+ * @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
+ * @return 0 for success, negative error code otherwise.
+ */
+int k_mem_update_flags(void *addr, size_t size, uint32_t flags);
+
 /**
  * Given an arbitrary region, provide a aligned region that covers it
  *
diff --git a/kernel/mmu.c b/kernel/mmu.c
index 8d086da5629..401e824b856 100644
--- a/kernel/mmu.c
+++ b/kernel/mmu.c
@@ -813,6 +813,36 @@ out:
 	k_spin_unlock(&z_mm_lock, key);
 }
 
+int k_mem_update_flags(void *addr, size_t size, uint32_t flags)
+{
+	uintptr_t phys;
+	k_spinlock_key_t key;
+	int ret;
+
+	k_mem_assert_virtual_region(addr, size);
+
+	key = k_spin_lock(&z_mm_lock);
+
+	/*
+	 * We can achieve the desired result without explicit architecture support
+	 * by unmapping and remapping the same physical memory using the new flags.
+	 */
+
+	ret = arch_page_phys_get(addr, &phys);
+	if (ret < 0) {
+		goto out;
+	}
+
+	/* TODO: detect and handle paged-out memory as well */
+
+	arch_mem_unmap(addr, size);
+	arch_mem_map(addr, phys, size, flags);
+
+out:
+	k_spin_unlock(&z_mm_lock, key);
+	return ret;
+}
+
 size_t k_mem_free_get(void)
 {
 	size_t ret;
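
Usage sketch (not part of the patch): the commit message cites LLEXT, whose
loader needs code segments to become executable after it has written them.
The flow below is a minimal, hypothetical example of that pattern.
k_mem_map(), k_mem_unmap() and the K_MEM_PERM_* flags are the existing
Zephyr APIs; load_text_segment() is an invented illustration.

	#include <string.h>
	#include <zephyr/kernel.h>
	#include <zephyr/kernel/mm.h>

	/* Map an anonymous RW region, copy code into it, then flip the
	 * pages to read+execute with k_mem_update_flags(). Both the base
	 * address and the size must be page-aligned, as the API requires;
	 * k_mem_map() already returns page-aligned memory.
	 */
	static void *load_text_segment(const void *code, size_t size)
	{
		void *seg = k_mem_map(size, K_MEM_PERM_RW);

		if (seg == NULL) {
			return NULL;
		}
		memcpy(seg, code, size);

		/* Omitting K_MEM_PERM_RW drops write access, so the pages
		 * end up read-only and executable (W^X).
		 */
		if (k_mem_update_flags(seg, size, K_MEM_PERM_EXEC) != 0) {
			k_mem_unmap(seg, size);
			return NULL;
		}
		return seg;
	}

A real loader would additionally have to synchronize the instruction cache
with the freshly written code before jumping into it; that is out of scope
for this sketch.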