kernel: mmu: abstract access to page frame flags and address

Introduce z_page_frame_set() and z_page_frame_clear() to manipulate
flags. Obtain the virtual address using the existing
z_page_frame_to_virt(). This will make changes to the page frame
structure easier.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
This commit is contained in:
Nicolas Pitre 2024-05-01 18:28:20 -04:00 committed by Anas Nashif
commit 57305971d1
8 changed files with 44 additions and 36 deletions

View file

@@ -2008,9 +2008,7 @@ static void mark_addr_page_reserved(uintptr_t addr, size_t len)
continue;
}
struct z_page_frame *pf = z_phys_to_page_frame(pos);
pf->flags |= Z_PAGE_FRAME_RESERVED;
z_page_frame_set(z_phys_to_page_frame(pos), Z_PAGE_FRAME_RESERVED);
}
}

View file

@@ -337,15 +337,12 @@ void xtensa_mmu_init(void)
__weak void arch_reserved_pages_update(void)
{
uintptr_t page;
struct z_page_frame *pf;
int idx;
for (page = CONFIG_SRAM_BASE_ADDRESS, idx = 0;
page < (uintptr_t)z_mapped_start;
page += CONFIG_MMU_PAGE_SIZE, idx++) {
pf = &z_page_frames[idx];
pf->flags |= Z_PAGE_FRAME_RESERVED;
z_page_frame_set(&z_page_frames[idx], Z_PAGE_FRAME_RESERVED);
}
}
#endif /* CONFIG_ARCH_HAS_RESERVED_PAGE_FRAMES */

View file

@@ -267,10 +267,10 @@ void k_mem_paging_eviction_init(void);
* a loaded data page may be selected, in which case its associated page frame
* will have the Z_PAGE_FRAME_BACKED bit cleared (as it is no longer cached).
*
* pf->addr will indicate the virtual address the page is currently mapped to.
* Large, sparse backing stores which can contain the entire address space
* may simply generate location tokens purely as a function of pf->addr with no
* other management necessary.
* z_page_frame_to_virt(pf) will indicate the virtual address the page is
* currently mapped to. Large, sparse backing stores which can contain the
* entire address space may simply generate location tokens purely as a
* function of that virtual address with no other management necessary.
*
* This function distinguishes whether it was called on behalf of a page
* fault. A free backing store location must always be reserved in order for

View file

@@ -162,6 +162,16 @@ static inline bool z_page_frame_is_available(struct z_page_frame *page)
return page->flags == 0U;
}
static inline void z_page_frame_set(struct z_page_frame *pf, uint8_t flags)
{
pf->flags |= flags;
}
static inline void z_page_frame_clear(struct z_page_frame *pf, uint8_t flags)
{
pf->flags &= ~flags;
}
static inline void z_assert_phys_aligned(uintptr_t phys)
{
__ASSERT(phys % CONFIG_MMU_PAGE_SIZE == 0U,

View file

@@ -450,10 +450,12 @@ static void frame_mapped_set(struct z_page_frame *pf, void *addr)
* Zephyr equivalent of VSDOs
*/
PF_ASSERT(pf, !z_page_frame_is_mapped(pf) || z_page_frame_is_pinned(pf),
"non-pinned and already mapped to %p", pf->addr);
"non-pinned and already mapped to %p",
z_page_frame_to_virt(pf));
pf->flags |= Z_PAGE_FRAME_MAPPED;
pf->addr = addr;
z_page_frame_set(pf, Z_PAGE_FRAME_MAPPED);
pf->addr = UINT_TO_POINTER(POINTER_TO_UINT(addr)
& ~(CONFIG_MMU_PAGE_SIZE - 1));
}
/* LCOV_EXCL_START */
@@ -475,7 +477,7 @@ static int virt_to_page_frame(void *virt, uintptr_t *phys)
Z_PAGE_FRAME_FOREACH(paddr, pf) {
if (z_page_frame_is_mapped(pf)) {
if (virt == pf->addr) {
if (virt == z_page_frame_to_virt(pf)) {
ret = 0;
if (phys != NULL) {
*phys = z_page_frame_to_phys(pf);
@@ -523,7 +525,8 @@ static int map_anon_page(void *addr, uint32_t flags)
pf = k_mem_paging_eviction_select(&dirty);
__ASSERT(pf != NULL, "failed to get a page frame");
LOG_DBG("evicting %p at 0x%lx", pf->addr,
LOG_DBG("evicting %p at 0x%lx",
z_page_frame_to_virt(pf),
z_page_frame_to_phys(pf));
ret = page_frame_prepare_locked(pf, &dirty, false, &location);
if (ret != 0) {
@@ -542,7 +545,7 @@ static int map_anon_page(void *addr, uint32_t flags)
arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE, flags | K_MEM_CACHE_WB);
if (lock) {
pf->flags |= Z_PAGE_FRAME_PINNED;
z_page_frame_set(pf, Z_PAGE_FRAME_PINNED);
}
frame_mapped_set(pf, addr);
@@ -930,9 +933,9 @@ static void mark_linker_section_pinned(void *start_addr, void *end_addr,
frame_mapped_set(pf, addr);
if (pin) {
pf->flags |= Z_PAGE_FRAME_PINNED;
z_page_frame_set(pf, Z_PAGE_FRAME_PINNED);
} else {
pf->flags &= ~Z_PAGE_FRAME_PINNED;
z_page_frame_clear(pf, Z_PAGE_FRAME_PINNED);
}
}
}
@@ -975,7 +978,7 @@ void z_mem_manage_init(void)
* structures, and any code used to perform page fault
* handling, page-ins, etc.
*/
pf->flags |= Z_PAGE_FRAME_PINNED;
z_page_frame_set(pf, Z_PAGE_FRAME_PINNED);
}
#endif /* CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
@@ -1177,7 +1180,7 @@ static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
LOG_ERR("out of backing store memory");
return -ENOMEM;
}
arch_mem_page_out(pf->addr, *location_ptr);
arch_mem_page_out(z_page_frame_to_virt(pf), *location_ptr);
} else {
/* Shouldn't happen unless this function is mis-used */
__ASSERT(!dirty, "un-mapped page determined to be dirty");
@@ -1186,7 +1189,7 @@ static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
/* Mark as busy so that z_page_frame_is_evictable() returns false */
__ASSERT(!z_page_frame_is_busy(pf), "page frame 0x%lx is already busy",
phys);
pf->flags |= Z_PAGE_FRAME_BUSY;
z_page_frame_set(pf, Z_PAGE_FRAME_BUSY);
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
/* Update dirty parameter, since we set to true if it wasn't backed
* even if otherwise clean
@@ -1222,7 +1225,7 @@ static int do_mem_evict(void *addr)
dirty = (flags & ARCH_DATA_PAGE_DIRTY) != 0;
pf = z_phys_to_page_frame(phys);
__ASSERT(pf->addr == addr, "page frame address mismatch");
__ASSERT(z_page_frame_to_virt(pf) == addr, "page frame address mismatch");
ret = page_frame_prepare_locked(pf, &dirty, false, &location);
if (ret != 0) {
goto out;
@@ -1294,7 +1297,7 @@ int z_page_frame_evict(uintptr_t phys)
ret = 0;
goto out;
}
flags = arch_page_info_get(pf->addr, NULL, false);
flags = arch_page_info_get(z_page_frame_to_virt(pf), NULL, false);
/* Shouldn't ever happen */
__ASSERT((flags & ARCH_DATA_PAGE_LOADED) != 0, "data page not loaded");
dirty = (flags & ARCH_DATA_PAGE_DIRTY) != 0;
@@ -1480,7 +1483,7 @@ static bool do_page_fault(void *addr, bool pin)
uintptr_t phys = page_in_location;
pf = z_phys_to_page_frame(phys);
pf->flags |= Z_PAGE_FRAME_PINNED;
z_page_frame_set(pf, Z_PAGE_FRAME_PINNED);
}
/* This if-block is to pin the page if it is
@@ -1500,7 +1503,8 @@ static bool do_page_fault(void *addr, bool pin)
/* Need to evict a page frame */
pf = do_eviction_select(&dirty);
__ASSERT(pf != NULL, "failed to get a page frame");
LOG_DBG("evicting %p at 0x%lx", pf->addr,
LOG_DBG("evicting %p at 0x%lx",
z_page_frame_to_virt(pf),
z_page_frame_to_phys(pf));
paging_stats_eviction_inc(faulting_thread, dirty);
@@ -1522,14 +1526,13 @@ static bool do_page_fault(void *addr, bool pin)
#ifdef CONFIG_DEMAND_PAGING_ALLOW_IRQ
key = irq_lock();
pf->flags &= ~Z_PAGE_FRAME_BUSY;
z_page_frame_clear(pf, Z_PAGE_FRAME_BUSY);
#endif /* CONFIG_DEMAND_PAGING_ALLOW_IRQ */
z_page_frame_clear(pf, Z_PAGE_FRAME_MAPPED);
frame_mapped_set(pf, addr);
if (pin) {
pf->flags |= Z_PAGE_FRAME_PINNED;
z_page_frame_set(pf, Z_PAGE_FRAME_PINNED);
}
pf->flags |= Z_PAGE_FRAME_MAPPED;
pf->addr = UINT_TO_POINTER(POINTER_TO_UINT(addr)
& ~(CONFIG_MMU_PAGE_SIZE - 1));
arch_mem_page_in(addr, z_page_frame_to_phys(pf));
k_mem_paging_backing_store_page_finalize(pf, page_in_location);
@@ -1593,7 +1596,7 @@ static void do_mem_unpin(void *addr)
"invalid data page at %p", addr);
if ((flags & ARCH_DATA_PAGE_LOADED) != 0) {
pf = z_phys_to_page_frame(phys);
pf->flags &= ~Z_PAGE_FRAME_PINNED;
z_page_frame_clear(pf, Z_PAGE_FRAME_PINNED);
}
irq_unlock(key);
}

View file

@@ -31,7 +31,7 @@ void arch_reserved_pages_update(void)
}
struct z_page_frame *pf = z_phys_to_page_frame(pos);
pf->flags |= Z_PAGE_FRAME_RESERVED;
z_page_frame_set(pf, Z_PAGE_FRAME_RESERVED);
}
}

View file

@@ -45,7 +45,7 @@ int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
bool page_fault)
{
/* Simply returns the virtual address */
*location = POINTER_TO_UINT(pf->addr);
*location = POINTER_TO_UINT(z_page_frame_to_virt(pf));
return 0;
}

View file

@@ -36,7 +36,7 @@ static void nru_periodic_update(struct k_timer *timer)
}
/* Clear accessed bit in page tables */
(void)arch_page_info_get(pf->addr, NULL, true);
(void)arch_page_info_get(z_page_frame_to_virt(pf), NULL, true);
}
irq_unlock(key);
@@ -58,7 +58,7 @@ struct z_page_frame *k_mem_paging_eviction_select(bool *dirty_ptr)
continue;
}
flags = arch_page_info_get(pf->addr, NULL, false);
flags = arch_page_info_get(z_page_frame_to_virt(pf), NULL, false);
accessed = (flags & ARCH_DATA_PAGE_ACCESSED) != 0UL;
dirty = (flags & ARCH_DATA_PAGE_DIRTY) != 0UL;