kernel: mmu: support for on-demand mappings

This provides memory mappings with the ability to be initialized in their
paged-out state and be paged in on demand. This is especially nice for
anonymous memory mappings as they no longer have to allocate all memory
at mem_map time. This also allows for file mappings to be implemented by
simply providing backing store location tokens.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
Nicolas Pitre, 2024-07-17 00:06:42 -04:00 (committed by Anas Nashif)
commit c9aa98ebc0
6 changed files with 144 additions and 13 deletions
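As an illustration of what changes for callers (a hypothetical sketch, not part of this commit; it assumes CONFIG_DEMAND_MAPPING=y, the usual Zephyr memory-management headers, and the demand paging statistics API also used by the tests below), an anonymous k_mem_map() now costs page faults at first access rather than page frames at mem_map time:

#include <stdint.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/kernel/mm/demand_paging.h>

/* Hypothetical helper: map a 128-page anonymous region, then touch it.
 * The mapping itself consumes no page frames; memory is allocated (and
 * zero-filled) page by page as it is first accessed.
 */
static void *map_and_touch(void)
{
	unsigned long before = k_mem_num_pagefaults_get();
	uint8_t *p = k_mem_map(128 * CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

	for (size_t i = 0; p != NULL && i < 128; i++) {
		p[i * CONFIG_MMU_PAGE_SIZE] = 1;	/* demand page-in */
	}
	printk("page faults taken: %lu\n", k_mem_num_pagefaults_get() - before);
	return p;
}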

@ -765,6 +765,13 @@ config ARCH_HAS_DEMAND_PAGING
This hidden configuration should be selected by the architecture if
demand paging is supported.
config ARCH_HAS_DEMAND_MAPPING
bool
help
This hidden configuration should be selected by the architecture if
demand paging is supported and arch_mem_map() supports
K_MEM_MAP_UNPAGED.
config ARCH_HAS_RESERVED_PAGE_FRAMES
bool
help

@ -114,6 +114,25 @@ extern "C" {
*/
#define K_MEM_MAP_LOCK BIT(17)
/**
* Region will be unpaged, i.e. not mapped into memory.
*
* This is meant to be used by kernel code, not by application code.
*
* The corresponding memory address range will be set up so that no actual
* memory is allocated initially. Allocation will happen through demand
* paging when addresses in that range are accessed. This is incompatible
* with K_MEM_MAP_LOCK.
*
* When this flag is specified, the phys argument to arch_mem_map()
* is interpreted as a backing store location value, not a physical address.
* This is very similar to arch_mem_page_out() in that regard.
* Two special location values are defined: ARCH_UNPAGED_ANON_ZERO and
* ARCH_UNPAGED_ANON_UNINIT. These are to be used with anonymous memory
* mappings for zeroed and uninitialized pages, respectively.
*/
#define K_MEM_MAP_UNPAGED BIT(18)
/** @} */
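To make this contract concrete, here is a deliberately simplified model of the arch side (an illustrative assumption, not any real port's code: the model_pte structure, the flat table and the model_mem_map() name are invented). The only point it demonstrates is that with K_MEM_MAP_UNPAGED the phys argument is recorded as a location token in a not-present entry, much as arch_mem_page_out() does for evicted pages, so the page fault handler can later hand that token back to the paging code:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <zephyr/kernel/mm.h>

#define MODEL_NUM_PAGES 256	/* arbitrary size for this model */

struct model_pte {
	bool present;		/* false: phys_or_location holds a location token */
	uintptr_t phys_or_location;
	uint32_t flags;
};

static struct model_pte model_table[MODEL_NUM_PAGES];

static void model_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
{
	for (size_t off = 0; off < size; off += CONFIG_MMU_PAGE_SIZE) {
		size_t idx = (((uintptr_t)virt + off) / CONFIG_MMU_PAGE_SIZE)
			     % MODEL_NUM_PAGES;
		struct model_pte *pte = &model_table[idx];

		/* Unpaged: record the per-page location token, or one of the
		 * ARCH_UNPAGED_ANON_* values, in a not-present entry.
		 */
		pte->present = (flags & K_MEM_MAP_UNPAGED) == 0U;
		pte->phys_or_location = phys + off;
		pte->flags = flags;
	}
}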
/**
@ -173,6 +192,54 @@ static inline void *k_mem_map(size_t size, uint32_t flags)
return k_mem_map_phys_guard((uintptr_t)NULL, size, flags, true);
}
#ifdef CONFIG_DEMAND_MAPPING
/**
* Create an unpaged mapping
*
* This maps backing-store "location" tokens into Zephyr's address space.
* The corresponding memory address range will be set up so that no actual
* memory is allocated initially. Allocation will happen through demand
* paging when addresses in the mapped range are accessed.
*
* The kernel will choose a base virtual address and return it to the caller.
* The memory access permissions for all contexts will be set per the
* provided flags argument.
*
* If user thread access control needs to be managed in any way, do not set
* the K_MEM_PERM_USER flag here; instead manage the region's permissions
* with memory domain APIs after the mapping has been established. Setting
* K_MEM_PERM_USER here would allow all user threads to access this memory,
* which is usually undesirable.
*
* This is incompatible with K_MEM_MAP_LOCK.
*
* The provided backing-store "location" token must be linearly incrementable
* by a page size across the entire mapping.
*
* Allocated pages will have write-back cache settings.
*
* The returned virtual memory pointer will be page-aligned. The size
* parameter must be page-aligned as well.
*
* Note that the allocation includes two guard pages immediately before
* and after the requested region. The total size of the allocation will be
* the requested size plus the size of these two guard pages.
*
* @param location Backing store initial location token
* @param size Size of the memory mapping. This must be page-aligned.
* @param flags K_MEM_PERM_*, K_MEM_MAP_* control flags.
* @return The mapped memory location, or NULL if there is insufficient
* virtual address space to establish the mapping, or insufficient memory
* for paging structures.
*/
static inline void *k_mem_map_unpaged(uintptr_t location, size_t size, uint32_t flags)
{
flags |= K_MEM_MAP_UNPAGED;
return k_mem_map_phys_guard(location, size, flags, false);
}
#endif
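A brief usage sketch (hypothetical kernel-side caller; the region_loc token and the 64-page size are invented for illustration). Successive pages of the mapping correspond to location, location + CONFIG_MMU_PAGE_SIZE, and so on, per the linear-increment requirement above; user access, if needed, would then be granted through memory domain APIs rather than K_MEM_PERM_USER:

#include <zephyr/kernel/mm.h>

/* Expose a 64-page backing store region as demand-paged kernel memory.
 * No page frames are consumed until the returned range is accessed.
 */
static void *map_store_region(uintptr_t region_loc)
{
	return k_mem_map_unpaged(region_loc,
				 64 * CONFIG_MMU_PAGE_SIZE,
				 K_MEM_PERM_RW);
}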
/**
* Un-map mapped memory
*

@ -123,6 +123,15 @@ menuconfig DEMAND_PAGING
backing store for evicted pages.
if DEMAND_PAGING
config DEMAND_MAPPING
bool "Allow on-demand memory mappings"
depends on ARCH_HAS_DEMAND_MAPPING
default y
help
When this is enabled, RAM-based memory mappings don't actually
allocate memory at mem_map time. They are instead populated at
access time through the demand paging mechanism.
config DEMAND_PAGING_ALLOW_IRQ
bool "Allow interrupts during page-ins/outs"
help

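Note that only unlocked mappings take the new on-demand path (see the k_mem_map_phys_guard() change below): with K_MEM_MAP_LOCK the pages are still allocated and pinned up front, so code that cannot tolerate page faults keeps the previous behaviour. A hedged sketch (the helper name and size are illustrative):

#include <zephyr/kernel/mm.h>

/* Eagerly allocated, pinned mapping even with CONFIG_DEMAND_MAPPING=y,
 * suitable for data accessed in contexts where a page fault is not
 * acceptable.
 */
static void *alloc_pinned_buf(void)
{
	return k_mem_map(4 * CONFIG_MMU_PAGE_SIZE,
			 K_MEM_PERM_RW | K_MEM_MAP_LOCK);
}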
@ -549,7 +549,7 @@ static int map_anon_page(void *addr, uint32_t flags)
 	}
 	phys = k_mem_page_frame_to_phys(pf);
-	arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE, flags | K_MEM_CACHE_WB);
+	arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE, flags);
 	if (lock) {
 		k_mem_page_frame_set(pf, K_MEM_PAGE_FRAME_PINNED);
@ -622,16 +622,34 @@ void *k_mem_map_phys_guard(uintptr_t phys, size_t size, uint32_t flags, bool is_
 	if (is_anon) {
 		/* Mapping from anonymous memory */
-		VIRT_FOREACH(dst, size, pos) {
-			ret = map_anon_page(pos, flags);
+		flags |= K_MEM_CACHE_WB;
+#ifdef CONFIG_DEMAND_MAPPING
+		if ((flags & K_MEM_MAP_LOCK) == 0) {
+			flags |= K_MEM_MAP_UNPAGED;
+			VIRT_FOREACH(dst, size, pos) {
+				arch_mem_map(pos,
+					     uninit ? ARCH_UNPAGED_ANON_UNINIT
+						    : ARCH_UNPAGED_ANON_ZERO,
+					     CONFIG_MMU_PAGE_SIZE, flags);
+			}
+			LOG_DBG("memory mapping anon pages %p to %p unpaged", dst, pos-1);
+			/* skip the memset() below */
+			uninit = true;
+		} else
+#endif
+		{
+			VIRT_FOREACH(dst, size, pos) {
+				ret = map_anon_page(pos, flags);
-			if (ret != 0) {
-				/* TODO: call k_mem_unmap(dst, pos - dst) when
-				 * implemented in #28990 and release any guard virtual
-				 * page as well.
-				 */
-				dst = NULL;
-				goto out;
+				if (ret != 0) {
+					/* TODO:
+					 * call k_mem_unmap(dst, pos - dst)
+					 * when implemented in #28990 and
+					 * release any guard virtual page as well.
+					 */
+					dst = NULL;
+					goto out;
+				}
 			}
 		}
 	} else {
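The uninit handling above also composes with the existing K_MEM_MAP_UNINIT flag: such mappings are installed as ARCH_UNPAGED_ANON_UNINIT, so neither allocation nor zero-fill happens before first access. A short hedged sketch (helper name and size are illustrative):

#include <zephyr/kernel/mm.h>

/* The caller promises to overwrite this memory before reading it, so the
 * zero-fill is skipped; page frames are still only allocated on first access.
 */
static void *alloc_scratch(void)
{
	return k_mem_map(32 * CONFIG_MMU_PAGE_SIZE,
			 K_MEM_PERM_RW | K_MEM_MAP_UNINIT);
}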
@ -1114,6 +1132,20 @@ extern struct k_mem_paging_histogram_t z_paging_histogram_backing_store_page_out
static inline void do_backing_store_page_in(uintptr_t location)
{
#ifdef CONFIG_DEMAND_MAPPING
/* Check for special cases */
switch (location) {
case ARCH_UNPAGED_ANON_ZERO:
memset(K_MEM_SCRATCH_PAGE, 0, CONFIG_MMU_PAGE_SIZE);
__fallthrough;
case ARCH_UNPAGED_ANON_UNINIT:
/* nothing else to do */
return;
default:
break;
}
#endif /* CONFIG_DEMAND_MAPPING */
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
uint32_t time_diff;

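The ARCH_UNPAGED_ANON_ZERO case above is what preserves k_mem_map()'s zero-fill guarantee without any backing store I/O: the newly allocated frame is simply zeroed through the scratch mapping. An illustration from the caller's point of view (assumed test-style code, not part of this change):

#include <stdint.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/sys/__assert.h>

static void check_zero_fill(void)
{
	volatile uint8_t *page = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);

	/* The first read faults, lands in the ARCH_UNPAGED_ANON_ZERO case
	 * above, and therefore observes a zero-filled page.
	 */
	__ASSERT(page != NULL, "mapping failed");
	__ASSERT(page[0] == 0U, "anonymous pages must read back as zero");
	page[0] = 0xaa;		/* the page frame now exists and is dirty */
}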
@ -127,6 +127,12 @@ void k_mem_paging_backing_store_page_in(uintptr_t location)
void k_mem_paging_backing_store_page_finalize(struct k_mem_page_frame *pf,
uintptr_t location)
{
#ifdef CONFIG_DEMAND_MAPPING
/* anonymous unpaged tokens were never allocated from the backing store: nothing to free */
if (location == ARCH_UNPAGED_ANON_ZERO || location == ARCH_UNPAGED_ANON_UNINIT) {
return;
}
#endif
k_mem_paging_backing_store_location_free(location);
}

@ -344,9 +344,15 @@ ZTEST(demand_paging_stat, test_backing_store_capacity)
 	mem = k_mem_map(size, K_MEM_PERM_RW);
 	zassert_not_null(mem, "k_mem_map failed");
-	/* Show no memory is left */
-	ret = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
-	zassert_is_null(ret, "k_mem_map shouldn't have succeeded");
+	if (!IS_ENABLED(CONFIG_DEMAND_MAPPING)) {
+		/* Show no memory is left */
+		ret = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
+		zassert_is_null(ret, "k_mem_map shouldn't have succeeded");
+	} else {
+		/* Show it doesn't matter */
+		ret = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
+		zassert_not_null(ret, "k_mem_map should have succeeded");
+	}
 	key = irq_lock();
 	faults = k_mem_num_pagefaults_get();
@ -448,6 +454,10 @@ ZTEST_USER(demand_paging_stat, test_user_get_hist)
void *demand_paging_api_setup(void)
{
arena = k_mem_map(arena_size, K_MEM_PERM_RW);
if (IS_ENABLED(CONFIG_DEMAND_MAPPING)) {
/* force pages in */
k_mem_page_in(arena, arena_size);
}
test_k_mem_page_out();
return NULL;