From 5e978d237cc7ae8071629dce28bac52498b70805 Mon Sep 17 00:00:00 2001
From: Anas Nashif
Date: Fri, 22 Jan 2021 07:37:16 -0500
Subject: [PATCH] Revert "mmu: backing stores reserve page fault room"

This reverts commit 7a642f81abdbb2cc91e0b42e084566d8bfb62985.

Signed-off-by: Anas Nashif
---
 kernel/include/mmu.h                          |  9 +---
 kernel/mmu.c                                  |  7 ++--
 subsys/demand_paging/backing_store/ram.c      | 15 ++-----
 .../mem_protect/demand_paging/src/main.c      | 42 ++-----------------
 4 files changed, 11 insertions(+), 62 deletions(-)

diff --git a/kernel/include/mmu.h b/kernel/include/mmu.h
index 003d1d6062e..00d9579d04c 100644
--- a/kernel/include/mmu.h
+++ b/kernel/include/mmu.h
@@ -268,21 +268,14 @@ void z_eviction_init(void);
  * may simply generate location tokens purely as a function of pf->addr with no
  * other management necessary.
  *
- * This function distinguishes whether it was called on behalf of a page
- * fault. A free backing store location must always be reserved in order for
- * page faults to succeed. If the page_fault parameter is not set, this
- * function should return -ENOMEM even if one location is available.
- *
  * This function is invoked with interrupts locked.
  *
  * @param addr Virtual address to obtain a storage location
  * @param [out] location storage location token
- * @param page_fault Whether this request was for a page fault
  * @return 0 Success
  * @return -ENOMEM Backing store is full
  */
-int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location,
-				 bool page_fault);
+int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location);
 
 /**
  * Free a backing store location
diff --git a/kernel/mmu.c b/kernel/mmu.c
index 404203e260c..15defbfd959 100644
--- a/kernel/mmu.c
+++ b/kernel/mmu.c
@@ -607,7 +607,7 @@ static void page_frame_free_locked(struct z_page_frame *pf)
  * Returns -ENOMEM if the backing store is full
  */
 static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
-				     bool page_fault, uintptr_t *location_ptr)
+				     bool page_in, uintptr_t *location_ptr)
 {
 	uintptr_t phys;
 	int ret;
@@ -632,13 +632,12 @@ static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
 		dirty = dirty || !z_page_frame_is_backed(pf);
 	}
 
-	if (dirty || page_fault) {
+	if (dirty || page_in) {
 		arch_mem_scratch(phys);
 	}
 
 	if (z_page_frame_is_mapped(pf)) {
-		ret = z_backing_store_location_get(pf, location_ptr,
-						   page_fault);
+		ret = z_backing_store_location_get(pf, location_ptr);
 		if (ret != 0) {
 			LOG_ERR("out of backing store memory");
 			return -ENOMEM;
diff --git a/subsys/demand_paging/backing_store/ram.c b/subsys/demand_paging/backing_store/ram.c
index fd324396ff0..b2744cbde0c 100644
--- a/subsys/demand_paging/backing_store/ram.c
+++ b/subsys/demand_paging/backing_store/ram.c
@@ -51,7 +51,6 @@ static char backing_store[CONFIG_MMU_PAGE_SIZE *
 			  CONFIG_BACKING_STORE_RAM_PAGES];
 static struct k_mem_slab backing_slabs;
-static unsigned int free_slabs;
 
 static void *location_to_slab(uintptr_t location)
 {
@@ -79,21 +78,17 @@ static uintptr_t slab_to_location(void *slab)
 	return offset;
 }
 
-int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location,
-				 bool page_fault)
+int z_backing_store_location_get(struct z_page_frame *pf,
+				 uintptr_t *location)
 {
 	int ret;
 	void *slab;
 
-	if ((!page_fault && free_slabs == 1) || free_slabs == 0) {
+	ret = k_mem_slab_alloc(&backing_slabs, &slab, K_NO_WAIT);
+	if (ret != 0) {
 		return -ENOMEM;
 	}
-
-	ret = k_mem_slab_alloc(&backing_slabs, &slab, K_NO_WAIT);
-	__ASSERT(ret == 0, "slab count mismatch");
-	(void)ret;
 	*location = slab_to_location(slab);
-	free_slabs--;
 	return 0;
 }
 
@@ -103,7 +98,6 @@ void z_backing_store_location_free(uintptr_t location)
 	void *slab = location_to_slab(location);
 
 	k_mem_slab_free(&backing_slabs, &slab);
-	free_slabs++;
 }
 
 void z_backing_store_page_out(uintptr_t location)
@@ -127,5 +121,4 @@ void z_backing_store_init(void)
 {
 	k_mem_slab_init(&backing_slabs, backing_store, CONFIG_MMU_PAGE_SIZE,
 			CONFIG_BACKING_STORE_RAM_PAGES);
-	free_slabs = CONFIG_BACKING_STORE_RAM_PAGES;
 }
diff --git a/tests/kernel/mem_protect/demand_paging/src/main.c b/tests/kernel/mem_protect/demand_paging/src/main.c
index 547eff3452a..7a267d6e739 100644
--- a/tests/kernel/mem_protect/demand_paging/src/main.c
+++ b/tests/kernel/mem_protect/demand_paging/src/main.c
@@ -9,7 +9,7 @@
 #include
 
 #ifdef CONFIG_BACKING_STORE_RAM_PAGES
-#define EXTRA_PAGES (CONFIG_BACKING_STORE_RAM_PAGES - 1)
+#define EXTRA_PAGES CONFIG_BACKING_STORE_RAM_PAGES
 #else
 #error "Unsupported configuration"
 #endif
@@ -182,41 +182,6 @@ void test_z_mem_unpin(void)
 	test_z_mem_page_out();
 }
 
-/* Show that even if we map enough anonymous memory to fill the backing
- * store, we can still handle pagefaults.
- * This eats up memory so should be last in the suite.
- */
-void test_backing_store_capacity(void)
-{
-	char *mem, *ret;
-	int key;
-	unsigned long faults;
-	size_t size = (((CONFIG_BACKING_STORE_RAM_PAGES - 1) - HALF_PAGES) *
-		       CONFIG_MMU_PAGE_SIZE);
-
-	/* Consume the rest of memory */
-	mem = k_mem_map(size, K_MEM_PERM_RW);
-	zassert_not_null(mem, "k_mem_map failed");
-
-	/* Show no memory is left */
-	ret = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
-	zassert_is_null(ret, "k_mem_map shouldn't have succeeded");
-
-	key = irq_lock();
-	faults = z_num_pagefaults_get();
-	/* Poke all anonymous memory */
-	for (size_t i = 0; i < HALF_BYTES; i++) {
-		arena[i] = nums[i % 10];
-	}
-	for (size_t i = 0; i < size; i++) {
-		mem[i] = nums[i % 10];
-	}
-	faults = z_num_pagefaults_get() - faults;
-	irq_unlock(key);
-
-	zassert_not_equal(faults, 0, "should have had some pagefaults");
-}
-
 /* ztest main entry*/
 void test_main(void)
 {
@@ -226,8 +191,7 @@ void test_main(void)
 			 ztest_unit_test(test_z_mem_page_out),
 			 ztest_unit_test(test_z_mem_page_in),
 			 ztest_unit_test(test_z_mem_pin),
-			 ztest_unit_test(test_z_mem_unpin),
-			 ztest_unit_test(test_backing_store_capacity));
-
+			 ztest_unit_test(test_z_mem_unpin)
+			 );
 	ztest_run_test_suite(test_demand_paging);
 }
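
For reference, the allocator behavior this revert restores reduces to a plain
slab allocation: no free_slabs counter, and no location held back so that a
page fault can always succeed. A minimal sketch of the restored path, assuming
Zephyr's k_mem_slab API and the file-scope backing_slabs slab and
slab_to_location() helper from subsys/demand_paging/backing_store/ram.c:

/* Sketch of the allocator restored by this revert; backing_slabs and
 * slab_to_location() are the file-scope helpers in
 * subsys/demand_paging/backing_store/ram.c.
 */
int z_backing_store_location_get(struct z_page_frame *pf,
				 uintptr_t *location)
{
	void *slab;

	/* With the reservation logic reverted, any caller may take the
	 * last free slab; exhaustion surfaces as -ENOMEM, which
	 * page_frame_prepare_locked() in kernel/mmu.c passes through.
	 */
	if (k_mem_slab_alloc(&backing_slabs, &slab, K_NO_WAIT) != 0) {
		return -ENOMEM;
	}
	*location = slab_to_location(slab);
	return 0;
}

Consequently every backing store page is usable again, which is why the test
suite's EXTRA_PAGES goes back from CONFIG_BACKING_STORE_RAM_PAGES - 1 to
CONFIG_BACKING_STORE_RAM_PAGES and the capacity test that relied on the
reserved slot is removed.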