Revert "mmu: backing stores reserve page fault room"
This reverts commit 7a642f81ab
.
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
This commit is contained in:
parent
43cd3d3fac
commit
5e978d237c
4 changed files with 11 additions and 62 deletions
|
@@ -268,21 +268,14 @@ void z_eviction_init(void);
  * may simply generate location tokens purely as a function of pf->addr with no
  * other management necessary.
  *
- * This function distinguishes whether it was called on behalf of a page
- * fault. A free backing store location must always be reserved in order for
- * page faults to succeed. If the page_fault parameter is not set, this
- * function should return -ENOMEM even if one location is available.
- *
  * This function is invoked with interrupts locked.
  *
  * @param addr Virtual address to obtain a storage location
  * @param [out] location storage location token
- * @param page_fault Whether this request was for a page fault
  * @return 0 Success
  * @return -ENOMEM Backing store is full
  */
-int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location,
-                                 bool page_fault);
+int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location);
 
 /**
  * Free a backing store location
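For orientation, a minimal caller sketch against the restored two-argument API. This is not code from the commit: the eviction flow and the choose_victim_frame() helper are illustrative assumptions; only z_backing_store_location_get() and z_backing_store_page_out() are real calls from this diff.

    /* Hypothetical eviction path (sketch only, not from this commit).
     * choose_victim_frame() is a made-up helper.
     */
    static int evict_one_frame_sketch(void)
    {
            struct z_page_frame *pf = choose_victim_frame();
            uintptr_t location;
            int ret;

            /* Reserve a backing store slot for this frame's contents */
            ret = z_backing_store_location_get(pf, &location);
            if (ret != 0) {
                    /* Store is full; with this revert, no slot is held
                     * in reserve for page faults anymore.
                     */
                    return -ENOMEM;
            }

            /* Copy the frame out through the scratch mapping */
            z_backing_store_page_out(location);
            return 0;
    }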
@@ -607,7 +607,7 @@ static void page_frame_free_locked(struct z_page_frame *pf)
  * Returns -ENOMEM if the backing store is full
  */
 static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
-                                     bool page_fault, uintptr_t *location_ptr)
+                                     bool page_in, uintptr_t *location_ptr)
 {
         uintptr_t phys;
         int ret;
@@ -632,13 +632,12 @@ static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
                 dirty = dirty || !z_page_frame_is_backed(pf);
         }
 
-        if (dirty || page_fault) {
+        if (dirty || page_in) {
                 arch_mem_scratch(phys);
         }
 
         if (z_page_frame_is_mapped(pf)) {
-                ret = z_backing_store_location_get(pf, location_ptr,
-                                                   page_fault);
+                ret = z_backing_store_location_get(pf, location_ptr);
                 if (ret != 0) {
                         LOG_ERR("out of backing store memory");
                         return -ENOMEM;
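A note on the condition restored above, as far as the hunk itself shows: the scratch mapping is needed whenever the physical frame must be touched directly, whether to write a dirty frame out or to read contents in. A hedged summary, with the rationale inferred from the surrounding code rather than stated in the commit:

    /* Sketch, inferred from the hunk above: arch_mem_scratch(phys)
     * maps the physical frame so the kernel can copy data to or
     * from it.
     *
     *   dirty   -> frame contents must be written out to the store
     *   page_in -> frame will be overwritten with data read in
     *
     * Either way the frame is accessed directly, so both cases
     * need the scratch mapping.
     */
    if (dirty || page_in) {
            arch_mem_scratch(phys);
    }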
@@ -51,7 +51,6 @@
 static char backing_store[CONFIG_MMU_PAGE_SIZE *
                           CONFIG_BACKING_STORE_RAM_PAGES];
 static struct k_mem_slab backing_slabs;
-static unsigned int free_slabs;
 
 static void *location_to_slab(uintptr_t location)
 {
@@ -79,21 +78,17 @@ static uintptr_t slab_to_location(void *slab)
         return offset;
 }
 
-int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location,
-                                 bool page_fault)
+int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location)
 {
         int ret;
         void *slab;
 
-        if ((!page_fault && free_slabs == 1) || free_slabs == 0) {
+        ret = k_mem_slab_alloc(&backing_slabs, &slab, K_NO_WAIT);
+        if (ret != 0) {
                 return -ENOMEM;
         }
 
-        ret = k_mem_slab_alloc(&backing_slabs, &slab, K_NO_WAIT);
-        __ASSERT(ret == 0, "slab count mismatch");
-        (void)ret;
         *location = slab_to_location(slab);
-        free_slabs--;
 
         return 0;
 }
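For reference, the reservation check deleted above worked as follows; the annotations are added for this write-up, and the N = 4 walk-through is an illustrative assumption:

    /* Annotated sketch of the reverted check. With free_slabs
     * tracking the number of unallocated slabs:
     *
     *   - an eviction request (page_fault == false) fails once only
     *     one slab remains, keeping that slab in reserve;
     *   - a page fault (page_fault == true) may consume the last
     *     slab and fails only when none remain.
     *
     * Example, assuming CONFIG_BACKING_STORE_RAM_PAGES == 4:
     * evictions can fill slabs 1-3 and then get -ENOMEM, while a
     * page fault can still claim slab 4.
     */
    if ((!page_fault && free_slabs == 1) || free_slabs == 0) {
            return -ENOMEM;
    }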
@@ -103,7 +98,6 @@ void z_backing_store_location_free(uintptr_t location)
         void *slab = location_to_slab(location);
 
         k_mem_slab_free(&backing_slabs, &slab);
-        free_slabs++;
 }
 
 void z_backing_store_page_out(uintptr_t location)
@@ -127,5 +121,4 @@ void z_backing_store_init(void)
 {
         k_mem_slab_init(&backing_slabs, backing_store, CONFIG_MMU_PAGE_SIZE,
                         CONFIG_BACKING_STORE_RAM_PAGES);
-        free_slabs = CONFIG_BACKING_STORE_RAM_PAGES;
 }
@@ -9,7 +9,7 @@
 #include <mmu.h>
 
 #ifdef CONFIG_BACKING_STORE_RAM_PAGES
-#define EXTRA_PAGES (CONFIG_BACKING_STORE_RAM_PAGES - 1)
+#define EXTRA_PAGES CONFIG_BACKING_STORE_RAM_PAGES
 #else
 #error "Unsupported configuration"
 #endif
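The EXTRA_PAGES change is the arithmetic consequence of the revert: under the reservation scheme one backing store page was never available to ordinary mappings, so the test sized its extra arena with one page subtracted. A worked example, with the page count an assumed value:

    /* Illustrative values only: assume CONFIG_BACKING_STORE_RAM_PAGES == 8.
     *
     * With the reservation (before this revert):
     *     usable pages = 8 - 1 = 7  ->  EXTRA_PAGES = 7
     * Without it (after this revert):
     *     usable pages = 8          ->  EXTRA_PAGES = 8
     */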
@@ -182,41 +182,6 @@ void test_z_mem_unpin(void)
         test_z_mem_page_out();
 }
 
-/* Show that even if we map enough anonymous memory to fill the backing
- * store, we can still handle pagefaults.
- * This eats up memory so should be last in the suite.
- */
-void test_backing_store_capacity(void)
-{
-        char *mem, *ret;
-        int key;
-        unsigned long faults;
-        size_t size = (((CONFIG_BACKING_STORE_RAM_PAGES - 1) - HALF_PAGES) *
-                       CONFIG_MMU_PAGE_SIZE);
-
-        /* Consume the rest of memory */
-        mem = k_mem_map(size, K_MEM_PERM_RW);
-        zassert_not_null(mem, "k_mem_map failed");
-
-        /* Show no memory is left */
-        ret = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
-        zassert_is_null(ret, "k_mem_map shouldn't have succeeded");
-
-        key = irq_lock();
-        faults = z_num_pagefaults_get();
-        /* Poke all anonymous memory */
-        for (size_t i = 0; i < HALF_BYTES; i++) {
-                arena[i] = nums[i % 10];
-        }
-        for (size_t i = 0; i < size; i++) {
-                mem[i] = nums[i % 10];
-        }
-        faults = z_num_pagefaults_get() - faults;
-        irq_unlock(key);
-
-        zassert_not_equal(faults, 0, "should have had some pagefaults");
-}
-
 /* ztest main entry*/
 void test_main(void)
 {
@@ -226,8 +191,7 @@ void test_main(void)
                          ztest_unit_test(test_z_mem_page_out),
                          ztest_unit_test(test_z_mem_page_in),
                          ztest_unit_test(test_z_mem_pin),
-                         ztest_unit_test(test_z_mem_unpin),
-                         ztest_unit_test(test_backing_store_capacity));
+                         ztest_unit_test(test_z_mem_unpin));
 
         ztest_run_test_suite(test_demand_paging);
 }