mmu: promote public APIs

These are application-facing and are now prefixed with k_.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Commit 6c97ab3167 by Andrew Boie, 2021-01-20 17:03:13 -08:00; committed by Anas Nashif.
4 changed files with 29 additions and 29 deletions.
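For context, a minimal usage sketch of the promoted APIs (not part of the commit). The buffer name, the size, and the <sys/mem_manage.h> include path are illustrative assumptions:

#include <zephyr.h>
#include <errno.h>
#include <sys/mem_manage.h>   /* assumed header for these APIs */

/* Hypothetical buffer; pageable regions must be page-aligned
 * and a multiple of the page size.
 */
#define BUF_BYTES (4 * CONFIG_MMU_PAGE_SIZE)
static uint8_t buf[BUF_BYTES] __aligned(CONFIG_MMU_PAGE_SIZE);

void demo(void)
{
	int ret;

	/* Evict the region to the backing store to free page frames */
	ret = k_mem_page_out(buf, BUF_BYTES);
	if (ret == -ENOMEM) {
		/* Backing store full; region may be partially paged out */
	}

	/* Fault the region back in ahead of a latency-sensitive path */
	k_mem_page_in(buf, BUF_BYTES);

	/* Pin while the data must stay resident (e.g. ISR access),
	 * then release it back to the pageable pool.
	 */
	k_mem_pin(buf, BUF_BYTES);
	k_mem_unpin(buf, BUF_BYTES);
}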


@@ -228,7 +228,7 @@ size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
* @retval -ENOMEM Insufficient space in backing store to satisfy request.
* The region may be partially paged out.
*/
-int z_mem_page_out(void *addr, size_t size);
+int k_mem_page_out(void *addr, size_t size);
/**
* Load a virtual data region into memory
@@ -243,7 +243,7 @@ int z_mem_page_out(void *addr, size_t size);
* @param addr Base page-aligned virtual address
* @param size Page-aligned data region size
*/
-void z_mem_page_in(void *addr, size_t size);
+void k_mem_page_in(void *addr, size_t size);
/**
* Pin an aligned virtual data region, paging in as necessary
@@ -258,7 +258,7 @@ void z_mem_page_in(void *addr, size_t size);
* @param addr Base page-aligned virtual address
* @param size Page-aligned data region size
*/
-void z_mem_pin(void *addr, size_t size);
+void k_mem_pin(void *addr, size_t size);
/**
* Un-pin an aligned virtual data region
@@ -270,7 +270,7 @@ void z_mem_pin(void *addr, size_t size);
* @param addr Base page-aligned virtual address
* @param size Page-aligned data region size
*/
-void z_mem_unpin(void *addr, size_t size);
+void k_mem_unpin(void *addr, size_t size);
#endif /* CONFIG_DEMAND_PAGING */
#ifdef __cplusplus


@@ -713,7 +713,7 @@ out:
return ret;
}
-int z_mem_page_out(void *addr, size_t size)
+int k_mem_page_out(void *addr, size_t size)
{
__ASSERT(page_frames_initialized, "%s called on %p too early", __func__,
addr);
@@ -920,7 +920,7 @@ static void do_page_in(void *addr)
(void)ret;
}
-void z_mem_page_in(void *addr, size_t size)
+void k_mem_page_in(void *addr, size_t size)
{
__ASSERT(!IS_ENABLED(CONFIG_DEMAND_PAGING_ALLOW_IRQ) || !k_is_in_isr(),
"%s may not be called in ISRs if CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled",
@@ -937,7 +937,7 @@ static void do_mem_pin(void *addr)
(void)ret;
}
-void z_mem_pin(void *addr, size_t size)
+void k_mem_pin(void *addr, size_t size)
{
__ASSERT(!IS_ENABLED(CONFIG_DEMAND_PAGING_ALLOW_IRQ) || !k_is_in_isr(),
"%s may not be called in ISRs if CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled",
@@ -990,7 +990,7 @@ static void do_mem_unpin(void *addr)
irq_unlock(key);
}
-void z_mem_unpin(void *addr, size_t size)
+void k_mem_unpin(void *addr, size_t size)
{
__ASSERT(page_frames_initialized, "%s called on %p too early", __func__,
addr);


@@ -86,7 +86,7 @@ void test_touch_anon_pages(void)
printk("Kernel handled %lu page faults\n", faults);
}
-void test_z_mem_page_out(void)
+void test_k_mem_page_out(void)
{
unsigned long faults;
int key, ret;
@@ -96,8 +96,8 @@ void test_z_mem_page_out(void)
*/
key = irq_lock();
faults = z_num_pagefaults_get();
-ret = z_mem_page_out(arena, HALF_BYTES);
-zassert_equal(ret, 0, "z_mem_page_out failed with %d", ret);
+ret = k_mem_page_out(arena, HALF_BYTES);
+zassert_equal(ret, 0, "k_mem_page_out failed with %d", ret);
/* Write to the supposedly evicted region */
for (size_t i = 0; i < HALF_BYTES; i++) {
@@ -110,12 +110,12 @@ void test_z_mem_page_out(void)
"unexpected num pagefaults expected %lu got %d",
HALF_PAGES, faults);
-ret = z_mem_page_out(arena, arena_size);
-zassert_equal(ret, -ENOMEM, "z_mem_page_out should have failed");
+ret = k_mem_page_out(arena, arena_size);
+zassert_equal(ret, -ENOMEM, "k_mem_page_out should have failed");
}
-void test_z_mem_page_in(void)
+void test_k_mem_page_in(void)
{
unsigned long faults;
int key, ret;
@@ -125,10 +125,10 @@ void test_z_mem_page_in(void)
*/
key = irq_lock();
-ret = z_mem_page_out(arena, HALF_BYTES);
-zassert_equal(ret, 0, "z_mem_page_out failed with %d", ret);
+ret = k_mem_page_out(arena, HALF_BYTES);
+zassert_equal(ret, 0, "k_mem_page_out failed with %d", ret);
-z_mem_page_in(arena, HALF_BYTES);
+k_mem_page_in(arena, HALF_BYTES);
faults = z_num_pagefaults_get();
/* Write to the supposedly evicted region */
@@ -142,12 +142,12 @@ void test_z_mem_page_in(void)
faults);
}
-void test_z_mem_pin(void)
+void test_k_mem_pin(void)
{
unsigned long faults;
int key;
-z_mem_pin(arena, HALF_BYTES);
+k_mem_pin(arena, HALF_BYTES);
/* Write to the rest of the arena */
for (size_t i = HALF_BYTES; i < arena_size; i++) {
@@ -167,19 +167,19 @@ void test_z_mem_pin(void)
faults);
/* Clean up */
-z_mem_unpin(arena, HALF_BYTES);
+k_mem_unpin(arena, HALF_BYTES);
}
-void test_z_mem_unpin(void)
+void test_k_mem_unpin(void)
{
/* Pin the memory (which we know works from prior test) */
-z_mem_pin(arena, HALF_BYTES);
+k_mem_pin(arena, HALF_BYTES);
/* Now un-pin it */
-z_mem_unpin(arena, HALF_BYTES);
+k_mem_unpin(arena, HALF_BYTES);
/* repeat the page_out scenario, which should work */
-test_z_mem_page_out();
+test_k_mem_page_out();
}
/* Show that even if we map enough anonymous memory to fill the backing
@@ -223,10 +223,10 @@ void test_main(void)
ztest_test_suite(test_demand_paging,
ztest_unit_test(test_map_anon_pages),
ztest_unit_test(test_touch_anon_pages),
-ztest_unit_test(test_z_mem_page_out),
-ztest_unit_test(test_z_mem_page_in),
-ztest_unit_test(test_z_mem_pin),
-ztest_unit_test(test_z_mem_unpin),
+ztest_unit_test(test_k_mem_page_out),
+ztest_unit_test(test_k_mem_page_in),
+ztest_unit_test(test_k_mem_pin),
+ztest_unit_test(test_k_mem_unpin),
ztest_unit_test(test_backing_store_capacity));
ztest_run_test_suite(test_demand_paging);


@@ -214,7 +214,7 @@ void test_main(void)
/* This test sets up multiple mappings of RAM pages, which is only
* allowed for pinned memory
*/
-z_mem_pin(test_page, sizeof(test_page));
+k_mem_pin(test_page, sizeof(test_page));
#endif
ztest_test_suite(test_mem_map,
ztest_unit_test(test_z_phys_map_rw),