diff --git a/include/sys/mem_manage.h b/include/sys/mem_manage.h
index 197c3cd2ead..71d68122a88 100644
--- a/include/sys/mem_manage.h
+++ b/include/sys/mem_manage.h
@@ -228,7 +228,7 @@ size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
  * @retval -ENOMEM Insufficient space in backing store to satisfy request.
  *         The region may be partially paged out.
  */
-int k_mem_page_out(void *addr, size_t size);
+int z_mem_page_out(void *addr, size_t size);
 
 /**
  * Load a virtual data region into memory
@@ -243,7 +243,7 @@ int k_mem_page_out(void *addr, size_t size);
  * @param addr Base page-aligned virtual address
  * @param size Page-aligned data region size
  */
-void k_mem_page_in(void *addr, size_t size);
+void z_mem_page_in(void *addr, size_t size);
 
 /**
  * Pin an aligned virtual data region, paging in as necessary
@@ -258,7 +258,7 @@ void k_mem_page_in(void *addr, size_t size);
  * @param addr Base page-aligned virtual address
  * @param size Page-aligned data region size
  */
-void k_mem_pin(void *addr, size_t size);
+void z_mem_pin(void *addr, size_t size);
 
 /**
  * Un-pin an aligned virtual data region
@@ -270,7 +270,7 @@ void k_mem_pin(void *addr, size_t size);
  * @param addr Base page-aligned virtual address
  * @param size Page-aligned data region size
  */
-void k_mem_unpin(void *addr, size_t size);
+void z_mem_unpin(void *addr, size_t size);
 #endif /* CONFIG_DEMAND_PAGING */
 
 #ifdef __cplusplus
diff --git a/kernel/mmu.c b/kernel/mmu.c
index 0c8c61fb719..404203e260c 100644
--- a/kernel/mmu.c
+++ b/kernel/mmu.c
@@ -713,7 +713,7 @@ out:
 	return ret;
 }
 
-int k_mem_page_out(void *addr, size_t size)
+int z_mem_page_out(void *addr, size_t size)
 {
 	__ASSERT(page_frames_initialized, "%s called on %p too early",
 		 __func__, addr);
@@ -920,7 +920,7 @@ static void do_page_in(void *addr)
 	(void)ret;
 }
 
-void k_mem_page_in(void *addr, size_t size)
+void z_mem_page_in(void *addr, size_t size)
 {
 	__ASSERT(!IS_ENABLED(CONFIG_DEMAND_PAGING_ALLOW_IRQ) || !k_is_in_isr(),
 		 "%s may not be called in ISRs if CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled",
@@ -937,7 +937,7 @@ static void do_mem_pin(void *addr)
 	(void)ret;
 }
 
-void k_mem_pin(void *addr, size_t size)
+void z_mem_pin(void *addr, size_t size)
 {
 	__ASSERT(!IS_ENABLED(CONFIG_DEMAND_PAGING_ALLOW_IRQ) || !k_is_in_isr(),
 		 "%s may not be called in ISRs if CONFIG_DEMAND_PAGING_ALLOW_IRQ is enabled",
@@ -990,7 +990,7 @@ static void do_mem_unpin(void *addr)
 	irq_unlock(key);
 }
 
-void k_mem_unpin(void *addr, size_t size)
+void z_mem_unpin(void *addr, size_t size)
 {
 	__ASSERT(page_frames_initialized, "%s called on %p too early",
 		 __func__, addr);
diff --git a/tests/kernel/mem_protect/demand_paging/src/main.c b/tests/kernel/mem_protect/demand_paging/src/main.c
index 432b781ca92..547eff3452a 100644
--- a/tests/kernel/mem_protect/demand_paging/src/main.c
+++ b/tests/kernel/mem_protect/demand_paging/src/main.c
@@ -86,7 +86,7 @@ void test_touch_anon_pages(void)
 	printk("Kernel handled %lu page faults\n", faults);
 }
 
-void test_k_mem_page_out(void)
+void test_z_mem_page_out(void)
 {
 	unsigned long faults;
 	int key, ret;
@@ -96,8 +96,8 @@
 	 */
 	key = irq_lock();
 	faults = z_num_pagefaults_get();
-	ret = k_mem_page_out(arena, HALF_BYTES);
-	zassert_equal(ret, 0, "k_mem_page_out failed with %d", ret);
+	ret = z_mem_page_out(arena, HALF_BYTES);
+	zassert_equal(ret, 0, "z_mem_page_out failed with %d", ret);
 
 	/* Write to the supposedly evicted region */
 	for (size_t i = 0; i < HALF_BYTES; i++) {
@@ -110,12 +110,12 @@
 		      "unexpected num pagefaults expected %lu got %d",
 		      HALF_PAGES, faults);
 
-	ret = k_mem_page_out(arena, arena_size);
-	zassert_equal(ret, -ENOMEM, "k_mem_page_out should have failed");
+	ret = z_mem_page_out(arena, arena_size);
+	zassert_equal(ret, -ENOMEM, "z_mem_page_out should have failed");
 }
 
-void test_k_mem_page_in(void)
+void test_z_mem_page_in(void)
 {
 	unsigned long faults;
 	int key, ret;
@@ -125,10 +125,10 @@
 	 */
 	key = irq_lock();
 
-	ret = k_mem_page_out(arena, HALF_BYTES);
-	zassert_equal(ret, 0, "k_mem_page_out failed with %d", ret);
+	ret = z_mem_page_out(arena, HALF_BYTES);
+	zassert_equal(ret, 0, "z_mem_page_out failed with %d", ret);
 
-	k_mem_page_in(arena, HALF_BYTES);
+	z_mem_page_in(arena, HALF_BYTES);
 	faults = z_num_pagefaults_get();
 
 	/* Write to the supposedly evicted region */
@@ -142,12 +142,12 @@
 		      faults);
 }
 
-void test_k_mem_pin(void)
+void test_z_mem_pin(void)
 {
 	unsigned long faults;
 	int key;
 
-	k_mem_pin(arena, HALF_BYTES);
+	z_mem_pin(arena, HALF_BYTES);
 
 	/* Write to the rest of the arena */
 	for (size_t i = HALF_BYTES; i < arena_size; i++) {
@@ -167,19 +167,19 @@
 		      faults);
 
 	/* Clean up */
-	k_mem_unpin(arena, HALF_BYTES);
+	z_mem_unpin(arena, HALF_BYTES);
 }
 
-void test_k_mem_unpin(void)
+void test_z_mem_unpin(void)
 {
 	/* Pin the memory (which we know works from prior test) */
-	k_mem_pin(arena, HALF_BYTES);
+	z_mem_pin(arena, HALF_BYTES);
 
 	/* Now un-pin it */
-	k_mem_unpin(arena, HALF_BYTES);
+	z_mem_unpin(arena, HALF_BYTES);
 
 	/* repeat the page_out scenario, which should work */
-	test_k_mem_page_out();
+	test_z_mem_page_out();
 }
 
 /* Show that even if we map enough anonymous memory to fill the backing
@@ -223,10 +223,10 @@ void test_main(void)
 	ztest_test_suite(test_demand_paging,
 			 ztest_unit_test(test_map_anon_pages),
 			 ztest_unit_test(test_touch_anon_pages),
-			 ztest_unit_test(test_k_mem_page_out),
-			 ztest_unit_test(test_k_mem_page_in),
-			 ztest_unit_test(test_k_mem_pin),
-			 ztest_unit_test(test_k_mem_unpin),
+			 ztest_unit_test(test_z_mem_page_out),
+			 ztest_unit_test(test_z_mem_page_in),
+			 ztest_unit_test(test_z_mem_pin),
+			 ztest_unit_test(test_z_mem_unpin),
 			 ztest_unit_test(test_backing_store_capacity));
 	ztest_run_test_suite(test_demand_paging);
 }
diff --git a/tests/kernel/mem_protect/mem_map/src/main.c b/tests/kernel/mem_protect/mem_map/src/main.c
index 75881272304..d4d85178eca 100644
--- a/tests/kernel/mem_protect/mem_map/src/main.c
+++ b/tests/kernel/mem_protect/mem_map/src/main.c
@@ -214,7 +214,7 @@ void test_main(void)
 	/* This test sets up multiple mappings of RAM pages, which is only
 	 * allowed for pinned memory
 	 */
-	k_mem_pin(test_page, sizeof(test_page));
+	z_mem_pin(test_page, sizeof(test_page));
 #endif
 	ztest_test_suite(test_mem_map,
 			 ztest_unit_test(test_z_phys_map_rw),