mmu: rename z_mem_map to z_phys_map

Renamed to make its semantics clearer: this function maps
*physical* memory addresses and is not equivalent to
POSIX mmap(); the old name risked confusing the two.

The mem_map test case keeps its current name, as other memory
mapping scenarios will be added to it in the fullness of time.

The parameter names of z_phys_map have been adjusted slightly to be
more consistent with the names used in other memory mapping functions.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Authored by Andrew Boie on 2020-12-15 15:47:18 -08:00; committed by Anas Nashif
commit d2ad783a97
7 changed files with 36 additions and 39 deletions
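
As a reading aid, here is a minimal caller-side sketch of the renamed
function. The MMIO address, size, and the mem_manage.h include path are
illustrative assumptions; the signature and the K_MEM_PERM_RW flag are
taken from the diff below.

#include <zephyr.h>
#include <sys/mem_manage.h>     /* assumed location of the z_phys_map() declaration */

static uint8_t *regs;           /* virtual address chosen by the kernel */

void map_device_registers(void)
{
        /* Unlike POSIX mmap(), the second argument is a *physical* address;
         * the kernel picks the virtual address and returns it through the
         * first (output) argument. Per the implementation below, failure
         * results in a kernel panic rather than an error return.
         */
        z_phys_map(&regs, 0xfed00000UL, 0x1000, K_MEM_PERM_RW);
}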

@@ -35,7 +35,7 @@ static void find_rsdp(void)
         }
 
         if (zero_page_base == NULL) {
-                z_mem_map(&zero_page_base, 0, 4096, K_MEM_PERM_RW);
+                z_phys_map(&zero_page_base, 0, 4096, K_MEM_PERM_RW);
         }
 
         /* Physical (real mode!) address 0000:040e stores a (real

@@ -67,9 +67,9 @@ static bool map_msix_table_entries(pcie_bdf_t bdf,
                 return false;
         }
 
-        z_mem_map((uint8_t **)&mapped_table,
-                  bar.phys_addr + table_offset,
-                  bar.size, K_MEM_PERM_RW);
+        z_phys_map((uint8_t **)&mapped_table,
+                   bar.phys_addr + table_offset,
+                   bar.size, K_MEM_PERM_RW);
 
         for (i = 0; i < n_vector; i++) {
                 vectors[i].msix_vector = (struct msix_vector *)

@@ -88,8 +88,8 @@ static inline void device_map(mm_reg_t *virt_addr, uintptr_t phys_addr,
         /* Pass along flags and add that we want supervisor mode
          * read-write access.
          */
-        z_mem_map((uint8_t **)virt_addr, phys_addr, size,
-                  flags | K_MEM_PERM_RW);
+        z_phys_map((uint8_t **)virt_addr, phys_addr, size,
+                   flags | K_MEM_PERM_RW);
 #else
         ARG_UNUSED(size);
         ARG_UNUSED(flags);
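
To show what this wrapper is for, a hedged sketch of a driver init path
follows. The register base address, size, include paths, and the
K_MEM_CACHE_NONE flag are assumptions for illustration, not part of this
patch; device_map() itself adds supervisor read-write access, per the
hunk above.

#include <device.h>
#include <sys/device_mmio.h>    /* assumed location of device_map() */
#include <sys/sys_io.h>

static mm_reg_t uart_regs;

static int example_uart_init(const struct device *dev)
{
        ARG_UNUSED(dev);

        /* Map the device's physical register window uncached. */
        device_map(&uart_regs, 0xfe215000UL, 0x100, K_MEM_CACHE_NONE);

        /* uart_regs now holds a kernel virtual address usable with
         * sys_read32()/sys_write32().
         */
        (void)sys_read32(uart_regs);

        return 0;
}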

@@ -79,13 +79,13 @@ extern "C" {
  * This API is part of infrastructure still under development and may
  * change.
  *
- * @param linear_addr [out] Output linear address storage location
- * @param phys_addr Physical address base of the memory region
+ * @param virt [out] Output virtual address storage location
+ * @param phys Physical address base of the memory region
  * @param size Size of the memory region
  * @param flags Caching mode and access flags, see K_MAP_* macros
  */
-void z_mem_map(uint8_t **linear_addr, uintptr_t phys_addr, size_t size,
-               uint32_t flags);
+void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
+                uint32_t flags);
 
 /**
  * Given an arbitrary region, provide a aligned region that covers it
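
One caller-visible property worth noting alongside this documentation
(and exercised by the test changes further down): every call creates a
fresh mapping, so mapping the same physical region twice yields two
distinct virtual aliases of the same memory. A hedged sketch follows;
the buffer and the K_MEM_CACHE_WB flag are illustrative, and the
buffer's address is passed directly as the physical address, as the
test code below does with test_page.

static uint8_t some_buf[256];

void alias_example(void)
{
        uint8_t *rw_alias, *ro_alias;

        /* Two calls for the same physical region: two different virtual
         * addresses backed by the same memory, one writable, one read-only.
         */
        z_phys_map(&rw_alias, (uintptr_t)some_buf, sizeof(some_buf),
                   K_MEM_CACHE_WB | K_MEM_PERM_RW);
        z_phys_map(&ro_alias, (uintptr_t)some_buf, sizeof(some_buf),
                   K_MEM_CACHE_WB);

        rw_alias[0] = 42;       /* the write is visible through ro_alias[0] too */
}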

@@ -244,8 +244,6 @@ static inline bool arch_is_in_isr(void);
  * This API is part of infrastructure still under development and may
  * change.
  *
- * @see z_mem_map()
- *
  * @param dest Page-aligned Destination virtual address to map
  * @param addr Page-aligned Source physical address to map
  * @param size Page-aligned size of the mapped memory region in bytes

@@ -48,7 +48,7 @@ static struct k_spinlock mm_lock;
 
 /* Current position for memory mappings in kernel memory.
  * At the moment, all kernel memory mappings are permanent.
- * z_mem_map() mappings start at the end of the address space, and grow
+ * Memory mappings start at the end of the address space, and grow
  * downward.
  *
  * All of this is under heavy development and is subject to change.
@@ -79,8 +79,7 @@ size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
         return addr_offset;
 }
 
-void z_mem_map(uint8_t **virt_addr, uintptr_t phys_addr, size_t size,
-               uint32_t flags)
+void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
 {
         uintptr_t aligned_addr, addr_offset;
         size_t aligned_size;
@@ -89,7 +88,7 @@ void z_mem_map(uint8_t **virt_addr, uintptr_t phys_addr, size_t size,
         uint8_t *dest_virt;
 
         addr_offset = k_mem_region_align(&aligned_addr, &aligned_size,
-                                         phys_addr, size,
+                                         phys, size,
                                          CONFIG_MMU_PAGE_SIZE);
 
         key = k_spin_lock(&mm_lock);
@@ -120,7 +119,7 @@ void z_mem_map(uint8_t **virt_addr, uintptr_t phys_addr, size_t size,
         k_spin_unlock(&mm_lock, key);
 
         if (ret == 0) {
-                *virt_addr = dest_virt + addr_offset;
+                *virt_ptr = dest_virt + addr_offset;
         } else {
                 /* This happens if there is an insurmountable problem
                  * with the selected cache modes or access flags
@@ -133,6 +132,6 @@ void z_mem_map(uint8_t **virt_addr, uintptr_t phys_addr, size_t size,
         return;
 fail:
         LOG_ERR("memory mapping 0x%lx (size %zu, flags 0x%x) failed",
-                phys_addr, size, flags);
+                phys, size, flags);
         k_panic();
 }
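
The alignment handling above is easier to follow with concrete numbers.
This walk-through is illustrative and not code from the patch; the
values are chosen to match the oddly-sized BUF_SIZE used by the test
below, and the include path is an assumption.

#include <sys/mem_manage.h>     /* assumed home of k_mem_region_align() */

void align_example(void)
{
        uintptr_t aligned_addr;
        size_t aligned_size;
        size_t off;

        /* For phys = 0x1234, size = 5003, page size = 4096 (0x1000):
         *   aligned_addr -> 0x1000  (phys rounded down to a page boundary)
         *   off          -> 0x234   (phys - aligned_addr)
         *   aligned_size -> 0x2000  (0x234 + 5003 = 5567, rounded up to 2 pages)
         * z_phys_map() maps the aligned span at some virtual base dest_virt
         * and stores dest_virt + off into *virt_ptr, so the caller's pointer
         * lands on byte 0 of the original, unaligned request.
         */
        off = k_mem_region_align(&aligned_addr, &aligned_size,
                                 0x1234, 5003, 4096);
        (void)off;
}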

@@ -32,7 +32,7 @@ void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
 }
 
-/* z_mem_map() doesn't have alignment requirements, any oddly-sized buffer
+/* z_phys_map() doesn't have alignment requirements, any oddly-sized buffer
  * can get mapped. This will span two pages.
  */
 #define BUF_SIZE 5003
 
@@ -43,7 +43,7 @@ void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
  *
  * @ingroup kernel_memprotect_tests
  */
-void test_z_mem_map_rw(void)
+void test_z_phys_map_rw(void)
 {
         uint8_t *mapped_rw, *mapped_ro;
         uint8_t *buf = test_page + BUF_OFFSET;
@@ -51,8 +51,8 @@ void test_z_mem_map_rw(void)
         expect_fault = false;
 
         /* Map in a page that allows writes */
-        z_mem_map(&mapped_rw, (uintptr_t)buf,
-                  BUF_SIZE, BASE_FLAGS | K_MEM_PERM_RW);
+        z_phys_map(&mapped_rw, (uintptr_t)buf,
+                   BUF_SIZE, BASE_FLAGS | K_MEM_PERM_RW);
 
         /* Initialize buf with some bytes */
         for (int i = 0; i < BUF_SIZE; i++) {
@@ -60,8 +60,8 @@ void test_z_mem_map_rw(void)
         }
 
         /* Map again this time only allowing reads */
-        z_mem_map(&mapped_ro, (uintptr_t)buf,
-                  BUF_SIZE, BASE_FLAGS);
+        z_phys_map(&mapped_ro, (uintptr_t)buf,
+                   BUF_SIZE, BASE_FLAGS);
 
         /* Check that the mapped area contains the expected data. */
         for (int i = 0; i < BUF_SIZE; i++) {
@@ -88,7 +88,7 @@ static void transplanted_function(bool *executed)
  *
  * @ingroup kernel_memprotect_tests
  */
-void test_z_mem_map_exec(void)
+void test_z_phys_map_exec(void)
 {
         uint8_t *mapped_rw, *mapped_exec, *mapped_ro;
         bool executed = false;
@@ -97,22 +97,22 @@ void test_z_mem_map_exec(void)
         expect_fault = false;
 
         /* Map with write permissions and copy the function into the page */
-        z_mem_map(&mapped_rw, (uintptr_t)test_page,
-                  sizeof(test_page), BASE_FLAGS | K_MEM_PERM_RW);
+        z_phys_map(&mapped_rw, (uintptr_t)test_page,
+                   sizeof(test_page), BASE_FLAGS | K_MEM_PERM_RW);
 
         memcpy(mapped_rw, (void *)&transplanted_function, CONFIG_MMU_PAGE_SIZE);
 
         /* Now map with execution enabled and try to run the copied fn */
-        z_mem_map(&mapped_exec, (uintptr_t)test_page,
-                  sizeof(test_page), BASE_FLAGS | K_MEM_PERM_EXEC);
+        z_phys_map(&mapped_exec, (uintptr_t)test_page,
+                   sizeof(test_page), BASE_FLAGS | K_MEM_PERM_EXEC);
 
         func = (void (*)(bool *executed))mapped_exec;
         func(&executed);
         zassert_true(executed, "function did not execute");
 
         /* Now map without execution and execution should now fail */
-        z_mem_map(&mapped_ro, (uintptr_t)test_page,
-                  sizeof(test_page), BASE_FLAGS);
+        z_phys_map(&mapped_ro, (uintptr_t)test_page,
+                   sizeof(test_page), BASE_FLAGS);
 
         func = (void (*)(bool *executed))mapped_ro;
         expect_fault = true;
@@ -122,7 +122,7 @@ void test_z_mem_map_exec(void)
         ztest_test_fail();
 }
 #else
-void test_z_mem_map_exec(void)
+void test_z_phys_map_exec(void)
 {
         ztest_test_skip();
 }
@@ -133,18 +133,18 @@ void test_z_mem_map_exec(void)
  *
  * @ingroup kernel_memprotect_tests
  */
-void test_z_mem_map_side_effect(void)
+void test_z_phys_map_side_effect(void)
 {
         uint8_t *mapped;
 
         expect_fault = false;
 
-        /* z_mem_map() is supposed to always create fresh mappings.
+        /* z_phys_map() is supposed to always create fresh mappings.
          * Show that by mapping test_page to an RO region, we can still
          * modify test_page.
          */
-        z_mem_map(&mapped, (uintptr_t)test_page,
-                  sizeof(test_page), BASE_FLAGS);
+        z_phys_map(&mapped, (uintptr_t)test_page,
+                   sizeof(test_page), BASE_FLAGS);
 
         /* Should NOT fault */
         test_page[0] = 42;
@@ -160,9 +160,9 @@ void test_z_mem_map_side_effect(void)
 void test_main(void)
 {
         ztest_test_suite(test_mem_map,
-                         ztest_unit_test(test_z_mem_map_rw),
-                         ztest_unit_test(test_z_mem_map_exec),
-                         ztest_unit_test(test_z_mem_map_side_effect)
+                         ztest_unit_test(test_z_phys_map_rw),
+                         ztest_unit_test(test_z_phys_map_exec),
+                         ztest_unit_test(test_z_phys_map_side_effect)
                          );
         ztest_run_test_suite(test_mem_map);
 }