drivers: mm: add skeleton build files and common funcs

This adds skeleton Kconfig/CMakeLists.txt and common implementation
of some sys_mm_drv_*() functions.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
This commit is contained in:
Daniel Leung 2021-12-07 10:22:24 -08:00 committed by Anas Nashif
commit 11c3b1d379
7 changed files with 738 additions and 0 deletions

View file

@ -0,0 +1,5 @@
# SPDX-License-Identifier: Apache-2.0
# Build the common memory management driver helpers shared by all MM drivers.
zephyr_library()
zephyr_library_sources(mm_drv_common.c)

20
drivers/mm/Kconfig Normal file
View file

@ -0,0 +1,20 @@
# Copyright (c) 2021 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# Top-level switch for the memory management driver subsystem.
menuconfig MM_DRV
	bool "Memory Management Drivers [EXPERIMENTAL]"
	select EXPERIMENTAL
	select KERNEL_VM_SUPPORT
	help
	  Include Memory Management drivers in system config
if MM_DRV
# NOTE(review): mm_drv_common masks addresses with (MM_DRV_PAGE_SIZE - 1),
# which assumes a power-of-two value — confirm before allowing other values.
config MM_DRV_PAGE_SIZE
	hex "Memory Page Size"
	default 0x1000
	help
	  Size of memory pages.
endif # MM_DRV

436
drivers/mm/mm_drv_common.c Normal file
View file

@ -0,0 +1,436 @@
/*
* Copyright (c) 2021 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Common Memory Management Driver Code
*
* This file provides common implementation of memory management driver
* functions, for example, sys_mm_drv_map_region() can use
* sys_mm_drv_map_page() to map page by page for the whole region.
* This avoids duplicate implementations of same functionality in
* different drivers. The implementations here are marked as
* weak functions so they can be overridden by the driver.
*/
#include <kernel.h>
#include <string.h>
#include <toolchain.h>
#include <sys/__assert.h>
#include <sys/check.h>
#include <sys/util.h>
#include <drivers/mm/system_mm.h>
#include "mm_drv_common.h"
/* Lock serializing all common MM driver operations. */
struct k_spinlock sys_mm_drv_common_lock;

/*
 * Test whether every physical address in the array is page-aligned.
 * Returns false as soon as one misaligned element is found.
 */
bool sys_mm_drv_is_addr_array_aligned(uintptr_t *addr, size_t cnt)
{
	for (size_t idx = 0; idx < cnt; idx++) {
		if (!sys_mm_drv_is_addr_aligned(addr[idx])) {
			return false;
		}
	}

	return true;
}
/*
 * Test whether the whole virtual region [virt, virt + size) is mapped.
 * A region counts as mapped only if every page translates successfully.
 */
bool sys_mm_drv_is_virt_region_mapped(void *virt, size_t size)
{
	size_t offset = 0;

	while (offset < size) {
		uint8_t *va = (uint8_t *)virt + offset;

		/* Non-zero means this page has no valid translation. */
		if (sys_mm_drv_page_phys_get(va, NULL) != 0) {
			return false;
		}

		offset += CONFIG_MM_DRV_PAGE_SIZE;
	}

	return true;
}
/*
 * Test whether the whole virtual region [virt, virt + size) is unmapped.
 * A region counts as unmapped only if every page reports -EFAULT.
 */
bool sys_mm_drv_is_virt_region_unmapped(void *virt, size_t size)
{
	size_t offset = 0;

	while (offset < size) {
		uint8_t *va = (uint8_t *)virt + offset;

		/* Anything other than -EFAULT means the page is mapped. */
		if (sys_mm_drv_page_phys_get(va, NULL) != -EFAULT) {
			return false;
		}

		offset += CONFIG_MM_DRV_PAGE_SIZE;
	}

	return true;
}
int sys_mm_drv_simple_map_region(void *virt, uintptr_t phys,
size_t size, uint32_t flags)
{
k_spinlock_key_t key;
int ret = 0;
size_t offset;
CHECKIF(!sys_mm_drv_is_addr_aligned(phys) ||
!sys_mm_drv_is_virt_addr_aligned(virt) ||
!sys_mm_drv_is_size_aligned(size)) {
ret = -EINVAL;
goto out;
}
key = k_spin_lock(&sys_mm_drv_common_lock);
for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
uint8_t *va = (uint8_t *)virt + offset;
uintptr_t pa = phys + offset;
int ret2 = sys_mm_drv_map_page(va, pa, flags);
if (ret2 != 0) {
__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va);
ret = ret2;
}
}
k_spin_unlock(&sys_mm_drv_common_lock, key);
out:
return ret;
}
__weak FUNC_ALIAS(sys_mm_drv_simple_map_region,
sys_mm_drv_map_region, int);
int sys_mm_drv_simple_map_array(void *virt, uintptr_t *phys,
size_t cnt, uint32_t flags)
{
k_spinlock_key_t key;
int ret = 0;
size_t idx, offset;
CHECKIF(!sys_mm_drv_is_addr_array_aligned(phys, cnt) ||
!sys_mm_drv_is_virt_addr_aligned(virt)) {
ret = -EINVAL;
goto out;
}
key = k_spin_lock(&sys_mm_drv_common_lock);
offset = 0;
idx = 0;
while (idx < cnt) {
uint8_t *va = (uint8_t *)virt + offset;
int ret2 = sys_mm_drv_map_page(va, phys[idx], flags);
if (ret2 != 0) {
__ASSERT(false, "cannot map 0x%lx to %p\n", phys[idx], va);
ret = ret2;
}
offset += CONFIG_MM_DRV_PAGE_SIZE;
idx++;
}
k_spin_unlock(&sys_mm_drv_common_lock, key);
out:
return ret;
}
__weak FUNC_ALIAS(sys_mm_drv_simple_map_array, sys_mm_drv_map_array, int);
int sys_mm_drv_simple_unmap_region(void *virt, size_t size)
{
k_spinlock_key_t key;
int ret = 0;
size_t offset;
CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt) ||
!sys_mm_drv_is_size_aligned(size)) {
ret = -EINVAL;
goto out;
}
key = k_spin_lock(&sys_mm_drv_common_lock);
for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
uint8_t *va = (uint8_t *)virt + offset;
int ret2 = sys_mm_drv_unmap_page(va);
if (ret2 != 0) {
__ASSERT(false, "cannot unmap %p\n", va);
ret = ret2;
}
}
k_spin_unlock(&sys_mm_drv_common_lock, key);
out:
return ret;
}
__weak FUNC_ALIAS(sys_mm_drv_simple_unmap_region,
sys_mm_drv_unmap_region, int);
/*
 * Remap a virtual region to a new virtual location, page by page,
 * carrying over each page's physical frame and mapping flags.
 */
int sys_mm_drv_simple_remap_region(void *virt_old, size_t size,
				   void *virt_new)
{
	k_spinlock_key_t key;
	size_t offset;
	int ret = 0;

	/* Both base addresses and the region size must be page-aligned. */
	CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt_old) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_new) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Reject a new base that falls inside the old region. The opposite
	 * overlap (old base inside the new region) is rejected below by the
	 * unmapped check, since those destination pages are still mapped.
	 */
	if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) &&
	    (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) {
		ret = -EINVAL; /* overlaps */
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	/* Source must be fully mapped, destination fully unmapped. */
	if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) ||
	    !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) {
		ret = -EINVAL;
		goto unlock_out;
	}

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va_old = (uint8_t *)virt_old + offset;
		uint8_t *va_new = (uint8_t *)virt_new + offset;
		uintptr_t pa;
		uint32_t flags;
		int ret2;
		bool to_map;

		/*
		 * va_old is mapped as checked above, so no need
		 * to check for return value here.
		 */
		(void)sys_mm_drv_page_phys_get(va_old, &pa);

		to_map = true;
		ret2 = sys_mm_drv_page_flag_get(va_old, &flags);
		if (ret2 != 0) {
			__ASSERT(false, "cannot query page %p\n", va_old);

			ret = ret2;
			to_map = false;
		}

		/* The old page is torn down even if the flag query failed. */
		ret2 = sys_mm_drv_unmap_page(va_old);
		if (ret2 != 0) {
			__ASSERT(false, "cannot unmap %p\n", va_old);

			ret = ret2;
		}

		if (!to_map) {
			/*
			 * Cannot retrieve flags of mapped virtual memory.
			 * Skip mapping this page as we don't want to map
			 * with unknown random flags.
			 */
			continue;
		}

		ret2 = sys_mm_drv_map_page(va_new, pa, flags);
		if (ret2 != 0) {
			__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new);

			ret = ret2;
		}
	}

unlock_out:
	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}
__weak FUNC_ALIAS(sys_mm_drv_simple_remap_region,
		  sys_mm_drv_remap_region, int);
/*
 * Move a region's contents to new physical pages mapped at a new
 * virtual location: for each page, map the new page with the old
 * page's flags, copy the data, then unmap the old page.
 */
int sys_mm_drv_simple_move_region(void *virt_old, size_t size,
				  void *virt_new, uintptr_t phys_new)
{
	k_spinlock_key_t key;
	size_t offset;
	int ret = 0;

	/* All addresses and the region size must be page-aligned. */
	CHECKIF(!sys_mm_drv_is_addr_aligned(phys_new) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_old) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_new) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Reject a new base that falls inside the old region; the opposite
	 * overlap is rejected below by the unmapped check.
	 */
	if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) &&
	    (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) {
		ret = -EINVAL; /* overlaps */
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	/* Source must be fully mapped, destination fully unmapped. */
	if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) ||
	    !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) {
		ret = -EINVAL;
		goto unlock_out;
	}

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va_old = (uint8_t *)virt_old + offset;
		uint8_t *va_new = (uint8_t *)virt_new + offset;
		uintptr_t pa = phys_new + offset;
		uint32_t flags;
		int ret2;

		ret2 = sys_mm_drv_page_flag_get(va_old, &flags);
		if (ret2 != 0) {
			__ASSERT(false, "cannot query page %p\n", va_old);

			ret = ret2;
		} else {
			/*
			 * Only map the new page when we can retrieve
			 * flags of the old mapped page as we don't
			 * want to map with unknown random flags.
			 */
			ret2 = sys_mm_drv_map_page(va_new, pa, flags);
			if (ret2 != 0) {
				__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new);

				ret = ret2;
			} else {
				/* Copy before the old mapping disappears. */
				(void)memcpy(va_new, va_old,
					     CONFIG_MM_DRV_PAGE_SIZE);
			}
		}

		/* Old page is unmapped regardless of earlier failures. */
		ret2 = sys_mm_drv_unmap_page(va_old);
		if (ret2 != 0) {
			__ASSERT(false, "cannot unmap %p\n", va_old);

			ret = ret2;
		}
	}

unlock_out:
	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}
__weak FUNC_ALIAS(sys_mm_drv_simple_move_region,
		  sys_mm_drv_move_region, int);
/*
 * Move a region's contents to an array of new physical pages mapped at
 * a new virtual location: per page, map the new page with the old
 * page's flags, copy the data, then unmap the old page.
 *
 * NOTE(review): the loop iterates phys_cnt times while the region
 * checks use size — assumes size == phys_cnt * CONFIG_MM_DRV_PAGE_SIZE;
 * confirm against callers.
 */
int sys_mm_drv_simple_move_array(void *virt_old, size_t size,
				 void *virt_new,
				 uintptr_t *phys_new, size_t phys_cnt)
{
	k_spinlock_key_t key;
	size_t idx, offset;
	int ret = 0;

	/* Virtual bases, size and every physical page must be aligned. */
	CHECKIF(!sys_mm_drv_is_addr_array_aligned(phys_new, phys_cnt) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_old) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_new) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Reject a new base that falls inside the old region; the opposite
	 * overlap is rejected below by the unmapped check.
	 */
	if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) &&
	    (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) {
		ret = -EINVAL; /* overlaps */
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	/* Source must be fully mapped, destination fully unmapped. */
	if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) ||
	    !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) {
		ret = -EINVAL;
		goto unlock_out;
	}

	offset = 0;
	idx = 0;
	while (idx < phys_cnt) {
		uint8_t *va_old = (uint8_t *)virt_old + offset;
		uint8_t *va_new = (uint8_t *)virt_new + offset;
		uint32_t flags;
		int ret2;

		ret2 = sys_mm_drv_page_flag_get(va_old, &flags);
		if (ret2 != 0) {
			__ASSERT(false, "cannot query page %p\n", va_old);

			ret = ret2;
		} else {
			/*
			 * Only map the new page when we can retrieve
			 * flags of the old mapped page as we don't
			 * want to map with unknown random flags.
			 */
			ret2 = sys_mm_drv_map_page(va_new, phys_new[idx], flags);
			if (ret2 != 0) {
				__ASSERT(false, "cannot map 0x%lx to %p\n",
					 phys_new[idx], va_new);

				ret = ret2;
			} else {
				/* Copy before the old mapping disappears. */
				(void)memcpy(va_new, va_old,
					     CONFIG_MM_DRV_PAGE_SIZE);
			}
		}

		/* Old page is unmapped regardless of earlier failures. */
		ret2 = sys_mm_drv_unmap_page(va_old);
		if (ret2 != 0) {
			__ASSERT(false, "cannot unmap %p\n", va_old);

			ret = ret2;
		}

		offset += CONFIG_MM_DRV_PAGE_SIZE;
		idx++;
	}

unlock_out:
	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}
__weak FUNC_ALIAS(sys_mm_drv_simple_move_array,
		  sys_mm_drv_move_array, int);

273
drivers/mm/mm_drv_common.h Normal file
View file

@ -0,0 +1,273 @@
/*
* Copyright (c) 2021 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_DRIVERS_SYSTEM_MM_DRV_COMMON_H_
#define ZEPHYR_DRIVERS_SYSTEM_MM_DRV_COMMON_H_
#include <kernel.h>
#include <toolchain.h>
#include <drivers/mm/system_mm.h>
extern struct k_spinlock sys_mm_drv_common_lock;
/**
* @brief Get the flags of mapped virtual address.
*
* The function queries the translation tables to find the flags of
* a mapped virtual address. This is used internally for remapping.
*
* Behavior when providing unaligned address is undefined, this
* is assumed to be page aligned.
*
* @param virt Page-aligned virtual address
* @param[out] flags flags of mapped virtual address
*
* @retval 0 if mapping is found and valid
* @retval -EINVAL if invalid arguments are provided
* @retval -EFAULT if virtual address is not mapped
*/
int sys_mm_drv_page_flag_get(void *virt, uint32_t *flags);
/**
 * @brief Test if an address is aligned to the configured page size
 *
 * @param addr Address to be tested
 *
 * @retval true if page-aligned
 * @retval false if not page-aligned
 */
static inline bool sys_mm_drv_is_addr_aligned(uintptr_t addr)
{
	uintptr_t page_mask = (uintptr_t)CONFIG_MM_DRV_PAGE_SIZE - 1U;

	return (addr & page_mask) == 0U;
}
/**
 * @brief Test if a virtual address is aligned to the page size
 *
 * @param virt Virtual address to be tested
 *
 * @retval true if page-aligned
 * @retval false if not page-aligned
 */
static inline bool sys_mm_drv_is_virt_addr_aligned(void *virt)
{
	uintptr_t addr = POINTER_TO_UINT(virt);

	return sys_mm_drv_is_addr_aligned(addr);
}
/**
 * @brief Test if a size is aligned to the configured page size
 *
 * @param size Size to be tested
 *
 * @retval true if page-aligned
 * @retval false if not page-aligned
 */
static inline bool sys_mm_drv_is_size_aligned(size_t size)
{
	/* Aligned when the page-offset bits are all zero. */
	return (size & (CONFIG_MM_DRV_PAGE_SIZE - 1)) == 0U;
}
/**
* @brief Test if all physical addresses in array are page-aligned
*
* @param addr Array of physical addresses
* @param cnt Number of elements in the array
*
* @retval true if all are page-aligned
* @retval false if at least one is not page-aligned
*/
bool sys_mm_drv_is_addr_array_aligned(uintptr_t *addr, size_t cnt);
/**
* @brief Test if the virtual memory region is mapped
*
* @param virt Page-aligned base virtual address
* @param size Size of the virtual memory region
*
* @retval true if all pages in the region are mapped
* @retval false if at least one page is not mapped
*/
bool sys_mm_drv_is_virt_region_mapped(void *virt, size_t size);
/**
* @brief Test if the virtual memory region is unmapped
*
* @param virt Page-aligned base virtual address
* @param size Size of the virtual memory region
*
* @retval true if all pages in the region are unmapped
* @retval false if at least one page is mapped
*/
bool sys_mm_drv_is_virt_region_unmapped(void *virt, size_t size);
/**
* @brief Simple implementation of sys_mm_drv_map_region()
*
This provides a simple implementation for sys_mm_drv_map_region();
sys_mm_drv_map_region() is defined as a weak alias of this function.
*
* Drivers do not have to implement their own sys_mm_drv_map_region()
* if this works for them. Or they can override sys_mm_drv_map_region()
* and call sys_mm_drv_simple_map_region() with some pre-processing done.
* Or the drivers can implement their own sys_mm_drv_map_region(), then
* this function will not be used.
*
* @see sys_mm_drv_map_region
*
* @param virt Page-aligned destination virtual address to map
* @param phys Page-aligned source physical address to map
* @param size Page-aligned size of the mapped memory region in bytes
* @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
*
* @retval 0 if successful
* @retval -EINVAL if invalid arguments are provided
* @retval -EFAULT if any virtual addresses have already been mapped
*/
int sys_mm_drv_simple_map_region(void *virt, uintptr_t phys,
size_t size, uint32_t flags);
/**
* @brief Simple implementation of sys_mm_drv_map_array()
*
This provides a simple implementation for sys_mm_drv_map_array();
sys_mm_drv_map_array() is defined as a weak alias of this function.
*
* Drivers do not have to implement their own sys_mm_drv_map_array()
* if this works for them. Or they can override sys_mm_drv_map_array()
* and call sys_mm_drv_simple_map_array() with some pre-processing done.
* Or the drivers can implement their own sys_mm_drv_map_array(), then
* this function will not be used.
*
* @see sys_mm_drv_map_array
*
* @param virt Page-aligned destination virtual address to map
@param phys Array of page-aligned source physical addresses to map
* @param cnt Number of elements in the physical page array
* @param flags Caching, access and control flags, see SYS_MM_MEM_* macros
*
* @retval 0 if successful
* @retval -EINVAL if invalid arguments are provided
* @retval -EFAULT if any virtual addresses have already been mapped
*/
int sys_mm_drv_simple_map_array(void *virt, uintptr_t *phys,
size_t cnt, uint32_t flags);
/**
* @brief Simple implementation of sys_mm_drv_unmap_region()
*
This provides a simple implementation for sys_mm_drv_unmap_region();
sys_mm_drv_unmap_region() is defined as a weak alias of this function.
*
* Drivers do not have to implement their own sys_mm_drv_unmap_region()
* if this works for them. Or they can override sys_mm_drv_unmap_region()
* and call sys_mm_drv_simple_unmap_region() with some pre-processing done.
* Or the drivers can implement their own sys_mm_drv_unmap_region(), then
* this function will not be used.
*
* @see sys_mm_drv_unmap_region
*
* @param virt Page-aligned base virtual address to un-map
* @param size Page-aligned region size
*
* @retval 0 if successful
* @retval -EINVAL if invalid arguments are provided
@retval -EFAULT if virtual addresses are not mapped
*/
int sys_mm_drv_simple_unmap_region(void *virt, size_t size);
/**
* @brief Simple implementation of sys_mm_drv_remap_region()
*
This provides a simple implementation for sys_mm_drv_remap_region();
sys_mm_drv_remap_region() is defined as a weak alias of this function.
*
* Drivers do not have to implement their own sys_mm_drv_remap_region()
* if this works for them. Or they can override sys_mm_drv_remap_region()
* and call sys_mm_drv_simple_remap_region() with some pre-processing done.
* Or the drivers can implement their own sys_mm_drv_remap_region(), then
* this function will not be used.
*
* @see sys_mm_drv_remap_region
*
* @param virt_old Page-aligned base virtual address of existing memory
* @param size Page-aligned size of the mapped memory region in bytes
* @param virt_new Page-aligned base virtual address to which to remap
* the memory
*
* @retval 0 if successful
* @retval -EINVAL if invalid arguments are provided
* @retval -EFAULT if old virtual addresses are not all mapped or
* new virtual addresses are not all unmapped
*/
int sys_mm_drv_simple_remap_region(void *virt_old, size_t size,
void *virt_new);
/**
* @brief Simple implementation of sys_mm_drv_move_region()
*
This provides a simple implementation for sys_mm_drv_move_region();
sys_mm_drv_move_region() is defined as a weak alias of this function.
*
* Drivers do not have to implement their own sys_mm_drv_move_region()
* if this works for them. Or they can override sys_mm_drv_move_region()
* and call sys_mm_drv_simple_move_region() with some pre-processing done.
* Or the drivers can implement their own sys_mm_drv_move_region(), then
* this function will not be used.
*
* @see sys_mm_drv_move_region
*
* @param virt_old Page-aligned base virtual address of existing memory
* @param size Page-aligned size of the mapped memory region in bytes
* @param virt_new Page-aligned base virtual address to which to map
* new physical pages
* @param phys_new Page-aligned base physical address to contain
* the moved memory
*
* @retval 0 if successful
* @retval -EINVAL if invalid arguments are provided
* @retval -EFAULT if old virtual addresses are not all mapped or
* new virtual addresses are not all unmapped
*/
int sys_mm_drv_simple_move_region(void *virt_old, size_t size,
void *virt_new, uintptr_t phys_new);
/**
* @brief Simple implementation of sys_mm_drv_move_array()
*
This provides a simple implementation for sys_mm_drv_move_array();
sys_mm_drv_move_array() is defined as a weak alias of this function.
*
* Drivers do not have to implement their own sys_mm_drv_move_array()
* if this works for them. Or they can override sys_mm_drv_move_array()
* and call sys_mm_drv_simple_move_array() with some pre-processing done.
* Or the drivers can implement their own sys_mm_drv_move_array(), then
* this function will not be used.
*
* @see sys_mm_drv_move_array
*
* @param virt_old Page-aligned base virtual address of existing memory
* @param size Page-aligned size of the mapped memory region in bytes
* @param virt_new Page-aligned base virtual address to which to map
* new physical pages
* @param phys_new Array of page-aligned physical address to contain
* the moved memory
* @param phys_cnt Number of elements in the physical page array
*
* @retval 0 if successful
* @retval -EINVAL if invalid arguments are provided
* @retval -EFAULT if old virtual addresses are not all mapped or
* new virtual addresses are not all unmapped
*/
int sys_mm_drv_simple_move_array(void *virt_old, size_t size,
void *virt_new,
uintptr_t *phys_new, size_t phys_cnt);
#endif /* ZEPHYR_DRIVERS_SYSTEM_MM_DRV_COMMON_H_ */