mmu: arch_mem_map() may no longer fail
Pre-allocation of paging structures is now required, such that no
allocations are ever needed when mapping memory. Instantiation of new
memory domains may still require allocations unless a common page
table is used.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
parent 64f05d443a
commit db56722729

3 changed files with 14 additions and 23 deletions
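For scale, a back-of-the-envelope illustration of why the paging structures can simply be reserved up front rather than allocated on demand. The numbers and EX_* names below are invented for the example; they are not taken from this commit or from any particular architecture.

/* Illustrative arithmetic only: hypothetical two-level MMU with 4 KiB
 * pages and 1024 entries per leaf page table.
 *
 *   pages needed = 8 MiB / 4 KiB = 2048
 *   leaf tables  = 2048 / 1024   = 2
 *   static cost  = 2 * 4 KiB     = 8 KiB (plus one top-level directory)
 */
#define EX_VM_SIZE        (8U * 1024U * 1024U) /* stand-in for CONFIG_KERNEL_VM_SIZE */
#define EX_PAGE_SIZE      4096U
#define EX_ENTRIES_PER_PT 1024U

#define EX_NUM_LEAF_TABLES \
	(((EX_VM_SIZE / EX_PAGE_SIZE) + EX_ENTRIES_PER_PT - 1U) / EX_ENTRIES_PER_PT)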
@@ -1084,7 +1084,7 @@ static pentry_t flags_to_entry(uint32_t flags)
 	case K_MEM_CACHE_WB:
 		break;
 	default:
-		return -ENOTSUP;
+		__ASSERT(false, "bad memory mapping flags 0x%x", flags);
 	}
 
 	if ((flags & K_MEM_PERM_RW) != 0U) {
@@ -1103,12 +1103,10 @@ static pentry_t flags_to_entry(uint32_t flags)
 }
 
 /* map new region virt..virt+size to phys with provided arch-neutral flags */
-int arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
+void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 {
 	range_map_unlocked(virt, phys, size, flags_to_entry(flags),
 			   MASK_ALL, 0);
-
-	return 0;
 }
 
 #if CONFIG_X86_STACK_PROTECTION
@@ -241,6 +241,13 @@ static inline bool arch_is_in_isr(void);
  * to this API are assumed to be serialized, and indeed all usage will
  * originate from kernel/mm.c which handles virtual memory management.
  *
+ * Architectures are expected to pre-allocate page tables for the entire
+ * address space, as defined by CONFIG_KERNEL_VM_BASE and
+ * CONFIG_KERNEL_VM_SIZE. This operation should never require any kind of
+ * allocation for paging structures.
+ *
+ * Validation of arguments should be done via assertions.
+ *
  * This API is part of infrastructure still under development and may
  * change.
  *
@@ -248,12 +255,8 @@ static inline bool arch_is_in_isr(void);
  * @param addr Page-aligned Source physical address to map
  * @param size Page-aligned size of the mapped memory region in bytes
  * @param flags Caching, access and control flags, see K_MAP_* macros
- * @retval 0 Success
- * @retval -ENOTSUP Unsupported cache mode with no suitable fallback, or
- *         unsupported flags
- * @retval -ENOMEM Memory for additional paging structures unavailable
  */
-int arch_mem_map(void *dest, uintptr_t addr, size_t size, uint32_t flags);
+void arch_mem_map(void *dest, uintptr_t addr, size_t size, uint32_t flags);
 
 /**
  * Remove mappings for a provided virtual address range
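The doc comment added above spells out the new contract: page tables covering CONFIG_KERNEL_VM_BASE..CONFIG_KERNEL_VM_SIZE are pre-allocated, and argument validation is done with assertions instead of return codes. A minimal sketch of what that could look like in an architecture port follows; the flat single-level table, the HYP_* names, and the fallback CONFIG_* values are assumptions made for illustration, not the x86 code touched by this commit.

#include <stdint.h>
#include <stddef.h>
#include <sys/__assert.h>

/* Fallback values so this sketch stands alone; a real build takes them
 * from Kconfig.
 */
#ifndef CONFIG_KERNEL_VM_BASE
#define CONFIG_KERNEL_VM_BASE 0x80000000UL
#endif
#ifndef CONFIG_KERNEL_VM_SIZE
#define CONFIG_KERNEL_VM_SIZE 0x00800000UL /* 8 MiB */
#endif

#define HYP_PAGE_SIZE 4096U

/* One entry per page of the kernel virtual region, reserved at build
 * time, so mapping never has to allocate paging structures.
 */
static uintptr_t hyp_page_table[CONFIG_KERNEL_VM_SIZE / HYP_PAGE_SIZE];

void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
{
	uintptr_t va = (uintptr_t)virt;

	(void)flags; /* flag-to-entry translation elided in this sketch */

	/* Bad arguments are programming errors: catch them with
	 * assertions rather than a return code.
	 */
	__ASSERT((va % HYP_PAGE_SIZE) == 0U && (size % HYP_PAGE_SIZE) == 0U,
		 "unaligned mapping request");
	__ASSERT(va >= CONFIG_KERNEL_VM_BASE &&
		 (va - CONFIG_KERNEL_VM_BASE) + size <= CONFIG_KERNEL_VM_SIZE,
		 "mapping outside the pre-allocated kernel virtual region");

	for (size_t off = 0; off < size; off += HYP_PAGE_SIZE) {
		size_t idx = (va - CONFIG_KERNEL_VM_BASE + off) / HYP_PAGE_SIZE;

		/* The table slot already exists; writing it cannot fail. */
		hyp_page_table[idx] = (phys + off) | 0x1UL; /* "present" bit */
	}
}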
kernel/mmu.c (18 lines changed)

@@ -276,7 +276,6 @@ static void frame_mapped_set(struct z_page_frame *pf, void *addr)
  */
 static int map_anon_page(void *addr, uint32_t flags)
 {
-	int ret;
 	struct z_page_frame *pf;
 	uintptr_t phys;
 	bool lock = (flags & K_MEM_MAP_LOCK) != 0;
@@ -285,14 +284,10 @@ static int map_anon_page(void *addr, uint32_t flags)
 	if (pf == NULL) {
 		return -ENOMEM;
 	}
-	phys = z_page_frame_to_phys(pf);
 
-	ret = arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE,
-			   flags | K_MEM_CACHE_WB);
-	if (ret != 0) {
-		free_page_frame_list_put(pf);
-		return -ENOMEM;
-	}
+	phys = z_page_frame_to_phys(pf);
+	arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE, flags | K_MEM_CACHE_WB);
+
 	if (lock) {
 		pf->flags |= Z_PAGE_FRAME_PINNED;
 	}
@@ -385,7 +380,6 @@ void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
 {
 	uintptr_t aligned_phys, addr_offset;
 	size_t aligned_size;
-	int ret;
 	k_spinlock_key_t key;
 	uint8_t *dest_addr;
 
@@ -413,11 +407,7 @@ void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
 	LOG_DBG("arch_mem_map(%p, 0x%lx, %zu, %x) offset %lu", dest_addr,
 		aligned_phys, aligned_size, flags, addr_offset);
 
-	ret = arch_mem_map(dest_addr, aligned_phys, aligned_size, flags);
-	if (ret != 0) {
-		LOG_ERR("arch_mem_map() failed with %d", ret);
-		goto fail;
-	}
+	arch_mem_map(dest_addr, aligned_phys, aligned_size, flags);
 	k_spin_unlock(&z_mm_lock, key);
 
 	*virt_ptr = dest_addr + addr_offset;