Revert "mmu: arch_mem_map() may no longer fail"

This reverts commit db56722729.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
Author: Anas Nashif <anas.nashif@intel.com>
Date:   2021-01-22 07:37:32 -05:00
Commit: a2ec139bf7

3 changed files with 22 additions and 13 deletions


@@ -1084,7 +1084,7 @@ static pentry_t flags_to_entry(uint32_t flags)
 	case K_MEM_CACHE_WB:
 		break;
 	default:
-		__ASSERT(false, "bad memory mapping flags 0x%x", flags);
+		return -ENOTSUP;
 	}
 	if ((flags & K_MEM_PERM_RW) != 0U) {
@@ -1103,10 +1103,12 @@ static pentry_t flags_to_entry(uint32_t flags)
 }
 /* map new region virt..virt+size to phys with provided arch-neutral flags */
-void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
+int arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 {
 	range_map_unlocked(virt, phys, size, flags_to_entry(flags),
 			   MASK_ALL, 0);
+	return 0;
 }
 #if CONFIG_X86_STACK_PROTECTION


@@ -241,13 +241,6 @@ static inline bool arch_is_in_isr(void);
  * to this API are assumed to be serialized, and indeed all usage will
  * originate from kernel/mm.c which handles virtual memory management.
  *
- * Architectures are expected to pre-allocate page tables for the entire
- * address space, as defined by CONFIG_KERNEL_VM_BASE and
- * CONFIG_KERNEL_VM_SIZE. This operation should never require any kind of
- * allocation for paging structures.
- *
- * Validation of arguments should be done via assertions.
- *
  * This API is part of infrastructure still under development and may
  * change.
  *
@@ -255,8 +248,12 @@ static inline bool arch_is_in_isr(void);
  * @param addr Page-aligned Source physical address to map
  * @param size Page-aligned size of the mapped memory region in bytes
  * @param flags Caching, access and control flags, see K_MAP_* macros
+ * @retval 0 Success
+ * @retval -ENOTSUP Unsupported cache mode with no suitable fallback, or
+ *         unsupported flags
+ * @retval -ENOMEM Memory for additional paging structures unavailable
  */
-void arch_mem_map(void *dest, uintptr_t addr, size_t size, uint32_t flags);
+int arch_mem_map(void *dest, uintptr_t addr, size_t size, uint32_t flags);
 /**
  * Remove mappings for a provided virtual address range

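The restored documentation above spells out the contract every architecture has to implement again: reject flag combinations it cannot express with -ENOTSUP, report exhaustion of paging structures with -ENOMEM, and return 0 only once the whole region is mapped. A rough, non-authoritative sketch of that shape follows (not Zephyr code; MY_PAGE_SIZE and every my_arch_*() name are invented stubs standing in for real page table management):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MY_PAGE_SIZE 4096u

/* Stubs standing in for real, architecture-specific page table code */
static bool my_arch_flags_supported(uint32_t flags) { (void)flags; return true; }
static void *my_arch_alloc_table(void *virt) { static int t; (void)virt; return &t; }
static void my_arch_install_entry(void *tbl, void *virt, uintptr_t phys, uint32_t flags)
{ (void)tbl; (void)virt; (void)phys; (void)flags; }

/* Validate flags first (-ENOTSUP), then make sure paging structures exist
 * for every page (-ENOMEM), and only report success once the region is mapped.
 */
int my_arch_mem_map(void *dest, uintptr_t addr, size_t size, uint32_t flags)
{
	uint8_t *virt = dest;

	if (!my_arch_flags_supported(flags)) {
		return -ENOTSUP;	/* unsupported cache mode or flags */
	}

	for (size_t off = 0; off < size; off += MY_PAGE_SIZE) {
		void *table = my_arch_alloc_table(virt + off);

		if (table == NULL) {
			return -ENOMEM;	/* out of memory for paging structures */
		}
		my_arch_install_entry(table, virt + off, addr + off, flags);
	}

	return 0;
}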

@@ -276,6 +276,7 @@ static void frame_mapped_set(struct z_page_frame *pf, void *addr)
  */
 static int map_anon_page(void *addr, uint32_t flags)
 {
+	int ret;
 	struct z_page_frame *pf;
 	uintptr_t phys;
 	bool lock = (flags & K_MEM_MAP_LOCK) != 0;
@@ -284,10 +285,14 @@ static int map_anon_page(void *addr, uint32_t flags)
 	if (pf == NULL) {
 		return -ENOMEM;
 	}
 	phys = z_page_frame_to_phys(pf);
-	arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE, flags | K_MEM_CACHE_WB);
+	ret = arch_mem_map(addr, phys, CONFIG_MMU_PAGE_SIZE,
+			   flags | K_MEM_CACHE_WB);
+	if (ret != 0) {
+		free_page_frame_list_put(pf);
+		return -ENOMEM;
+	}
 	if (lock) {
 		pf->flags |= Z_PAGE_FRAME_PINNED;
 	}
@@ -380,6 +385,7 @@ void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
 {
 	uintptr_t aligned_phys, addr_offset;
 	size_t aligned_size;
+	int ret;
 	k_spinlock_key_t key;
 	uint8_t *dest_addr;
@@ -407,7 +413,11 @@ void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size, uint32_t flags)
 	LOG_DBG("arch_mem_map(%p, 0x%lx, %zu, %x) offset %lu", dest_addr,
 		aligned_phys, aligned_size, flags, addr_offset);
-	arch_mem_map(dest_addr, aligned_phys, aligned_size, flags);
+	ret = arch_mem_map(dest_addr, aligned_phys, aligned_size, flags);
+	if (ret != 0) {
+		LOG_ERR("arch_mem_map() failed with %d", ret);
+		goto fail;
+	}
 	k_spin_unlock(&z_mm_lock, key);
 	*virt_ptr = dest_addr + addr_offset;
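For reference, z_phys_map() is the caller-facing wrapper whose failure handling this hunk restores; callers never invoke arch_mem_map() directly. A minimal usage sketch follows (the physical address, region size and the device_map_regs() helper are made up for illustration; K_MEM_PERM_RW and K_MEM_CACHE_NONE are the permission/caching flags passed through to arch_mem_map()):

/* Illustrative only: map a made-up 4 KiB MMIO region uncached, read/write.
 * On success device_regs points at the virtual alias chosen by the kernel;
 * arch_mem_map() failures are logged and handled inside z_phys_map() itself
 * via the goto fail path shown above.
 */
static uint8_t *device_regs;

static void device_map_regs(void)
{
	z_phys_map(&device_regs, 0xfed00000UL, 0x1000,
		   K_MEM_PERM_RW | K_MEM_CACHE_NONE);
}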