kernel: add CONFIG_ARCH_MAPS_ALL_RAM

Some arches like x86 need all memory mapped so that they can
fetch information placed arbitrarily by firmware, like ACPI
tables.

Ensure that if this is the case, the kernel won't accidentally
clobber it by thinking the relevant virtual memory is unused.
Otherwise this has no effect on page frame management.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
Andrew Boie 2021-01-23 14:08:12 -08:00 committed by Anas Nashif
commit 14c5d1f1f7
5 changed files with 58 additions and 16 deletions

View file

@@ -62,6 +62,7 @@ config X86
select ARCH_HAS_TIMING_FUNCTIONS
select ARCH_HAS_THREAD_LOCAL_STORAGE
select ARCH_HAS_DEMAND_PAGING
select ARCH_MAPS_ALL_RAM
help
x86 architecture
@@ -532,6 +533,26 @@ config ARCH_HAS_RESERVED_PAGE_FRAMES
memory mappings. The architecture will need to implement
arch_reserved_pages_update().
config ARCH_MAPS_ALL_RAM
bool
help
This hidden option is selected by the architecture to inform the kernel
that all RAM is mapped at boot, and not just the bounds of the Zephyr image.
If RAM starts at 0x0, the first page must remain un-mapped to catch NULL
pointer dereferences. With this enabled, the kernel will not assume that
virtual memory addresses past the kernel image are available for mappings,
but instead takes into account the entire RAM mapping.
This is typically set by architectures which need direct access to all memory.
It is the architecture's responsibility to mark reserved memory regions
as such in arch_reserved_pages_update().
Although the kernel will not disturb this RAM mapping by re-mapping the associated
virtual addresses elsewhere, this guarantee covers only management of the
virtual address space. The kernel's page frame ontology will not consider
this mapping at all; non-kernel pages will be considered free (unless marked
as reserved) and Z_PAGE_FRAME_MAPPED will not be set.
menuconfig MMU
bool "Enable MMU features"
depends on CPU_HAS_MMU

View file

@@ -40,6 +40,22 @@
#define Z_KERNEL_VIRT_END ((uint8_t *)(&z_mapped_end))
#define Z_KERNEL_VIRT_SIZE ((size_t)(&z_mapped_size))
#define Z_VM_OFFSET ((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
CONFIG_SRAM_BASE_ADDRESS)
/* Only applies to boot RAM mappings within the Zephyr image that have never
* been remapped or paged out. Never use this unless you know exactly what you
* are doing.
*/
#define Z_BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)virt) + Z_VM_OFFSET))
#define Z_BOOT_PHYS_TO_VIRT(phys) ((uint8_t *)(((uintptr_t)phys) - Z_VM_OFFSET))
#ifdef CONFIG_ARCH_MAPS_ALL_RAM
#define Z_FREE_VM_START Z_BOOT_PHYS_TO_VIRT(Z_PHYS_RAM_END)
#else
#define Z_FREE_VM_START Z_KERNEL_VIRT_END
#endif
/*
* Macros and data structures for physical page frame accounting,
* APIs for use by eviction and backing store algorithms. This code

View file

@@ -125,25 +125,32 @@ void z_page_frames_dump(void)
for (_pos = _base; \
_pos < ((uintptr_t)_base + _size); _pos += CONFIG_MMU_PAGE_SIZE)
/*
* Virtual address space management
*
* Call all of these functions with z_mm_lock held.
*
* Overall virtual memory map: When the kernel starts, it resides in
* virtual memory in the region Z_BOOT_KERNEL_VIRT_START to
* Z_BOOT_KERNEL_VIRT_END. Unused virtual memory past this, up to the limit
* virtual memory in the region Z_KERNEL_VIRT_START to
* Z_KERNEL_VIRT_END. Unused virtual memory past this, up to the limit
* noted by CONFIG_KERNEL_VM_SIZE may be used for runtime memory mappings.
*
* If CONFIG_ARCH_MAPS_ALL_RAM is set, we do not just map the kernel image,
* but have a mapping for all RAM in place. This is for special architectural
* purposes and does not otherwise affect page frame accounting or flags;
* the only guarantee is that such RAM mapping outside of the Zephyr image
* won't be disturbed by subsequent memory mapping calls.
*
* +--------------+ <- Z_VIRT_ADDR_START
* | Undefined VM | <- May contain ancillary regions like x86_64's locore
* +--------------+ <- Z_BOOT_KERNEL_VIRT_START (often == Z_VIRT_ADDR_START)
* +--------------+ <- Z_KERNEL_VIRT_START (often == Z_VIRT_ADDR_START)
* | Mapping for |
* | main kernel |
* | image |
* | |
* | |
* +--------------+ <- Z_BOOT_KERNEL_VIRT_END
* +--------------+ <- Z_FREE_VM_START
* | |
* | Unused, |
* | Available VM |
@@ -175,7 +182,7 @@ static void *virt_region_get(size_t size)
{
uint8_t *dest_addr;
if ((mapping_pos - size) < Z_KERNEL_VIRT_END) {
if ((mapping_pos - size) < Z_FREE_VM_START) {
LOG_ERR("insufficient virtual address space (requested %zu)",
size);
return NULL;
@@ -474,14 +481,6 @@ size_t k_mem_region_align(uintptr_t *aligned_phys, size_t *aligned_size,
return addr_offset;
}
#define VM_OFFSET ((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
CONFIG_SRAM_BASE_ADDRESS)
/* Only applies to boot RAM mappings within the Zephyr image that have never
* been remapped or paged out. Never use this unless you know exactly what you
* are doing.
*/
#define BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)virt) + VM_OFFSET))
#ifdef CONFIG_USERSPACE
void z_kernel_map_fixup(void)
@@ -500,7 +499,7 @@ void z_kernel_map_fixup(void)
if (kobject_size != 0) {
arch_mem_map(kobject_page_begin,
BOOT_VIRT_TO_PHYS(kobject_page_begin),
Z_BOOT_VIRT_TO_PHYS(kobject_page_begin),
kobject_size, K_MEM_PERM_RW | K_MEM_CACHE_WB);
}
}
@@ -527,7 +526,7 @@ void z_mem_manage_init(void)
*/
VIRT_FOREACH(Z_KERNEL_VIRT_START, Z_KERNEL_VIRT_SIZE, addr)
{
pf = z_phys_to_page_frame(BOOT_VIRT_TO_PHYS(addr));
pf = z_phys_to_page_frame(Z_BOOT_VIRT_TO_PHYS(addr));
frame_mapped_set(pf, addr);
/* TODO: for now we pin the whole Zephyr image. Demand paging

View file

@@ -4,5 +4,10 @@ cmake_minimum_required(VERSION 3.13.1)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(syscalls)
target_include_directories(app PRIVATE
${ZEPHYR_BASE}/kernel/include
${ZEPHYR_BASE}/arch/${ARCH}/include
)
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})

View file

@@ -9,6 +9,7 @@
#include <ztest.h>
#include <linker/linker-defs.h>
#include "test_syscalls.h"
#include <mmu.h>
#define BUF_SIZE 32
#define SLEEP_MS_LONG 15000
@@ -18,7 +19,7 @@
#define FAULTY_ADDRESS 0x0FFFFFFF
#elif CONFIG_MMU
/* Just past the zephyr image mapping should be a non-present page */
#define FAULTY_ADDRESS ((uint8_t *)(&z_mapped_end))
#define FAULTY_ADDRESS Z_FREE_VM_START
#else
#define FAULTY_ADDRESS 0xFFFFFFF0
#endif