mmu: pin the whole kernel
This will enable testing of the implementation until the critical set of pages is identified and known to the kernel. Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
parent
cd0a50d5c9
commit
a45486e1d5
1 changed file with 14 additions and 2 deletions
16
kernel/mmu.c
16
kernel/mmu.c
|
@@ -527,8 +527,20 @@ void z_mem_manage_init(void)
|
|||
*/
|
||||
VIRT_FOREACH(Z_KERNEL_VIRT_START, Z_KERNEL_VIRT_SIZE, addr)
|
||||
{
|
||||
frame_mapped_set(z_phys_to_page_frame(BOOT_VIRT_TO_PHYS(addr)),
|
||||
addr);
|
||||
pf = z_phys_to_page_frame(BOOT_VIRT_TO_PHYS(addr));
|
||||
frame_mapped_set(pf, addr);
|
||||
|
||||
/* TODO: for now we pin the whole Zephyr image. Demand paging
|
||||
* currently tested with anonymously-mapped pages which are not
|
||||
* pinned.
|
||||
*
|
||||
* We will need to setup linker regions for a subset of kernel
|
||||
* code/data pages which are pinned in memory and
|
||||
* may not be evicted. This will contain critical CPU data
|
||||
* structures, and any code used to perform page fault
|
||||
* handling, page-ins, etc.
|
||||
*/
|
||||
pf->flags |= Z_PAGE_FRAME_PINNED;
|
||||
}
|
||||
|
||||
/* Any remaining pages that aren't mapped, reserved, or pinned get
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue