x86: use unused PTE bits when mapping memory

Page table management for x86 is being revised such that, in many cases,
there will no longer be a pristine, master set of page tables. Instead,
when mapping memory, use unused PTE bits to store the original RW, US,
and XD settings from when the mapping was made. This allows memory
domains to alter page tables while still being able to restore the
original mapping permissions.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent bd76bdb8ff
commit 745dd6f931

2 changed files with 58 additions and 36 deletions
arch/x86/core/x86_mmu.c

@@ -21,6 +21,10 @@
 LOG_MODULE_DECLARE(os);
 
+#define ENTRY_RW (MMU_RW | MMU_IGNORED0)
+#define ENTRY_US (MMU_US | MMU_IGNORED1)
+#define ENTRY_XD (MMU_XD | MMU_IGNORED2)
+
 /* "dummy" pagetables for the first-phase build. The real page tables
  * are produced by gen-mmu.py based on data read in zephyr-prebuilt.elf,
  * and this dummy array is discarded.
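Each ENTRY_* macro sets a permission twice: once in the bit the MMU actually interprets, and once in an adjacent ignored bit that the hardware never reads. Because the ignored copy survives later edits of the hardware bits, the original permissions can always be rebuilt from it. A minimal sketch of such a restore step, using the macro names above but a hypothetical helper name (this function is not part of the commit):

static pentry_t restore_entry_perms(pentry_t pte)
{
	/* Clear the live, hardware-interpreted permission bits */
	pte &= ~((pentry_t)(MMU_RW | MMU_US | MMU_XD));

	/* Rebuild them from the copies stashed in the ignored bits */
	if ((pte & MMU_IGNORED0) != 0U) {
		pte |= MMU_RW;
	}
	if ((pte & MMU_IGNORED1) != 0U) {
		pte |= MMU_US;
	}
	if ((pte & MMU_IGNORED2) != 0U) {
		pte |= MMU_XD;
	}

	return pte;
}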
@@ -640,10 +644,44 @@ static int page_map_set(pentry_t *ptables, void *virt, pentry_t entry_val,
 	return 0;
 }
 
+static pentry_t flags_to_entry(uint32_t flags)
+{
+	pentry_t entry_flags = MMU_P;
+
+	/* Translate flags argument into HW-recognized entry flags.
+	 *
+	 * Support for PAT is not implemented yet. Many systems may have
+	 * BIOS-populated MTRR values such that these cache settings are
+	 * redundant.
+	 */
+	switch (flags & K_MEM_CACHE_MASK) {
+	case K_MEM_CACHE_NONE:
+		entry_flags |= MMU_PCD;
+		break;
+	case K_MEM_CACHE_WT:
+		entry_flags |= MMU_PWT;
+		break;
+	case K_MEM_CACHE_WB:
+		break;
+	default:
+		return -ENOTSUP;
+	}
+
+	if ((flags & K_MEM_PERM_RW) != 0U) {
+		entry_flags |= ENTRY_RW;
+	}
+
+	if ((flags & K_MEM_PERM_EXEC) == 0U) {
+		entry_flags |= ENTRY_XD;
+	}
+
+	return entry_flags;
+}
+
 /* map region virt..virt+size to phys with provided arch-neutral flags */
 int arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 {
-	pentry_t entry_flags = MMU_P;
+	pentry_t entry_flags;
 	pentry_t *ptables;
 
 	LOG_DBG("%s: %p -> %p (%zu) flags 0x%x",
@@ -665,36 +703,14 @@ int arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
 	 */
 	ptables = z_x86_kernel_ptables;
 
-	/* Translate flags argument into HW-recognized entry flags.
-	 *
-	 * Support for PAT is not implemented yet. Many systems may have
-	 * BIOS-populated MTRR values such that these cache settings are
-	 * redundant.
-	 */
-	switch (flags & K_MEM_CACHE_MASK) {
-	case K_MEM_CACHE_NONE:
-		entry_flags |= MMU_PCD;
-		break;
-	case K_MEM_CACHE_WT:
-		entry_flags |= MMU_PWT;
-		break;
-	case K_MEM_CACHE_WB:
-		break;
-	default:
-		return -ENOTSUP;
-	}
-	if ((flags & K_MEM_PERM_RW) != 0U) {
-		entry_flags |= MMU_RW;
-	}
-	if ((flags & K_MEM_PERM_USER) != 0U) {
-		/* TODO: user mode support
-		 * entry_flags |= MMU_US;
-		 */
-		return -ENOTSUP;
-	}
-	if ((flags & K_MEM_PERM_EXEC) == 0U) {
-		entry_flags |= MMU_XD;
-	}
+	entry_flags = flags_to_entry(flags);
 
 	for (size_t offset = 0; offset < size; offset += CONFIG_MMU_PAGE_SIZE) {
 		int ret;
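With the flag translation factored out into flags_to_entry(), arch_mem_map() is reduced to picking the page tables, converting the flags, and walking the region page by page. For orientation, a caller would invoke it along these lines (a hypothetical usage sketch; the addresses and size are made up for illustration):

	/* Map one 4 KiB page of device memory: uncached, read/write,
	 * and non-executable (no K_MEM_PERM_EXEC requested).
	 */
	int ret = arch_mem_map((void *)0xDF000000, 0xFEC00000UL, 4096,
			       K_MEM_CACHE_NONE | K_MEM_PERM_RW);
	if (ret != 0) {
		/* e.g. -ENOTSUP if the requested cache mode is unsupported */
	}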
@@ -726,7 +742,7 @@ int arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
  */
 static void stack_guard_set(void *guard_page)
 {
-	pentry_t pte = ((uintptr_t)guard_page) | MMU_P | MMU_XD;
+	pentry_t pte = ((uintptr_t)guard_page) | MMU_P | ENTRY_XD;
 	int ret;
 
 	assert_virt_addr_aligned(guard_page);
arch/x86/gen_mmu.py

@@ -76,6 +76,10 @@ FLAG_IGNORED0 = bit(9)
 FLAG_IGNORED1 = bit(10)
 FLAG_IGNORED2 = bit(11)
 
+ENTRY_RW = FLAG_RW | FLAG_IGNORED0
+ENTRY_US = FLAG_US | FLAG_IGNORED1
+ENTRY_XD = FLAG_XD | FLAG_IGNORED2
+
 def debug(text):
     if not args.verbose:
         return
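These definitions mirror the C macros bit for bit. Assuming the standard x86 PTE layout (RW = bit 1, US = bit 2, and, on PAE/IA-32e, XD = bit 63) combined with the ignored bits 9-11 above, the encodings work out numerically as follows (a worked illustration, not code from the commit):

/* ENTRY_RW = 0x0000000000000202  -- RW (bit 1)  | IGNORED0 (bit 9)  */
/* ENTRY_US = 0x0000000000000404  -- US (bit 2)  | IGNORED1 (bit 10) */
/* ENTRY_XD = 0x8000000000000800  -- XD (bit 63) | IGNORED2 (bit 11) */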
@@ -249,14 +253,16 @@ class Pt(MMUTable):
     addr_mask = 0xFFFFF000
     type_code = 'I'
     num_entries = 1024
-    supported_flags = FLAG_P | FLAG_RW | FLAG_US | FLAG_G
+    supported_flags = (FLAG_P | FLAG_RW | FLAG_US | FLAG_G |
+                       FLAG_IGNORED0 | FLAG_IGNORED1)
 
 class PtXd(Pt):
     """Page table for either PAE or IA-32e"""
     addr_mask = 0x07FFFFFFFFFFF000
     type_code = 'Q'
     num_entries = 512
-    supported_flags = FLAG_P | FLAG_RW | FLAG_US | FLAG_G | FLAG_XD
+    supported_flags = (FLAG_P | FLAG_RW | FLAG_US | FLAG_G | FLAG_XD |
+                       FLAG_IGNORED0 | FLAG_IGNORED1 | FLAG_IGNORED2)
 
 
 class PtableSet(object):
@@ -441,12 +447,12 @@ def main():
     if is_perm_regions:
         # Don't allow execution by default for any pages. We'll adjust this
         # in later calls to pt.set_region_perms()
-        map_flags = FLAG_P | FLAG_XD
+        map_flags = FLAG_P | ENTRY_XD
     else:
         map_flags = FLAG_P
 
     pt = pclass(ptables_phys)
-    pt.map(ram_base, ram_size, map_flags | FLAG_RW)
+    pt.map(ram_base, ram_size, map_flags | ENTRY_RW)
 
     if isdef("CONFIG_XIP"):
         # Additionally identity-map all ROM as read-only
@@ -462,16 +468,16 @@ def main():
     # - User mode needs access as we currently do not separate application
     #   text/rodata from kernel text/rodata
     if isdef("CONFIG_GDBSTUB"):
-        pt.set_region_perms("_image_text", FLAG_P | FLAG_US | FLAG_RW)
+        pt.set_region_perms("_image_text", FLAG_P | ENTRY_US | ENTRY_RW)
     else:
-        pt.set_region_perms("_image_text", FLAG_P | FLAG_US)
-    pt.set_region_perms("_image_rodata", FLAG_P | FLAG_US | FLAG_XD)
+        pt.set_region_perms("_image_text", FLAG_P | ENTRY_US)
+    pt.set_region_perms("_image_rodata", FLAG_P | ENTRY_US | ENTRY_XD)
 
     if isdef("CONFIG_COVERAGE_GCOV") and isdef("CONFIG_USERSPACE"):
         # If GCOV is enabled, user mode must be able to write to its
         # common data area
         pt.set_region_perms("__gcov_bss",
-                            FLAG_P | FLAG_RW | FLAG_US | FLAG_XD)
+                            FLAG_P | ENTRY_RW | ENTRY_US | ENTRY_XD)
 
     if isdef("CONFIG_X86_64"):
         # Set appropriate permissions for locore areas much like we did
@@ -483,12 +489,12 @@ def main():
         # KPTI is turned on. There is no sensitive data in them, and
         # they contain text/data needed to take an exception or
        # interrupt.
-        flag_user = FLAG_US
+        flag_user = ENTRY_US
     else:
         flag_user = 0
 
     pt.set_region_perms("_locore", FLAG_P | flag_user)
-    pt.set_region_perms("_lorodata", FLAG_P | FLAG_XD | flag_user)
+    pt.set_region_perms("_lorodata", FLAG_P | ENTRY_XD | flag_user)
 
     pt.write_output(args.output)