x86: modify MMU APIs for multiple page tables

The current set of APIs and macros assumed that only one set of page
tables would ever be in use. They now take a pointer to the struct
x86_mmu_pdpt they are to operate on, with the kernel's page tables
exposed as z_x86_kernel_pdpt.
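
For illustration (not part of the diff; 'addr' and the flag choice are
arbitrary examples), the call shape changes like this:

    /* Before: the target page tables were implicit (X86_MMU_PDPT) */
    _x86_mmu_set_flags(addr, MMU_PAGE_SIZE,
                       MMU_ENTRY_WRITE, MMU_PTE_RW_MASK);

    /* After: callers name the page tables explicitly. Here that is
     * the kernel's set; any other pdpt could be passed instead.
     */
    _x86_mmu_set_flags(&z_x86_kernel_pdpt, addr, MMU_PAGE_SIZE,
                       MMU_ENTRY_WRITE, MMU_PTE_RW_MASK);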

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
commit f093285345
Authored by Andrew Boie on 2019-02-06 16:51:38 -08:00; committed by Anas Nashif
10 changed files with 97 additions and 82 deletions


@@ -348,7 +348,7 @@ __csSet:

 #ifdef CONFIG_X86_MMU
 	/* load the page directory address into the registers*/
-	movl $__mmu_tables_start, %eax
+	movl $z_x86_kernel_pdpt, %eax
 	movl %eax, %cr3

 	/* Enable PAE */


@@ -301,11 +301,11 @@ static void dump_entry_flags(x86_page_entry_data_t flags)
 	       "Execute Disable" : "Execute Enabled");
 }

-static void dump_mmu_flags(void *addr)
+static void dump_mmu_flags(struct x86_mmu_pdpt *pdpt, void *addr)
 {
 	x86_page_entry_data_t pde_flags, pte_flags;

-	_x86_mmu_get_flags(addr, &pde_flags, &pte_flags);
+	_x86_mmu_get_flags(pdpt, addr, &pde_flags, &pte_flags);

 	printk("PDE: ");
 	dump_entry_flags(pde_flags);

@@ -331,7 +331,7 @@ static void dump_page_fault(NANO_ESF *esf)
 	       cr2);

 #ifdef CONFIG_X86_MMU
-	dump_mmu_flags((void *)cr2);
+	dump_mmu_flags(&z_x86_kernel_pdpt, (void *)cr2);
 #endif
 }
 #endif /* CONFIG_EXCEPTION_DEBUG */

@@ -390,7 +390,7 @@ struct task_state_segment _df_tss = {
 	.es = DATA_SEG,
 	.ss = DATA_SEG,
 	.eip = (u32_t)_df_handler_top,
-	.cr3 = (u32_t)X86_MMU_PDPT
+	.cr3 = (u32_t)&z_x86_kernel_pdpt
 };

 static FUNC_NORETURN __used void _df_handler_bottom(void)

@@ -407,7 +407,8 @@ static FUNC_NORETURN __used void _df_handler_bottom(void)
 	 * one byte, since if a single push operation caused the fault ESP
 	 * wouldn't be decremented
 	 */
-	_x86_mmu_get_flags((u8_t *)_df_esf.esp - 1, &pde_flags, &pte_flags);
+	_x86_mmu_get_flags(&z_x86_kernel_pdpt,
+			   (u8_t *)_df_esf.esp - 1, &pde_flags, &pte_flags);
 	if ((pte_flags & MMU_ENTRY_PRESENT) != 0) {
 		printk("***** Double Fault *****\n");
 		reason = _NANO_ERR_CPU_EXCEPTION;

@@ -445,7 +446,7 @@ static FUNC_NORETURN __used void _df_handler_top(void)
 	_main_tss.es = DATA_SEG;
 	_main_tss.ss = DATA_SEG;
 	_main_tss.eip = (u32_t)_df_handler_bottom;
-	_main_tss.cr3 = (u32_t)X86_MMU_PDPT;
+	_main_tss.cr3 = (u32_t)&z_x86_kernel_pdpt;
 	_main_tss.eflags = 0;

 	/* NT bit is set in EFLAGS so we will task switch back to _main_tss


@@ -75,15 +75,16 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	if ((options & K_USER) == 0) {
 		/* Running in kernel mode, kernel stack region is also a guard
 		 * page */
-		_x86_mmu_set_flags((void *)(stack_buf - MMU_PAGE_SIZE),
+		_x86_mmu_set_flags(&z_x86_kernel_pdpt,
+				   (void *)(stack_buf - MMU_PAGE_SIZE),
 				   MMU_PAGE_SIZE, MMU_ENTRY_NOT_PRESENT,
 				   MMU_PTE_P_MASK);
 	}
 #endif /* CONFIG_X86_USERSPACE */

 #if CONFIG_X86_STACK_PROTECTION
-	_x86_mmu_set_flags(stack, MMU_PAGE_SIZE, MMU_ENTRY_NOT_PRESENT,
-			   MMU_PTE_P_MASK);
+	_x86_mmu_set_flags(&z_x86_kernel_pdpt, stack, MMU_PAGE_SIZE,
+			   MMU_ENTRY_NOT_PRESENT, MMU_PTE_P_MASK);
 #endif

 	stack_high = (char *)STACK_ROUND_DOWN(stack_buf + stack_size);

@@ -131,13 +132,15 @@ void _x86_swap_update_page_tables(struct k_thread *incoming,
 				  struct k_thread *outgoing)
 {
 	/* Outgoing thread stack no longer accessible */
-	_x86_mmu_set_flags((void *)outgoing->stack_info.start,
+	_x86_mmu_set_flags(&z_x86_kernel_pdpt,
+			   (void *)outgoing->stack_info.start,
 			   ROUND_UP(outgoing->stack_info.size, MMU_PAGE_SIZE),
 			   MMU_ENTRY_SUPERVISOR, MMU_PTE_US_MASK);

 	/* Userspace can now access the incoming thread's stack */
-	_x86_mmu_set_flags((void *)incoming->stack_info.start,
+	_x86_mmu_set_flags(&z_x86_kernel_pdpt,
+			   (void *)incoming->stack_info.start,
 			   ROUND_UP(incoming->stack_info.size, MMU_PAGE_SIZE),
 			   MMU_ENTRY_USER, MMU_PTE_US_MASK);

@@ -173,7 +176,8 @@ FUNC_NORETURN void _arch_user_mode_enter(k_thread_entry_t user_entry,
 			    _current->stack_info.size);

 	/* Set up the kernel stack used during privilege elevation */
-	_x86_mmu_set_flags((void *)(_current->stack_info.start - MMU_PAGE_SIZE),
+	_x86_mmu_set_flags(&z_x86_kernel_pdpt,
+			   (void *)(_current->stack_info.start - MMU_PAGE_SIZE),
 			   MMU_PAGE_SIZE,
 			   (MMU_ENTRY_PRESENT | MMU_ENTRY_WRITE |
 			    MMU_ENTRY_SUPERVISOR),


@@ -41,16 +41,17 @@ MMU_BOOT_REGION((u32_t)&__kernel_ram_start, (u32_t)&__kernel_ram_size,
 		MMU_ENTRY_EXECUTE_DISABLE);

-void _x86_mmu_get_flags(void *addr,
+void _x86_mmu_get_flags(struct x86_mmu_pdpt *pdpt, void *addr,
 			x86_page_entry_data_t *pde_flags,
 			x86_page_entry_data_t *pte_flags)
 {
-	*pde_flags = (x86_page_entry_data_t)(X86_MMU_GET_PDE(addr)->value &
-			~(x86_page_entry_data_t)MMU_PDE_PAGE_TABLE_MASK);
+	*pde_flags =
+		(x86_page_entry_data_t)(X86_MMU_GET_PDE(pdpt, addr)->value &
+		~(x86_page_entry_data_t)MMU_PDE_PAGE_TABLE_MASK);

 	if ((*pde_flags & MMU_ENTRY_PRESENT) != 0) {
 		*pte_flags = (x86_page_entry_data_t)
-			(X86_MMU_GET_PTE(addr)->value &
+			(X86_MMU_GET_PTE(pdpt, addr)->value &
 			~(x86_page_entry_data_t)MMU_PTE_PAGE_MASK);
 	} else {
 		*pte_flags = 0;

@@ -88,7 +89,7 @@ int _arch_buffer_validate(void *addr, size_t size, int write)
 		}

 		struct x86_mmu_pd *pd_address =
-			X86_MMU_GET_PD_ADDR_INDEX(pdpte);
+			X86_MMU_GET_PD_ADDR_INDEX(&z_x86_kernel_pdpt, pdpte);

 		/* Iterate for all the pde's the buffer might take up.
 		 * (depends on the size of the buffer and start address

@@ -164,7 +165,7 @@ static inline void tlb_flush_page(void *addr)
 }

-void _x86_mmu_set_flags(void *ptr,
+void _x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
 			size_t size,
 			x86_page_entry_data_t flags,
 			x86_page_entry_data_t mask)

@@ -179,8 +180,8 @@ void _x86_mmu_set_flags(void *ptr,
 	while (size != 0) {
 		/* TODO we're not generating 2MB entries at the moment */
-		__ASSERT(X86_MMU_GET_PDE(addr)->ps != 1, "2MB PDE found");
-		pte = X86_MMU_GET_PTE(addr);
+		__ASSERT(X86_MMU_GET_PDE(pdpt, addr)->ps != 1, "2MB PDE found");
+		pte = X86_MMU_GET_PTE(pdpt, addr);
 		pte->value = (pte->value & ~mask) | flags;

 		tlb_flush_page((void *)addr);

@@ -229,13 +230,15 @@ static inline void _x86_mem_domain_pages_update(struct k_mem_domain *mem_domain,
 		partitions_count++;
 		if (page_conf == X86_MEM_DOMAIN_SET_PAGES) {
 			/* Set the partition attributes */
-			_x86_mmu_set_flags((void *)partition.start,
+			_x86_mmu_set_flags(&z_x86_kernel_pdpt,
+					   (void *)partition.start,
 					   partition.size,
 					   partition.attr,
 					   K_MEM_PARTITION_PERM_MASK);
 		} else {
 			/* Reset the pages to supervisor RW only */
-			_x86_mmu_set_flags((void *)partition.start,
+			_x86_mmu_set_flags(&z_x86_kernel_pdpt,
+					   (void *)partition.start,
 					   partition.size,
 					   K_MEM_PARTITION_P_RW_U_NA,
 					   K_MEM_PARTITION_PERM_MASK);

@@ -275,7 +278,7 @@ void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
 	partition = domain->partitions[partition_id];

-	_x86_mmu_set_flags((void *)partition.start,
+	_x86_mmu_set_flags(&z_x86_kernel_pdpt, (void *)partition.start,
 			   partition.size,
 			   K_MEM_PARTITION_P_RW_U_NA,
 			   K_MEM_PARTITION_PERM_MASK);


@@ -39,7 +39,7 @@ static inline void kernel_arch_init(void)
 	_kernel.irq_stack = K_THREAD_STACK_BUFFER(_interrupt_stack) +
 			    CONFIG_ISR_STACK_SIZE;
 #if CONFIG_X86_STACK_PROTECTION
-	_x86_mmu_set_flags(_interrupt_stack, MMU_PAGE_SIZE,
+	_x86_mmu_set_flags(&z_x86_kernel_pdpt, _interrupt_stack, MMU_PAGE_SIZE,
 			   MMU_ENTRY_NOT_PRESENT, MMU_PTE_P_MASK);
 #endif
 }


@@ -146,49 +146,48 @@
  * Returns the page table entry for the addr
  * use the union to extract page entry related information.
  */
-#define X86_MMU_GET_PTE(addr)\
+#define X86_MMU_GET_PTE(pdpt, addr)\
 	((union x86_mmu_pte *)\
-	 (&X86_MMU_GET_PT_ADDR(addr)->entry[MMU_PAGE_NUM(addr)]))
+	 (&X86_MMU_GET_PT_ADDR(pdpt, addr)->entry[MMU_PAGE_NUM(addr)]))

 /*
  * Returns the Page table address for the particular address.
  * Page Table address(returned value) is always 4KBytes aligned.
  */
-#define X86_MMU_GET_PT_ADDR(addr) \
+#define X86_MMU_GET_PT_ADDR(pdpt, addr) \
 	((struct x86_mmu_pt *)\
-	 (X86_MMU_GET_PDE(addr)->pt << MMU_PAGE_SHIFT))
+	 (X86_MMU_GET_PDE(pdpt, addr)->pt << MMU_PAGE_SHIFT))

 /* Returns the page directory entry for the addr
  * use the union to extract page directory entry related information.
  */
-#define X86_MMU_GET_PDE(addr)\
+#define X86_MMU_GET_PDE(pdpt, addr)\
 	((union x86_mmu_pde_pt *) \
-	 (&X86_MMU_GET_PD_ADDR(addr)->entry[MMU_PDE_NUM(addr)]))
+	 (&X86_MMU_GET_PD_ADDR(pdpt, addr)->entry[MMU_PDE_NUM(addr)]))

 /* Returns the page directory entry for the addr
  * use the union to extract page directory entry related information.
  */
-#define X86_MMU_GET_PD_ADDR(addr) \
+#define X86_MMU_GET_PD_ADDR(pdpt, addr) \
 	((struct x86_mmu_pd *) \
-	 (X86_MMU_GET_PDPTE(addr)->pd << MMU_PAGE_SHIFT))
+	 (X86_MMU_GET_PDPTE(pdpt, addr)->pd << MMU_PAGE_SHIFT))

 /* Returns the page directory pointer entry */
-#define X86_MMU_GET_PDPTE(addr) \
-	((union x86_mmu_pdpte *) \
-	 (&X86_MMU_PDPT->entry[MMU_PDPTE_NUM(addr)]))
+#define X86_MMU_GET_PDPTE(pdpt, addr) \
+	(&((pdpt)->entry[MMU_PDPTE_NUM(addr)]))

 /* Return the Page directory address.
  * input is the entry number
  */
-#define X86_MMU_GET_PD_ADDR_INDEX(index) \
+#define X86_MMU_GET_PD_ADDR_INDEX(pdpt, index) \
 	((struct x86_mmu_pd *) \
-	 (X86_MMU_GET_PDPTE_INDEX(index)->pd << MMU_PAGE_SHIFT))
+	 (X86_MMU_GET_PDPTE_INDEX(pdpt, index)->pd << MMU_PAGE_SHIFT))

 /* Returns the page directory pointer entry.
  * Input is the entry number
  */
-#define X86_MMU_GET_PDPTE_INDEX(index) \
-	((union x86_mmu_pdpte *)(&X86_MMU_PDPT->entry[index]))
+#define X86_MMU_GET_PDPTE_INDEX(pdpt, index) \
+	(&((pdpt)->entry[index]))

 /* memory partition arch/soc independent attribute */
 #define K_MEM_PARTITION_P_RW_U_RW (MMU_ENTRY_WRITE | \


@@ -647,11 +647,8 @@ extern struct task_state_segment _main_tss;
 extern const NANO_ESF _default_esf;

 #ifdef CONFIG_X86_MMU
-/* Linker variable. It is needed to access the start of the Page directory */
-extern u64_t __mmu_tables_start;
-
-#define X86_MMU_PDPT ((struct x86_mmu_pdpt *)\
-		(u32_t *)(void *)&__mmu_tables_start)
+/* kernel's page table */
+extern struct x86_mmu_pdpt z_x86_kernel_pdpt;

 /**
  * @brief Fetch page table flags for a particular page
@@ -659,11 +656,12 @@ extern u64_t __mmu_tables_start;
  * Given a memory address, return the flags for the containing page's
  * PDE and PTE entries. Intended for debugging.
  *
+ * @param pdpt Which page table to use
  * @param addr Memory address to example
  * @param pde_flags Output parameter for page directory entry flags
  * @param pte_flags Output parameter for page table entry flags
  */
-void _x86_mmu_get_flags(void *addr,
+void _x86_mmu_get_flags(struct x86_mmu_pdpt *pdpt, void *addr,
 			x86_page_entry_data_t *pde_flags,
 			x86_page_entry_data_t *pte_flags);
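
A usage sketch (illustrative only; 'fault_addr' is a hypothetical
address being debugged):

    x86_page_entry_data_t pde_flags, pte_flags;

    /* Inspect how fault_addr is mapped in the kernel's page tables */
    _x86_mmu_get_flags(&z_x86_kernel_pdpt, (void *)fault_addr,
                       &pde_flags, &pte_flags);
    if ((pte_flags & MMU_ENTRY_PRESENT) == 0) {
        printk("page not present\n");
    }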
@@ -674,14 +672,14 @@ void _x86_mmu_get_flags(void *addr,
  * Modify bits in the existing page tables for a particular memory
  * range, which must be page-aligned
  *
+ * @param pdpt Which page table to use
  * @param ptr Starting memory address which must be page-aligned
  * @param size Size of the region, must be page size multiple
  * @param flags Value of bits to set in the page table entries
  * @param mask Mask indicating which particular bits in the page table entries to
  *	  modify
  */
-void _x86_mmu_set_flags(void *ptr,
+void _x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
 			size_t size,
 			x86_page_entry_data_t flags,
 			x86_page_entry_data_t mask);
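
A representative call, mirroring the stack-guard usage elsewhere in
this commit ('guard' stands for a hypothetical page-aligned address):

    /* Unmap one guard page in the kernel's tables: the mask selects
     * which bits to touch (MMU_PTE_P_MASK, the present bit) and the
     * flags give their new value (MMU_ENTRY_NOT_PRESENT).
     */
    _x86_mmu_set_flags(&z_x86_kernel_pdpt, guard, MMU_PAGE_SIZE,
                       MMU_ENTRY_NOT_PRESENT, MMU_PTE_P_MASK);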


@@ -373,6 +373,7 @@ SECTIONS
 	/* Page Tables are located here if MMU is enabled.*/
 	MMU_PAGE_ALIGN
 	__mmu_tables_start = .;
+	z_x86_kernel_pdpt = .;
 	KEEP(*(.mmu_data));
 	__mmu_tables_end = .;
 	} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)


@@ -45,7 +45,7 @@ static void starting_addr_range(u32_t start_addr_range)
 	for (addr_range = start_addr_range; addr_range <=
 	     (start_addr_range + STARTING_ADDR_RANGE_LMT);
 	     addr_range += 0x1000) {
-		value = X86_MMU_GET_PTE(addr_range);
+		value = X86_MMU_GET_PTE(&z_x86_kernel_pdpt, addr_range);
 		status &= check_param(value, REGION_PERM);
 		zassert_false((status == 0), "error at %d permissions %d\n",
 			      addr_range, REGION_PERM);

@@ -60,7 +60,7 @@ static void before_start_addr_range(u32_t start_addr_range)
 	for (addr_range = start_addr_range - 0x7000;
 	     addr_range < (start_addr_range); addr_range += 0x1000) {

-		value = X86_MMU_GET_PTE(addr_range);
+		value = X86_MMU_GET_PTE(&z_x86_kernel_pdpt, addr_range);
 		status &= check_param_nonset_region(value, REGION_PERM);

 		zassert_false((status == 0), "error at %d permissions %d\n",

@@ -76,7 +76,7 @@ static void ending_start_addr_range(u32_t start_addr_range)
 	for (addr_range = start_addr_range + ADDR_SIZE; addr_range <
 	     (start_addr_range + ADDR_SIZE + 0x10000);
 	     addr_range += 0x1000) {
-		value = X86_MMU_GET_PTE(addr_range);
+		value = X86_MMU_GET_PTE(&z_x86_kernel_pdpt, addr_range);
 		status &= check_param_nonset_region(value, REGION_PERM);
 		zassert_false((status == 0), "error at %d permissions %d\n",
 			      addr_range, REGION_PERM);


@@ -27,10 +27,19 @@ void reset_flag(void);
 void reset_multi_pte_page_flag(void);
 void reset_multi_pde_flag(void);

+#define PDPT &z_x86_kernel_pdpt
+
 #define ADDR_PAGE_1 ((u8_t *)__bss_start + SKIP_SIZE * MMU_PAGE_SIZE)
 #define ADDR_PAGE_2 ((u8_t *)__bss_start + (SKIP_SIZE + 1) * MMU_PAGE_SIZE)

-#define PRESET_PAGE_1_VALUE (X86_MMU_GET_PTE(ADDR_PAGE_1)->p = 1)
-#define PRESET_PAGE_2_VALUE (X86_MMU_GET_PTE(ADDR_PAGE_2)->p = 1)
+#define PRESET_PAGE_1_VALUE (X86_MMU_GET_PTE(PDPT, ADDR_PAGE_1)->p = 1)
+#define PRESET_PAGE_2_VALUE (X86_MMU_GET_PTE(PDPT, ADDR_PAGE_2)->p = 1)
+
+static void set_flags(void *ptr, size_t size, x86_page_entry_data_t flags,
+		      x86_page_entry_data_t mask)
+{
+	_x86_mmu_set_flags(PDPT, ptr, size, flags, mask);
+}

 /* if Failure occurs
@@ -45,7 +54,7 @@ static int buffer_rw_read(void)
 {
 	PRESET_PAGE_1_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_READ,
 		  MMU_PDE_RW_MASK);

@@ -66,7 +75,7 @@ static int buffer_writeable_write(void)
 {
 	PRESET_PAGE_1_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE,
 		  MMU_PDE_RW_MASK);

@@ -86,7 +95,7 @@ static int buffer_readable_read(void)
 {
 	PRESET_PAGE_1_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_READ,
 		  MMU_PDE_RW_MASK);

@@ -106,7 +115,7 @@ static int buffer_readable_write(void)
 {
 	PRESET_PAGE_1_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE,
 		  MMU_PDE_RW_MASK);

@@ -128,7 +137,7 @@ static int buffer_supervisor_rw(void)
 {
 	PRESET_PAGE_1_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);

@@ -149,7 +158,7 @@ static int buffer_supervisor_w(void)
 {
 	PRESET_PAGE_1_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);

@@ -170,7 +179,7 @@ static int buffer_user_rw_user(void)
 {
 	PRESET_PAGE_1_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE | MMU_ENTRY_USER,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);

@@ -189,7 +198,7 @@ static int buffer_user_rw_supervisor(void)
 {
 	PRESET_PAGE_1_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
@@ -210,12 +219,12 @@ static int multi_page_buffer_user(void)
 	PRESET_PAGE_1_VALUE;
 	PRESET_PAGE_2_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);

-	_x86_mmu_set_flags(ADDR_PAGE_2,
+	set_flags(ADDR_PAGE_2,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);

@@ -236,12 +245,12 @@ static int multi_page_buffer_write_user(void)
 	PRESET_PAGE_1_VALUE;
 	PRESET_PAGE_2_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);

-	_x86_mmu_set_flags(ADDR_PAGE_2,
+	set_flags(ADDR_PAGE_2,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);

@@ -262,12 +271,12 @@ static int multi_page_buffer_read_user(void)
 	PRESET_PAGE_1_VALUE;
 	PRESET_PAGE_2_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);

-	_x86_mmu_set_flags(ADDR_PAGE_2,
+	set_flags(ADDR_PAGE_2,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);

@@ -288,12 +297,12 @@ static int multi_page_buffer_read(void)
 	PRESET_PAGE_1_VALUE;
 	PRESET_PAGE_2_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);

-	_x86_mmu_set_flags(ADDR_PAGE_2,
+	set_flags(ADDR_PAGE_2,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
@@ -314,12 +323,12 @@ static int multi_pde_buffer_rw(void)
 	PRESET_PAGE_1_VALUE;
 	PRESET_PAGE_2_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_READ,
 		  MMU_PDE_RW_MASK);

-	_x86_mmu_set_flags(ADDR_PAGE_2,
+	set_flags(ADDR_PAGE_2,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_READ,
 		  MMU_PDE_RW_MASK);

@@ -341,12 +350,12 @@ static int multi_pde_buffer_writeable_write(void)
 	PRESET_PAGE_1_VALUE;
 	PRESET_PAGE_2_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE,
 		  MMU_PDE_RW_MASK);

-	_x86_mmu_set_flags(ADDR_PAGE_2,
+	set_flags(ADDR_PAGE_2,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE,
 		  MMU_PDE_RW_MASK);

@@ -367,12 +376,12 @@ static int multi_pde_buffer_readable_read(void)
 	PRESET_PAGE_1_VALUE;
 	PRESET_PAGE_2_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_READ,
 		  MMU_PDE_RW_MASK);

-	_x86_mmu_set_flags(ADDR_PAGE_2,
+	set_flags(ADDR_PAGE_2,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_READ,
 		  MMU_PDE_RW_MASK);

@@ -393,12 +402,12 @@ static int multi_pde_buffer_readable_write(void)
 	PRESET_PAGE_1_VALUE;
 	PRESET_PAGE_2_VALUE;

-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE,
 		  MMU_PDE_RW_MASK);

-	_x86_mmu_set_flags(ADDR_PAGE_2,
+	set_flags(ADDR_PAGE_2,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE,
 		  MMU_PDE_RW_MASK);
@@ -417,7 +426,7 @@ static int multi_pde_buffer_readable_write(void)

 void reset_flag(void)
 {
-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE | MMU_ENTRY_USER,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);

@@ -425,12 +434,12 @@ void reset_flag(void)

 void reset_multi_pte_page_flag(void)
 {
-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE | MMU_ENTRY_USER,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);

-	_x86_mmu_set_flags(ADDR_PAGE_2,
+	set_flags(ADDR_PAGE_2,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE | MMU_ENTRY_USER,
 		  MMU_PTE_RW_MASK | MMU_PTE_US_MASK);

@@ -438,12 +447,12 @@ void reset_multi_pte_page_flag(void)

 void reset_multi_pde_flag(void)
 {
-	_x86_mmu_set_flags(ADDR_PAGE_1,
+	set_flags(ADDR_PAGE_1,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE | MMU_ENTRY_USER,
 		  MMU_PDE_RW_MASK | MMU_PDE_US_MASK);

-	_x86_mmu_set_flags(ADDR_PAGE_2,
+	set_flags(ADDR_PAGE_2,
 		  MMU_PAGE_SIZE,
 		  MMU_ENTRY_WRITE | MMU_ENTRY_USER,
 		  MMU_PDE_RW_MASK | MMU_PDE_US_MASK);