x86: MMU: Create PAE page structures and unions.

Created the structures and unions software needs to access the
PAE page tables.
Also updated the helper macros to ease use of the MMU page
tables.

JIRA: ZEP-2511

Signed-off-by: Adithya Baglody <adithya.nagaraj.baglody@intel.com>
Adithya Baglody 2017-09-18 17:07:16 +05:30 committed by Andrew Boie
commit 725de70d86
5 changed files with 488 additions and 63 deletions


@@ -232,17 +232,33 @@ EXC_FUNC_NOCODE(IV_MACHINE_CHECK);
#define SGX BIT(15)
#ifdef CONFIG_X86_MMU
static void dump_entry_flags(u32_t flags)
static void dump_entry_flags(x86_page_entry_data_t flags)
{
printk("0x%03x %s %s %s\n", flags,
flags & MMU_ENTRY_PRESENT ? "Present" : "Non-present",
flags & MMU_ENTRY_WRITE ? "Writable" : "Read-only",
flags & MMU_ENTRY_USER ? "User" : "Supervisor");
#ifdef CONFIG_X86_PAE_MODE
printk("0x%x%x %s, %s, %s, %s\n", (u32_t)(flags>>32),
(u32_t)(flags),
flags & (x86_page_entry_data_t)MMU_ENTRY_PRESENT ?
"Present" : "Non-present",
flags & (x86_page_entry_data_t)MMU_ENTRY_WRITE ?
"Writable" : "Read-only",
flags & (x86_page_entry_data_t)MMU_ENTRY_USER ?
"User" : "Supervisor",
flags & (x86_page_entry_data_t)MMU_ENTRY_EXECUTE_DISABLE ?
"Execute Disable" : "Execute Enabled");
#else
printk("0x%03x %s, %s, %s\n", flags,
flags & (x86_page_entry_data_t)MMU_ENTRY_PRESENT ?
"Present" : "Non-present",
flags & (x86_page_entry_data_t)MMU_ENTRY_WRITE ?
"Writable" : "Read-only",
flags & (x86_page_entry_data_t)MMU_ENTRY_USER ?
"User" : "Supervisor");
#endif
}
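For illustration, a minimal sketch of what the PAE branch above prints. The flag value is hypothetical, built from the MMU_ENTRY_* definitions in mmustructs.h:

/* A present, writable, supervisor-only, execute-disabled entry. */
x86_page_entry_data_t flags = MMU_ENTRY_PRESENT | MMU_ENTRY_WRITE |
	MMU_ENTRY_EXECUTE_DISABLE;

dump_entry_flags(flags);
/* -> 0x8000000000000003 Present, Writable, Supervisor, Execute Disable */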
static void dump_mmu_flags(void *addr)
{
u32_t pde_flags, pte_flags;
x86_page_entry_data_t pde_flags, pte_flags;
_x86_mmu_get_flags(addr, &pde_flags, &pte_flags);
@@ -303,13 +319,17 @@ struct task_state_segment _df_tss = {
.es = DATA_SEG,
.ss = DATA_SEG,
.eip = (u32_t)_df_handler_top,
#ifdef CONFIG_X86_PAE_MODE
.cr3 = (u32_t)X86_MMU_PDPT
#else
.cr3 = (u32_t)X86_MMU_PD
#endif
};
static FUNC_NORETURN __used void _df_handler_bottom(void)
{
/* We're back in the main hardware task on the interrupt stack */
u32_t pte_flags, pde_flags;
x86_page_entry_data_t pte_flags, pde_flags;
int reason;
/* Restore the top half so it is runnable again */
@@ -358,7 +378,11 @@ static FUNC_NORETURN __used void _df_handler_top(void)
_main_tss.es = DATA_SEG;
_main_tss.ss = DATA_SEG;
_main_tss.eip = (u32_t)_df_handler_bottom;
#ifdef CONFIG_X86_PAE_MODE
_main_tss.cr3 = (u32_t)X86_MMU_PDPT;
#else
_main_tss.cr3 = (u32_t)X86_MMU_PD;
#endif
/* NT bit is set in EFLAGS so we will task switch back to _main_tss
* and run _df_handler_bottom


@@ -21,7 +21,7 @@ MMU_BOOT_REGION((u32_t)&_image_rom_start, (u32_t)&_image_rom_size,
#ifdef CONFIG_APPLICATION_MEMORY
/* User threads by default can read/write app-level memory. */
MMU_BOOT_REGION((u32_t)&__app_ram_start, (u32_t)&__app_ram_size,
MMU_ENTRY_WRITE | MMU_ENTRY_USER);
MMU_ENTRY_WRITE | MMU_ENTRY_USER | MMU_ENTRY_EXECUTE_DISABLE);
#endif
/* __kernel_ram_size includes all unused memory, which is used for heaps.
@@ -29,15 +29,22 @@ MMU_BOOT_REGION((u32_t)&__app_ram_start, (u32_t)&__app_ram_size,
* automatically for stacks.
*/
MMU_BOOT_REGION((u32_t)&__kernel_ram_start, (u32_t)&__kernel_ram_size,
MMU_ENTRY_WRITE | MMU_ENTRY_RUNTIME_USER);
MMU_ENTRY_WRITE |
MMU_ENTRY_RUNTIME_USER |
MMU_ENTRY_EXECUTE_DISABLE);
void _x86_mmu_get_flags(void *addr, u32_t *pde_flags, u32_t *pte_flags)
void _x86_mmu_get_flags(void *addr,
x86_page_entry_data_t *pde_flags,
x86_page_entry_data_t *pte_flags)
{
*pde_flags = (x86_page_entry_data_t)(X86_MMU_GET_PDE(addr)->value &
~(x86_page_entry_data_t)MMU_PDE_PAGE_TABLE_MASK);
*pde_flags = X86_MMU_GET_PDE(addr)->value & ~MMU_PDE_PAGE_TABLE_MASK;
if (*pde_flags & MMU_ENTRY_PRESENT) {
*pte_flags = X86_MMU_GET_PTE(addr)->value & ~MMU_PTE_PAGE_MASK;
*pte_flags = (x86_page_entry_data_t)
(X86_MMU_GET_PTE(addr)->value &
~(x86_page_entry_data_t)MMU_PTE_PAGE_MASK);
} else {
*pte_flags = 0;
}
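A minimal usage sketch of the widened API (the caller and `addr` are hypothetical; x86_page_entry_data_t is u64_t under PAE and u32_t otherwise):

x86_page_entry_data_t pde_flags, pte_flags;

_x86_mmu_get_flags(addr, &pde_flags, &pte_flags);
if (pte_flags & (x86_page_entry_data_t)MMU_ENTRY_WRITE) {
	/* the page containing addr is writable */
}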
@@ -50,76 +57,129 @@ int _arch_buffer_validate(void *addr, size_t size, int write)
u32_t end_pde_num;
u32_t starting_pte_num;
u32_t ending_pte_num;
struct x86_mmu_page_table *pte_address;
u32_t pde;
u32_t pte;
#ifdef CONFIG_X86_PAE_MODE
union x86_mmu_pae_pte pte_value;
u32_t start_pdpte_num = MMU_PDPTE_NUM(addr);
u32_t end_pdpte_num = MMU_PDPTE_NUM((char *)addr + size - 1);
u32_t pdpte;
#else
union x86_mmu_pte pte_value;
#endif
struct x86_mmu_page_table *pte_address;
start_pde_num = MMU_PDE_NUM(addr);
end_pde_num = MMU_PDE_NUM((char *)addr + size - 1);
starting_pte_num = MMU_PAGE_NUM((char *)addr);
/* Iterate for all the pde's the buffer might take up.
* (depends on the size of the buffer and start address of the buffer)
*/
for (pde = start_pde_num; pde <= end_pde_num; pde++) {
union x86_mmu_pde_pt pde_value = X86_MMU_PD->entry[pde].pt;
if (!pde_value.p || !pde_value.us || (write && !pde_value.rw)) {
return -EPERM;
#ifdef CONFIG_X86_PAE_MODE
for (pdpte = start_pdpte_num; pdpte <= end_pdpte_num; pdpte++) {
if (pdpte != start_pdpte_num) {
start_pde_num = 0;
}
pte_address = X86_MMU_GET_PT_ADDR(addr);
/* loop over all the possible page tables for the required
* size. If the pde is not the last one then the last pte
* would be 1023. So each pde will be using all the
* page table entries except for the last pde.
* For the last pde, pte is calculated using the last
* memory address of the buffer.
*/
if (pde != end_pde_num) {
ending_pte_num = 1023;
if (pdpte != end_pdpte_num) {
end_pde_num = 0;
} else {
ending_pte_num = MMU_PAGE_NUM((char *)addr + size - 1);
end_pde_num = MMU_PDE_NUM((char *)addr + size - 1);
}
/* All the pde's apart from the starting pde will have
* the start pte number as zero.
struct x86_mmu_page_directory *pd_address =
X86_MMU_GET_PD_ADDR_INDEX(pdpte);
#endif
/* Iterate for all the pde's the buffer might take up.
* (depends on the size of the buffer and start address
* of the buffer)
*/
if (pde != start_pde_num) {
starting_pte_num = 0;
}
for (pde = start_pde_num; pde <= end_pde_num; pde++) {
#ifdef CONFIG_X86_PAE_MODE
union x86_mmu_pae_pde pde_value =
pd_address->entry[pde];
#else
union x86_mmu_pde_pt pde_value =
X86_MMU_PD->entry[pde].pt;
#endif
pte_value.value = 0xFFFFFFFF;
if (!pde_value.p ||
!pde_value.us ||
(write && !pde_value.rw)) {
return -EPERM;
}
/* Bitwise AND all the pte values. */
for (pte = starting_pte_num; pte <= ending_pte_num; pte++) {
pte_value.value &= pte_address->entry[pte].value;
}
pte_address = (struct x86_mmu_page_table *)
(pde_value.page_table << MMU_PAGE_SHIFT);
if (!pte_value.p || !pte_value.us || (write && !pte_value.rw)) {
return -EPERM;
/* loop over all the possible page tables for the
* required size. If the pde is not the last one
* then the last pte would be 1023. So each pde
* will be using all the page table entries except
* for the last pde. For the last pde, pte is
* calculated using the last memory address
* of the buffer.
*/
if (pde != end_pde_num) {
ending_pte_num = 1023;
} else {
ending_pte_num =
MMU_PAGE_NUM((char *)addr + size - 1);
}
/* All the pde's apart from the starting pde
* will have the start pte number as zero.
*/
if (pde != start_pde_num) {
starting_pte_num = 0;
}
pte_value.value = MMU_PTE_MASK_ALL;
/* Bitwise AND all the pte values; since the accumulator
* starts with every bit set, each permission bit needs
* to be checked only once, after the loop.
*/
for (pte = starting_pte_num;
pte <= ending_pte_num;
pte++) {
pte_value.value &=
pte_address->entry[pte].value;
}
if (!pte_value.p ||
!pte_value.us ||
(write && !pte_value.rw)) {
return -EPERM;
}
}
#ifdef CONFIG_X86_PAE_MODE
}
#endif
return 0;
}
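The loop above relies on AND-accumulation: the accumulator starts as all-ones and every PTE is AND'ed into it, so the present/user/write bits are tested once per page table rather than once per page. A standalone sketch of the idea (the helper name and array form are illustrative, not part of this change):

static int all_pages_user_writable(const u32_t *entries, int n)
{
	u32_t acc = 0xFFFFFFFF;	/* start with every bit set */
	int i;

	for (i = 0; i < n; i++) {
		acc &= entries[i];	/* a bit survives only if set in all */
	}
	/* after the loop, one check covers every entry */
	return (acc & MMU_ENTRY_PRESENT) && (acc & MMU_ENTRY_USER) &&
	       (acc & MMU_ENTRY_WRITE);
}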
static inline void tlb_flush_page(void *addr)
{
/* Invalidate TLB entries corresponding to the page containing the
* specified address
*/
char *page = (char *)addr;
__asm__ ("invlpg %0" :: "m" (*page));
}
void _x86_mmu_set_flags(void *ptr, size_t size, u32_t flags, u32_t mask)
void _x86_mmu_set_flags(void *ptr,
size_t size,
x86_page_entry_data_t flags,
x86_page_entry_data_t mask)
{
#ifdef CONFIG_X86_PAE_MODE
union x86_mmu_pae_pte *pte;
#else
union x86_mmu_pte *pte;
#endif
u32_t addr = (u32_t)ptr;
@@ -128,9 +188,13 @@ void _x86_mmu_set_flags(void *ptr, size_t size, u32_t flags, u32_t mask)
while (size) {
#ifdef CONFIG_X86_PAE_MODE
/* TODO we're not generating 2MB entries at the moment */
__ASSERT(X86_MMU_GET_PDE(addr)->ps != 1, "2MB PDE found");
#else
/* TODO we're not generating 4MB entries at the moment */
__ASSERT(X86_MMU_GET_4MB_PDE(addr)->ps != 1, "4MB PDE found");
#endif
pte = X86_MMU_GET_PTE(addr);
pte->value = (pte->value & ~mask) | flags;
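A hypothetical call, assuming `page` is a page-aligned address and using MMU_PAGE_SIZE (4096), MMU_ENTRY_READ and MMU_PTE_RW_MASK from the same header: write-protect one page by rewriting only the RW bit.

/* Sketch: mark one 4 KB page read-only; the mask selects just RW. */
_x86_mmu_set_flags(page, MMU_PAGE_SIZE,
		   MMU_ENTRY_READ, MMU_PTE_RW_MASK);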


@@ -381,6 +381,8 @@
#define CR0_PAGING_ENABLE 0x80010000
/* Mask to clear the PAE bit (bit 5) in CR4 */
#define CR4_PAE_DISABLE 0xFFFFFFEF
/* Value to set the PAE bit (bit 5) in CR4 */
#define CR4_PAE_ENABLE 0x00000020
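A sketch of how CR4_PAE_ENABLE would be OR'ed into CR4 during early boot (illustrative inline assembly; the boot-path change itself is not part of this hunk):

u32_t cr4;

/* read CR4, set the PAE bit (bit 5), write it back */
__asm__ volatile("movl %%cr4, %0" : "=r" (cr4));
cr4 |= CR4_PAE_ENABLE;
__asm__ volatile("movl %0, %%cr4" : : "r" (cr4));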
#ifndef _ASMLANGUAGE


@@ -29,7 +29,21 @@
#define MMU_PDE_A_MASK 0x00000020
#define MMU_PDE_PS_MASK 0x00000080
#define MMU_PDE_IGNORED_MASK 0x00000F40
#define MMU_PDE_PAGE_TABLE_MASK 0xfffff000
#ifdef CONFIG_X86_PAE_MODE
#define MMU_PDE_XD_MASK 0x8000000000000000
#define MMU_PDE_PAGE_TABLE_MASK 0x00000000fffff000
#define MMU_PDE_NUM_SHIFT 21
#define MMU_PDE_NUM(v) (((u32_t)(v) >> MMU_PDE_NUM_SHIFT) & 0x1ff)
#define MMU_ENTRIES_PER_PGT 512
#define MMU_PDPTE_NUM_SHIFT 30
#define MMU_PDPTE_NUM(v) (((u32_t)(v) >> MMU_PDPTE_NUM_SHIFT) & 0x3)
#else
#define MMU_PDE_PAGE_TABLE_MASK 0xfffff000
#define MMU_PDE_NUM_SHIFT 22
#define MMU_PDE_NUM(v) ((u32_t)(v) >> MMU_PDE_NUM_SHIFT)
#define MMU_ENTRIES_PER_PGT 1024
#endif
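Worked example of the PAE index split (2 + 9 + 9 + 12 bits of a 32-bit linear address; the address itself is arbitrary, and MMU_PAGE_NUM is defined further down in this header):

/* 0x40123ABC under CONFIG_X86_PAE_MODE */
void *addr = (void *)0x40123ABC;
u32_t pdpte = MMU_PDPTE_NUM(addr);	/* bits 31:30 -> 1     */
u32_t pde = MMU_PDE_NUM(addr);		/* bits 29:21 -> 0     */
u32_t pte = MMU_PAGE_NUM(addr);		/* bits 20:12 -> 0x123 */
u32_t offset = (u32_t)addr & 0xfff;	/* bits 11:0  -> 0xABC */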
/*
* The following bitmasks correspond to the bit-fields in the
@@ -53,11 +67,6 @@
#define MMU_4MB_PDE_CLEAR_PS 0x00000000
#define MMU_4MB_PDE_SET_PS 0x00000080
#define MMU_PDE_NUM_SHIFT 22
#define MMU_PDE_NUM(v) ((u32_t)(v) >> MMU_PDE_NUM_SHIFT)
#define MMU_PGT_NUM(v) MMU_PDE_NUM(v)
#define MMU_P4M_NUM(v) MMU_PDE_NUM(v)
#define MMU_ENTRIES_PER_PGT 1024
/*
* The following bitmasks correspond to the bit-fields in the
@@ -75,11 +84,19 @@
#define MMU_PTE_G_MASK 0x00000100
#define MMU_PTE_ALLOC_MASK 0x00000200
#define MMU_PTE_CUSTOM_MASK 0x00000c00
#ifdef CONFIG_X86_PAE_MODE
#define MMU_PTE_XD_MASK 0x8000000000000000
#define MMU_PTE_PAGE_MASK 0x00000000fffff000
#define MMU_PTE_MASK_ALL 0xffffffffffffffff
#define MMU_PAGE_NUM(v) (((u32_t)(v) >> MMU_PAGE_NUM_SHIFT) & 0x1ff)
#else
#define MMU_PTE_PAGE_MASK 0xfffff000
#define MMU_PTE_MASK_ALL 0xffffffff
#define MMU_PAGE_NUM(v) (((u32_t)(v) >> MMU_PAGE_NUM_SHIFT) & 0x3ff)
#endif
#define MMU_PAGE_NUM_SHIFT 12
#define MMU_PAGE_NUM(v) (((u32_t)(v) >> MMU_PAGE_NUM_SHIFT) & 0x3ff)
/*
* The following values are to be OR'ed together to mark the use or
@@ -113,6 +130,11 @@
#define MMU_ENTRY_NOT_ALLOC 0x00000000
#define MMU_ENTRY_ALLOC 0x00000200
#ifdef CONFIG_X86_PAE_MODE
#define MMU_ENTRY_EXECUTE_DISABLE 0x8000000000000000
#else
#define MMU_ENTRY_EXECUTE_DISABLE 0x0
#endif
/* Special flag argument for MMU_BOOT region invocations */
@@ -137,11 +159,60 @@
#define MMU_ENTRY_RUNTIME_WRITE 0x20000000
/* Helper macros to ease the usage of the MMU page table structures.
*/
#ifdef CONFIG_X86_PAE_MODE
/*
* Returns the page table entry for the addr;
* use the union to extract page table entry related information.
*/
#define X86_MMU_GET_PTE(addr)\
((union x86_mmu_pae_pte *)\
(&X86_MMU_GET_PT_ADDR(addr)->entry[MMU_PAGE_NUM(addr)]))
/*
* Returns the page table address for the particular address.
* Page table address (returned value) is always 4 KB aligned.
*/
#define X86_MMU_GET_PT_ADDR(addr) \
((struct x86_mmu_page_table *)\
(X86_MMU_GET_PDE(addr)->page_table << MMU_PAGE_SHIFT))
/* Returns the page directory entry for the addr;
* use the union to extract page directory entry related information.
*/
#define X86_MMU_GET_PDE(addr)\
((union x86_mmu_pae_pde *) \
(&X86_MMU_GET_PD_ADDR(addr)->entry[MMU_PDE_NUM(addr)]))
/* Returns the page directory address for the particular address.
* The returned value is always 4 KB aligned.
*/
#define X86_MMU_GET_PD_ADDR(addr) \
((struct x86_mmu_page_directory *) \
(X86_MMU_GET_PDPTE(addr)->page_directory << MMU_PAGE_SHIFT))
/* Returns the page directory pointer entry */
#define X86_MMU_GET_PDPTE(addr) \
((union x86_mmu_pae_pdpte *) \
(&X86_MMU_PDPT->entry[MMU_PDPTE_NUM(addr)]))
/* Returns the page directory address.
* Input is the entry number.
*/
#define X86_MMU_GET_PD_ADDR_INDEX(index) \
((struct x86_mmu_page_directory *) \
(X86_MMU_GET_PDPTE_INDEX(index)->page_directory << MMU_PAGE_SHIFT))
/* Returns the page directory pointer entry.
* Input is the entry number
*/
#define X86_MMU_GET_PDPTE_INDEX(index) \
((union x86_mmu_pae_pdpte *)(&X86_MMU_PDPT->entry[index]))
#else
/* Normal 32-bit paging */
#define X86_MMU_GET_PT_ADDR(addr) \
((struct x86_mmu_page_table *)\
(X86_MMU_PD->entry[MMU_PDE_NUM(addr)].pt.page_table \
@@ -161,6 +232,8 @@
((union x86_mmu_pde_pt *)\
(&X86_MMU_PD->entry[MMU_PDE_NUM(addr)].pt))
#define X86_MMU_GET_PD_ADDR(addr) (X86_MMU_PD)
/* Returns the 4 MB page directory entry for the addr
* use the union to extract page directory entry related information.
*/
@@ -168,6 +241,9 @@
((union x86_mmu_pde_4mb *)\
(&X86_MMU_PD->entry[MMU_PDE_NUM(addr)].fourmb))
#endif /* CONFIG_X86_PAE_MODE */
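Taken together, the PAE helper macros perform a full software page walk; each level's macro expands in terms of the previous one. A sketch, assuming a PAE build and some mapped address `addr`:

union x86_mmu_pae_pdpte *pdpte = X86_MMU_GET_PDPTE(addr); /* level 1 */
union x86_mmu_pae_pde *pde = X86_MMU_GET_PDE(addr);       /* level 2 */
union x86_mmu_pae_pte *pte = X86_MMU_GET_PTE(addr);       /* level 3 */
struct x86_mmu_page_table *pt = X86_MMU_GET_PT_ADDR(addr);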
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
@@ -177,7 +253,7 @@
struct mmu_region {
u32_t address; /* Start address of the memory region */
u32_t size; /* Size of the memory region */
u32_t flags; /* Permissions needed for this region*/
u64_t flags; /* Permissions needed for this region*/
};
/* permission_flags are calculated using the macros
@@ -381,20 +457,266 @@ union x86_mmu_pte {
};
};
/* PAE paging mode structures and unions */
/*
* The following defines the format of a 64-bit page directory pointer entry
* that references a page directory
*/
union x86_mmu_pae_pdpte {
/** access the page directory pointer entry through use of bitmasks */
u64_t value;
struct {
/** present: must be 1 to reference a page directory */
u64_t p:1;
u64_t reserved:2;
/** page-level write-through: determines the memory type used
* to access the page directory referenced by this entry
*/
u64_t pwt:1;
/** page-level cache disable: determines the memory
* type used to access the page directory referenced by
* this entry
*/
u64_t pcd:1;
u64_t ignored1:7;
/** page directory: physical address of the page directory */
u64_t page_directory:20;
u64_t ignored3:32;
};
};
/*
* The following defines the format of a 64-bit page directory entry
* that references a page table (as opposed to a 2 Mb page).
*/
union x86_mmu_pae_pde {
/** access Page directory entry through use of bitmasks */
u64_t value;
struct {
/** present: must be 1 to reference a page table */
u64_t p:1;
/** read/write: if 0, writes may not be allowed to the region
* controlled by this entry
*/
u64_t rw:1;
/** user/supervisor: if 0, accesses with CPL=3 are not allowed
* to the region controlled by this entry
*/
u64_t us:1;
/** page-level write-through: determines the memory type used
* to access the page table referenced by this entry
*/
u64_t pwt:1;
/** page-level cache disable: determines the memory
* type used to access the page table referenced by
* this entry
*/
u64_t pcd:1;
/** accessed: if 1 -> entry has been used to translate
*/
u64_t a:1;
u64_t ignored1:1;
/** page size: if set, this entry maps a 2 Mb page instead of
* referencing a page table
*/
u64_t ps:1;
u64_t ignored2:4;
/** page table: physical address of page table */
u64_t page_table:20;
u64_t ignored3:31;
/* Execute disable */
u64_t xd:1;
};
};
/*
* The following defines the format of a 64-bit page directory entry
* that references a 2 Mb page (as opposed to a page table).
*/
union x86_mmu_pae_pde_2mb {
u64_t value;
struct {
/** present: must be 1 to map a 2 Mb page */
u64_t p:1;
/** read/write: if 0, writes may not be allowed to the 2 Mb
* page referenced by this entry
*/
u64_t rw:1;
/** user/supervisor: if 0, accesses with CPL=3 are not allowed
* to the 2 Mb page referenced by this entry
*/
u64_t us:1;
/** page-level write-through: determines the memory type used
* to access the 2 Mb page referenced by
* this entry
*/
u64_t pwt:1;
/** page-level cache disable: determines the memory type used
* to access the 2 Mb page referenced by this entry
*/
u64_t pcd:1;
/** accessed: if 1 -> entry has been used to translate */
u64_t a:1;
/** dirty: indicates whether software has written to the 2 Mb
* page referenced by this entry
*/
u64_t d:1;
/** page size: must be 1, otherwise this entry references a
* page table
*/
u64_t ps:1;
/** global: if CR4.PGE=1, then determines whether this
* translation is global, i.e. used regardless of PCID
*/
u64_t g:1;
u64_t ignored1:3;
/** If PAT is supported, indirectly determines the memory type
* used to access the 2 Mb page, otherwise must be 0
*/
u64_t pat:1;
u64_t reserved1:8;
/** page: physical address of the 2 Mb page */
u64_t page_table:11;
u64_t reserved2:31;
/** execute disable */
u64_t xd:1;
};
};
/*
* The following defines the format of a 64-bit page table entry that maps
* a 4 Kb page.
*/
union x86_mmu_pae_pte {
u64_t value;
struct {
/** present: must be 1 to map a 4 Kb page */
u64_t p:1;
/** read/write: if 0, writes may not be allowed to the 4 Kb
* page controlled by this entry
*/
u64_t rw:1;
/** user/supervisor: if 0, accesses with CPL=3 are not allowed
* to the 4 Kb page controlled by this entry
*/
u64_t us:1;
/** page-level write-through: determines the memory type used
* to access the 4 Kb page referenced by this entry
*/
u64_t pwt:1;
/** page-level cache disable: determines the memory type used
* to access the 4 Kb page referenced by this entry
*/
u64_t pcd:1;
/** accessed: if 1 -> 4 Kb page has been referenced */
u64_t a:1;
/** dirty: if 1 -> 4 Kb page has been written to */
u64_t d:1;
/** If PAT is supported, indirectly determines the memory type
* used to access the 4 Kb page, otherwise must be 0
*/
u64_t pat:1;
/** global: if CR4.PGE=1, then determines whether this
* translation is global, i.e. used regardless of PCID
*/
u64_t g:1;
/** ignored: these 3 bits are ignored by the MMU and are
* available for software use
*/
u64_t ignore1:3;
/** page: physical address of the 4 Kb page */
u64_t page:20;
u64_t ignore2:31;
/* Execute disable */
u64_t xd:1;
};
};
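A sketch of recovering the physical address mapped by a PAE PTE (MMU_PAGE_SHIFT is the existing 4 KB shift of 12; `addr` is hypothetical):

union x86_mmu_pae_pte *pte = X86_MMU_GET_PTE(addr);
u32_t phys = (u32_t)(pte->page << MMU_PAGE_SHIFT) |
	     ((u32_t)addr & 0xfff);	/* frame base + page offset */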
#ifdef CONFIG_X86_PAE_MODE
typedef u64_t x86_page_entry_data_t;
#else
typedef u32_t x86_page_entry_data_t;
#endif
#ifdef CONFIG_X86_PAE_MODE
struct x86_mmu_page_directory_pointer {
union x86_mmu_pae_pdpte entry[512];
};
#endif
union x86_mmu_pde {
#ifndef CONFIG_X86_PAE_MODE
union x86_mmu_pde_pt pt;
union x86_mmu_pde_4mb fourmb;
#else
union x86_mmu_pae_pde pt;
union x86_mmu_pae_pde_2mb twomb;
#endif
};
/** Page Directory structure for 32-bit paging mode */
/** Page Directory structure for 32-bit/PAE paging mode */
struct x86_mmu_page_directory {
#ifndef CONFIG_X86_PAE_MODE
union x86_mmu_pde entry[1024];
#else
union x86_mmu_pae_pde entry[512];
#endif
};
/** Page Table structure for 32-bit paging mode */
/** Page Table structure for 32-bit/PAE paging mode */
struct x86_mmu_page_table {
#ifndef CONFIG_X86_PAE_MODE
union x86_mmu_pte entry[1024];
#else
union x86_mmu_pae_pte entry[512];
#endif
};
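Size sanity check: with 8-byte PAE entries, 512 entries make each page directory and page table exactly one 4 KB page, matching the 9-bit index macros above. A compile-time sketch (C11 _Static_assert, assuming a PAE build):

_Static_assert(sizeof(union x86_mmu_pae_pte) == 8,
	       "PAE entries are 64-bit");
_Static_assert(sizeof(struct x86_mmu_page_table) == 4096,
	       "a page table spans exactly one 4 KB page");
_Static_assert(sizeof(struct x86_mmu_page_directory) == 4096,
	       "a page directory spans exactly one 4 KB page");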
#endif /* _ASMLANGUAGE */


@@ -754,11 +754,18 @@ static inline int _arch_is_user_context(void)
extern const NANO_ESF _default_esf;
#ifdef CONFIG_X86_MMU
/* Linker variable. It needed to access the start of the Page directory */
extern u32_t __mmu_tables_start;
/* Linker variable. It is needed to access the start of the Page directory */
#ifdef CONFIG_X86_PAE_MODE
extern u64_t __mmu_tables_start;
#define X86_MMU_PDPT ((struct x86_mmu_page_directory_pointer *)\
(u32_t *)(void *)&__mmu_tables_start)
#else
extern u32_t __mmu_tables_start;
#define X86_MMU_PD ((struct x86_mmu_page_directory *)\
(void *)&__mmu_tables_start)
#endif
/**
@@ -771,7 +778,9 @@ extern u32_t __mmu_tables_start;
* @param pde_flags Output parameter for page directory entry flags
* @param pte_flags Output parameter for page table entry flags
*/
void _x86_mmu_get_flags(void *addr, u32_t *pde_flags, u32_t *pte_flags);
void _x86_mmu_get_flags(void *addr,
x86_page_entry_data_t *pde_flags,
x86_page_entry_data_t *pte_flags);
/**
@@ -786,7 +795,11 @@ void _x86_mmu_get_flags(void *addr, u32_t *pde_flags, u32_t *pte_flags);
* @mask Mask indicating which particular bits in the page table entries to
* modify
*/
void _x86_mmu_set_flags(void *ptr, size_t size, u32_t flags, u32_t mask);
void _x86_mmu_set_flags(void *ptr,
size_t size,
x86_page_entry_data_t flags,
x86_page_entry_data_t mask);
#endif /* CONFIG_X86_MMU */
#endif /* !_ASMLANGUAGE */