x86: generate runtime 64-bit page tables

- Bring in CONFIG_X86_MMU and some related defines to
  common X86 Kconfig
- Don't set ARCH_HAS_USERSPACE for intel64 yet when
  X86_MMU is enabled
- Uplevel x86_mmu.c to common code
- Add logic for handling PML4 table and generating PDPTs
- Move z_x86_paging_init() to common kernel_arch_func.h
- Uplevel inclusion of mmustructs.h to common x86 arch.h, as
  both need it for memory domain defines

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Author: Andrew Boie <andrew.p.boie@intel.com>, 2019-10-10 13:05:43 -07:00
Committed by: Andrew Boie
Commit: f6e82ea1bd
10 changed files with 398 additions and 142 deletions
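For orientation (not part of the commit): the long-mode accessors added below decode a virtual address into four 9-bit table indices (PML4, PDPT, PD, PT) plus a 12-bit page offset. A minimal standalone sketch of that decomposition, using an arbitrary example address:

#include <stdint.h>
#include <stdio.h>

/* Each x86 long-mode paging level has 512 entries, so each level
 * consumes 9 bits of the virtual address; the low 12 bits are the
 * offset within a 4K page. The shifts match the accessors in the
 * diff below (>> 39 for PML4, >> 30 PDPT, >> 21 PD, >> 12 PT).
 */
int main(void)
{
	uint64_t addr = 0x0000003f80201000ULL; /* arbitrary example */

	printf("PML4E %llu\n", (unsigned long long)((addr >> 39) & 0x1FF));
	printf("PDPTE %llu\n", (unsigned long long)((addr >> 30) & 0x1FF));
	printf("PDE   %llu\n", (unsigned long long)((addr >> 21) & 0x1FF));
	printf("PTE   %llu\n", (unsigned long long)((addr >> 12) & 0x1FF));
	printf("offset 0x%llx\n", (unsigned long long)(addr & 0xFFF));
	return 0;
}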


@@ -15,6 +15,7 @@
#include <stddef.h>
#include <stdbool.h>
#include <irq.h>
+#include <arch/x86/mmustructs.h>
static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
{


@@ -17,7 +17,6 @@
#include "sys_io.h"
#include <drivers/interrupt_controller/sysapic.h>
#include <kernel_arch_thread.h>
-#include <arch/x86/mmustructs.h>
#include <stdbool.h>
#include <arch/common/ffs.h>
#include <misc/util.h>


@@ -10,7 +10,7 @@
#include <sys/util.h>
-#define MMU_PAGE_SIZE 4096U
+#define MMU_PAGE_SIZE 4096UL
#define MMU_PAGE_MASK 0xfffU
#define MMU_PAGE_SHIFT 12U
#define PAGES(x) ((x) << (MMU_PAGE_SHIFT))
@@ -38,17 +38,32 @@
#define Z_X86_MMU_G BIT64(8) /** Global */
#define Z_X86_MMU_XD BIT64(63) /** Execute Disable */
+#ifdef CONFIG_X86_LONGMODE
+#define Z_X86_MMU_PROT_KEY_MASK 0x7800000000000000ULL
+#endif
/*
* Structure-specific flags / masks
*/
#define Z_X86_MMU_PDPTE_PAT BIT64(12)
#define Z_X86_MMU_PDE_PAT BIT64(12)
#define Z_X86_MMU_PTE_PAT BIT64(7) /** Page Attribute Table */
-#define Z_X86_MMU_PDPTE_PD_MASK 0x00000000FFFFF000ULL
-#define Z_X86_MMU_PDE_PT_MASK 0x00000000FFFFF000ULL
-#define Z_X86_MMU_PDE_2MB_MASK 0x00000000FFC00000ULL
-#define Z_X86_MMU_PTE_ADDR_MASK 0x00000000FFFFF000ULL
+/* The true size of the mask depends on MAXADDR, which is found at run-time.
+ * As a simplification, roll the area for the memory address, and the
+ * reserved or ignored regions immediately above it, into a single area.
+ * This will work as expected if valid memory addresses are written.
+ */
+#ifdef CONFIG_X86_LONGMODE
+#define Z_X86_MMU_PML4E_PDPT_MASK 0x7FFFFFFFFFFFF000ULL
+#endif
+#define Z_X86_MMU_PDPTE_PD_MASK 0x7FFFFFFFFFFFF000ULL
+#ifdef CONFIG_X86_LONGMODE
+#define Z_X86_MMU_PDPTE_1G_MASK 0x07FFFFFFC0000000ULL
+#endif
+#define Z_X86_MMU_PDE_PT_MASK 0x7FFFFFFFFFFFF000ULL
+#define Z_X86_MMU_PDE_2MB_MASK 0x07FFFFFFFFC00000ULL
+#define Z_X86_MMU_PTE_ADDR_MASK 0x07FFFFFFFFFFF000ULL
/*
* These flags indicate intention when setting access properties.
@@ -148,9 +163,14 @@ struct mmu_region {
#define MMU_BOOT_REGION(addr, region_size, permission_flags) \
Z_MMU_BOOT_REGION(__COUNTER__, addr, region_size, permission_flags)
-#define Z_X86_NUM_PDPT_ENTRIES 4
-#define Z_X86_NUM_PD_ENTRIES 512
-#define Z_X86_NUM_PT_ENTRIES 512
+#ifdef CONFIG_X86_LONGMODE
+#define Z_X86_NUM_PML4_ENTRIES 512U
+#define Z_X86_NUM_PDPT_ENTRIES 512U
+#else
+#define Z_X86_NUM_PDPT_ENTRIES 4U
+#endif
+#define Z_X86_NUM_PD_ENTRIES 512U
+#define Z_X86_NUM_PT_ENTRIES 512U
/* Memory range covered by an instance of various table types */
#define Z_X86_PT_AREA (MMU_PAGE_SIZE * Z_X86_NUM_PT_ENTRIES)
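For reference (not part of the diff; macro names here are hypothetical): each table level multiplies coverage by its entry count, which is also why MMU_PAGE_SIZE gained the UL suffix in the hunk above; the PD-level product already overflows 32-bit arithmetic.

#include <stdio.h>

/* Coverage per table instance, derived from the defines above. */
#define PT_AREA_64   (4096ULL * 512ULL)      /* 2MB per page table */
#define PD_AREA_64   (PT_AREA_64 * 512ULL)   /* 1GB per page directory */
#define PDPT_AREA_64 (PD_AREA_64 * 512ULL)   /* 512GB per PDPT (long mode) */
#define PML4_AREA_64 (PDPT_AREA_64 * 512ULL) /* 256TB per PML4 */

int main(void)
{
	printf("PT %llu PD %llu PDPT %llu PML4 %llu\n",
	       PT_AREA_64, PD_AREA_64, PDPT_AREA_64, PML4_AREA_64);
	return 0;
}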
@@ -159,6 +179,12 @@ struct mmu_region {
typedef u64_t k_mem_partition_attr_t;
+#ifdef CONFIG_X86_LONGMODE
+struct x86_mmu_pml4 {
+u64_t entry[Z_X86_NUM_PML4_ENTRIES];
+};
+#endif
struct x86_mmu_pdpt {
u64_t entry[Z_X86_NUM_PDPT_ENTRIES];
};
@@ -172,12 +198,32 @@ struct x86_mmu_pt {
};
struct x86_page_tables {
+#ifdef CONFIG_X86_LONGMODE
+struct x86_mmu_pml4 pml4;
+#else
struct x86_mmu_pdpt pdpt;
+#endif
};
/*
* Inline functions for getting the next linked structure
*/
+#ifdef CONFIG_X86_LONGMODE
+static inline u64_t *z_x86_pml4_get_pml4e(struct x86_mmu_pml4 *pml4,
+uintptr_t addr)
+{
+int index = (addr >> 39U) & (Z_X86_NUM_PML4_ENTRIES - 1);
+return &pml4->entry[index];
+}
+static inline struct x86_mmu_pdpt *z_x86_pml4e_get_pdpt(u64_t pml4e)
+{
+uintptr_t addr = pml4e & Z_X86_MMU_PML4E_PDPT_MASK;
+return (struct x86_mmu_pdpt *)addr;
+}
+#endif
static inline u64_t *z_x86_pdpt_get_pdpte(struct x86_mmu_pdpt *pdpt,
uintptr_t addr)
@@ -191,6 +237,9 @@ static inline struct x86_mmu_pd *z_x86_pdpte_get_pd(u64_t pdpte)
{
uintptr_t addr = pdpte & Z_X86_MMU_PDPTE_PD_MASK;
+#ifdef CONFIG_X86_LONGMODE
+__ASSERT((pdpte & Z_X86_MMU_PS) == 0, "PDPT is for 1GB page");
+#endif
return (struct x86_mmu_pd *)addr;
}
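The new assertion guards a real pitfall: when the PS flag (Z_X86_MMU_PS) is set in a PDPTE, the entry maps a 1GB page directly, and the bits that would otherwise hold the page-directory address mean something else (frame address and PAT). A minimal sketch of the same check outside the kernel; PS_BIT and the mask value are assumptions mirroring the defines above:

#include <assert.h>
#include <stdint.h>

#define PS_BIT       (1ULL << 7)            /* page-size flag, as Z_X86_MMU_PS */
#define PD_ADDR_MASK 0x7FFFFFFFFFFFF000ULL  /* as Z_X86_MMU_PDPTE_PD_MASK */

/* Mirrors z_x86_pdpte_get_pd(): only meaningful when the entry does
 * NOT map a 1GB page, since a huge-page PDPTE holds a frame address
 * and PAT bit where a page-directory pointer would otherwise be.
 */
static inline uint64_t *pdpte_to_pd(uint64_t pdpte)
{
	assert((pdpte & PS_BIT) == 0);
	return (uint64_t *)(uintptr_t)(pdpte & PD_ADDR_MASK);
}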
@@ -221,6 +270,25 @@ static inline u64_t *z_x86_pt_get_pte(struct x86_mmu_pt *pt, uintptr_t addr)
* Inline functions for obtaining page table structures from the top-level
*/
+#ifdef CONFIG_X86_LONGMODE
+static inline struct x86_mmu_pml4 *
+z_x86_get_pml4(struct x86_page_tables *ptables)
+{
+return &ptables->pml4;
+}
+static inline u64_t *z_x86_get_pml4e(struct x86_page_tables *ptables,
+uintptr_t addr)
+{
+return z_x86_pml4_get_pml4e(z_x86_get_pml4(ptables), addr);
+}
+static inline struct x86_mmu_pdpt *
+z_x86_get_pdpt(struct x86_page_tables *ptables, uintptr_t addr)
+{
+return z_x86_pml4e_get_pdpt(*z_x86_get_pml4e(ptables, addr));
+}
+#else
static inline struct x86_mmu_pdpt *
z_x86_get_pdpt(struct x86_page_tables *ptables, uintptr_t addr)
{
@@ -228,6 +296,7 @@ z_x86_get_pdpt(struct x86_page_tables *ptables, uintptr_t addr)
return &ptables->pdpt;
}
+#endif /* CONFIG_X86_LONGMODE */
static inline u64_t *z_x86_get_pdpte(struct x86_page_tables *ptables,
uintptr_t addr)
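Put together, the long-mode accessors compose into a four-step table walk. A hedged usage sketch, assuming CONFIG_X86_LONGMODE; z_x86_pd_get_pde and z_x86_pde_get_pt are assumed to follow the same naming pattern as the accessors shown and do not appear in this excerpt:

/* Resolve the PTE mapping `addr` by chaining the accessors above.
 * Illustrative only; a real walk would also check the Present and
 * PS bits at each level before dereferencing the next table.
 */
static u64_t *walk_to_pte(struct x86_page_tables *ptables, uintptr_t addr)
{
	u64_t *pml4e = z_x86_get_pml4e(ptables, addr);
	struct x86_mmu_pdpt *pdpt = z_x86_pml4e_get_pdpt(*pml4e);
	u64_t *pdpte = z_x86_pdpt_get_pdpte(pdpt, addr);
	struct x86_mmu_pd *pd = z_x86_pdpte_get_pd(*pdpte); /* asserts !1GB page */
	u64_t *pde = z_x86_pd_get_pde(pd, addr);            /* assumed name */
	struct x86_mmu_pt *pt = z_x86_pde_get_pt(*pde);     /* assumed name */

	return z_x86_pt_get_pte(pt, addr);
}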