x86: move page table reservation macros
We don't need this for stacks any more; it is only used to pre-calculate
the size of the boot page tables. Move these macros into the C code, as
they don't need to be in a header anywhere. Names adjusted for
conciseness.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent 39523f45c5
commit a15be58019
2 changed files with 90 additions and 109 deletions
@@ -61,13 +61,6 @@ static struct k_spinlock x86_mmu_lock;
 static sys_slist_t x86_domain_list;
 #endif
 
-/* "dummy" pagetables for the first-phase build. The real page tables
- * are produced by gen_mmu.py based on data read in zephyr-prebuilt.elf,
- * and this dummy array is discarded.
- */
-Z_GENERIC_SECTION(.dummy_pagetables)
-char z_x86_dummy_pagetables[Z_X86_INITIAL_PAGETABLE_SIZE];
-
 /*
  * Definitions for building an ontology of paging levels and capabilities
  * at each level
@@ -178,6 +171,96 @@ static const struct paging_level paging_levels[] = {
 
 #define NUM_LEVELS ARRAY_SIZE(paging_levels)
 
+/*
+ * Macros for reserving space for page tables
+ *
+ * We need to reserve a block of memory equal in size to the page tables
+ * generated by gen_mmu.py so that memory addresses do not shift between
+ * build phases. These macros ultimately specify INITIAL_PTABLE_SIZE.
+ */
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+#ifdef CONFIG_X86_64
+#define NUM_PML4_ENTRIES 512U
+#define NUM_PDPT_ENTRIES 512U
+#else
+#define NUM_PDPT_ENTRIES 4U
+#endif /* CONFIG_X86_64 */
+#define NUM_PD_ENTRIES 512U
+#define NUM_PT_ENTRIES 512U
+#else
+#define NUM_PD_ENTRIES 1024U
+#define NUM_PT_ENTRIES 1024U
+#endif /* !CONFIG_X86_64 && !CONFIG_X86_PAE */
+
+/* Memory range covered by an instance of various table types */
+#define PT_AREA ((uintptr_t)(CONFIG_MMU_PAGE_SIZE * NUM_PT_ENTRIES))
+#define PD_AREA (PT_AREA * NUM_PD_ENTRIES)
+#ifdef CONFIG_X86_64
+#define PDPT_AREA (PD_AREA * NUM_PDPT_ENTRIES)
+#endif
+
+#define VM_ADDR DT_REG_ADDR(DT_CHOSEN(zephyr_sram))
+#define VM_SIZE DT_REG_SIZE(DT_CHOSEN(zephyr_sram))
+
+/* Define a range [PT_START, PT_END) which is the memory range
+ * covered by all the page tables needed for system RAM
+ */
+#define PT_START ((uintptr_t)ROUND_DOWN(VM_ADDR, PT_AREA))
+#define PT_END ((uintptr_t)ROUND_UP(VM_ADDR + VM_SIZE, PT_AREA))
+
+/* Number of page tables needed to cover system RAM. Depends on the specific
+ * bounds of system RAM, but roughly 1 page table per 2MB of RAM
+ */
+#define NUM_PT ((PT_END - PT_START) / PT_AREA)
+
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+/* Same semantics as above, but for the page directories needed to cover
+ * system RAM.
+ */
+#define PD_START ((uintptr_t)ROUND_DOWN(VM_ADDR, PD_AREA))
+#define PD_END ((uintptr_t)ROUND_UP(VM_ADDR + VM_SIZE, PD_AREA))
+/* Number of page directories needed to cover system RAM. Depends on the
+ * specific bounds of system RAM, but roughly 1 page directory per 1GB of RAM
+ */
+#define NUM_PD ((PD_END - PD_START) / PD_AREA)
+#else
+/* 32-bit page tables just have one toplevel page directory */
+#define NUM_PD 1
+#endif
+
+#ifdef CONFIG_X86_64
+/* Same semantics as above, but for the page directory pointer tables needed
+ * to cover system RAM. On 32-bit there is just one 4-entry PDPT.
+ */
+#define PDPT_START ((uintptr_t)ROUND_DOWN(VM_ADDR, PDPT_AREA))
+#define PDPT_END ((uintptr_t)ROUND_UP(VM_ADDR + VM_SIZE, PDPT_AREA))
+/* Number of PDPTs needed to cover system RAM. Depends on the
+ * specific bounds of system RAM, but roughly 1 PDPT per 512GB of RAM
+ */
+#define NUM_PDPT ((PDPT_END - PDPT_START) / PDPT_AREA)
+
+/* All pages needed for page tables, using computed values plus one more for
+ * the top-level PML4
+ */
+#define NUM_TABLE_PAGES (NUM_PT + NUM_PD + NUM_PDPT + 1)
+#else /* !CONFIG_X86_64 */
+/* Number of pages we need to reserve in the stack for per-thread page tables */
+#define NUM_TABLE_PAGES (NUM_PT + NUM_PD)
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_X86_PAE
+/* Toplevel PDPT wasn't included as it is not a page in size */
+#define INITIAL_PTABLE_SIZE ((NUM_TABLE_PAGES * CONFIG_MMU_PAGE_SIZE) + 0x20)
+#else
+#define INITIAL_PTABLE_SIZE (NUM_TABLE_PAGES * CONFIG_MMU_PAGE_SIZE)
+#endif
+
+/* "dummy" pagetables for the first-phase build. The real page tables
+ * are produced by gen_mmu.py based on data read in zephyr-prebuilt.elf,
+ * and this dummy array is discarded.
+ */
+Z_GENERIC_SECTION(.dummy_pagetables)
+static __used char dummy_pagetables[INITIAL_PTABLE_SIZE];
+
 /*
  * Utility functions
  */
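To make the reservation math above concrete, here is a small, self-contained C sketch of the same computation for a hypothetical 32-bit non-PAE target: a 4 KB page size and 256 MB of system RAM at 0x00100000. These constants are invented stand-ins for CONFIG_MMU_PAGE_SIZE and the devicetree's chosen zephyr,sram values, and the rounding helpers assume power-of-two alignment:

```c
#include <stdio.h>

/* Hypothetical stand-ins for CONFIG_MMU_PAGE_SIZE and the chosen
 * zephyr,sram node's DT_REG_ADDR/DT_REG_SIZE values.
 */
#define PAGE_SIZE_BYTES 4096UL
#define RAM_ADDR        0x00100000UL              /* start of system RAM */
#define RAM_SIZE        (256UL * 1024UL * 1024UL) /* 256 MB of RAM */

/* 32-bit non-PAE: 1024 entries per page table, so each page table
 * covers 4 MB of virtual address space.
 */
#define NUM_PT_ENTRIES  1024UL
#define PT_AREA         (PAGE_SIZE_BYTES * NUM_PT_ENTRIES)

/* Power-of-two rounding, mirroring how ROUND_DOWN/ROUND_UP are used above */
#define ROUND_DOWN(x, align) ((x) & ~((align) - 1UL))
#define ROUND_UP(x, align)   ROUND_DOWN((x) + (align) - 1UL, (align))

int main(void)
{
	unsigned long pt_start = ROUND_DOWN(RAM_ADDR, PT_AREA);
	unsigned long pt_end = ROUND_UP(RAM_ADDR + RAM_SIZE, PT_AREA);
	unsigned long num_pt = (pt_end - pt_start) / PT_AREA;

	/* One page per page table, plus one page for the single
	 * toplevel page directory on 32-bit non-PAE (NUM_PD == 1).
	 */
	unsigned long table_pages = num_pt + 1UL;

	printf("page tables: %lu, reservation: %lu bytes\n",
	       num_pt, table_pages * PAGE_SIZE_BYTES);
	return 0;
}
```

With 1024 entries per table, each page table spans 4 MB here, so this reports 65 page tables plus one page directory, a 66-page (270336-byte) reservation. On PAE and x86_64 each table has 512 entries instead, which is where the comment's "roughly 1 page table per 2MB" figure comes from.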
@@ -10,108 +10,6 @@
 
 #include <sys/util.h>
 
-/* Macros for reserving space for page tables
- *
- * Z_X86_NUM_TABLE_PAGES. In order to produce a set of page tables which has
- * virtual mappings for all system RAM, Z_X86_NUM_TABLE_PAGES is the number of
- * memory pages required. If CONFIG_X86_PAE is enabled, an additional 0x20
- * bytes are required for the toplevel 4-entry PDPT.
- *
- * Z_X86_INITIAL_PAGETABLE_SIZE is the total amount of memory in bytes
- * required, for any paging mode.
- *
- * These macros are currently used for two purposes:
- * - Reserving memory in the stack for thread-level page tables (slated
- *   for eventual removal when USERSPACE is reworked to fully utilize
- *   virtual memory and page tables are maintained at the process level)
- * - Reserving room for dummy pagetable memory for the first link, so that
- *   memory addresses are not disturbed by the insertion of the real page
- *   tables created by gen_mmu.py in the second link phase.
- */
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-#ifdef CONFIG_X86_64
-#define Z_X86_NUM_PML4_ENTRIES 512U
-#define Z_X86_NUM_PDPT_ENTRIES 512U
-#else
-#define Z_X86_NUM_PDPT_ENTRIES 4U
-#endif /* CONFIG_X86_64 */
-#define Z_X86_NUM_PD_ENTRIES 512U
-#define Z_X86_NUM_PT_ENTRIES 512U
-#else
-#define Z_X86_NUM_PD_ENTRIES 1024U
-#define Z_X86_NUM_PT_ENTRIES 1024U
-#endif /* !CONFIG_X86_64 && !CONFIG_X86_PAE */
-
-/* Memory range covered by an instance of various table types */
-#define Z_X86_PT_AREA ((uintptr_t)(CONFIG_MMU_PAGE_SIZE * \
-			Z_X86_NUM_PT_ENTRIES))
-#define Z_X86_PD_AREA (Z_X86_PT_AREA * Z_X86_NUM_PD_ENTRIES)
-#ifdef CONFIG_X86_64
-#define Z_X86_PDPT_AREA (Z_X86_PD_AREA * Z_X86_NUM_PDPT_ENTRIES)
-#endif
-
-#define PHYS_RAM_ADDR DT_REG_ADDR(DT_CHOSEN(zephyr_sram))
-#define PHYS_RAM_SIZE DT_REG_SIZE(DT_CHOSEN(zephyr_sram))
-
-/* Define a range [Z_X86_PT_START, Z_X86_PT_END) which is the memory range
- * covered by all the page tables needed for system RAM
- */
-#define Z_X86_PT_START ((uintptr_t)ROUND_DOWN(PHYS_RAM_ADDR, Z_X86_PT_AREA))
-#define Z_X86_PT_END ((uintptr_t)ROUND_UP(PHYS_RAM_ADDR + PHYS_RAM_SIZE, \
-			Z_X86_PT_AREA))
-
-/* Number of page tables needed to cover system RAM. Depends on the specific
- * bounds of system RAM, but roughly 1 page table per 2MB of RAM
- */
-#define Z_X86_NUM_PT ((Z_X86_PT_END - Z_X86_PT_START) / Z_X86_PT_AREA)
-
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-/* Same semantics as above, but for the page directories needed to cover
- * system RAM.
- */
-#define Z_X86_PD_START ((uintptr_t)ROUND_DOWN(PHYS_RAM_ADDR, Z_X86_PD_AREA))
-#define Z_X86_PD_END ((uintptr_t)ROUND_UP(PHYS_RAM_ADDR + PHYS_RAM_SIZE, \
-			Z_X86_PD_AREA))
-/* Number of page directories needed to cover system RAM. Depends on the
- * specific bounds of system RAM, but roughly 1 page directory per 1GB of RAM
- */
-#define Z_X86_NUM_PD ((Z_X86_PD_END - Z_X86_PD_START) / Z_X86_PD_AREA)
-#else
-/* 32-bit page tables just have one toplevel page directory */
-#define Z_X86_NUM_PD 1
-#endif
-
-#ifdef CONFIG_X86_64
-/* Same semantics as above, but for the page directory pointer tables needed
- * to cover system RAM. On 32-bit there is just one 4-entry PDPT.
- */
-#define Z_X86_PDPT_START ((uintptr_t)ROUND_DOWN(PHYS_RAM_ADDR, \
-			Z_X86_PDPT_AREA))
-#define Z_X86_PDPT_END ((uintptr_t)ROUND_UP(PHYS_RAM_ADDR + PHYS_RAM_SIZE, \
-			Z_X86_PDPT_AREA))
-/* Number of PDPTs needed to cover system RAM. Depends on the
- * specific bounds of system RAM, but roughly 1 PDPT per 512GB of RAM
- */
-#define Z_X86_NUM_PDPT ((Z_X86_PDPT_END - Z_X86_PDPT_START) / Z_X86_PDPT_AREA)
-
-/* All pages needed for page tables, using computed values plus one more for
- * the top-level PML4
- */
-#define Z_X86_NUM_TABLE_PAGES (Z_X86_NUM_PT + Z_X86_NUM_PD + \
-			Z_X86_NUM_PDPT + 1)
-#else /* !CONFIG_X86_64 */
-/* Number of pages we need to reserve in the stack for per-thread page tables */
-#define Z_X86_NUM_TABLE_PAGES (Z_X86_NUM_PT + Z_X86_NUM_PD)
-#endif /* CONFIG_X86_64 */
-
-#ifdef CONFIG_X86_PAE
-/* Toplevel PDPT wasn't included as it is not a page in size */
-#define Z_X86_INITIAL_PAGETABLE_SIZE ((Z_X86_NUM_TABLE_PAGES * \
-			CONFIG_MMU_PAGE_SIZE) + 0x20)
-#else
-#define Z_X86_INITIAL_PAGETABLE_SIZE (Z_X86_NUM_TABLE_PAGES * \
-			CONFIG_MMU_PAGE_SIZE)
-#endif
-
 /*
  * K_MEM_PARTITION_* defines
  *
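The extra 0x20 bytes in the PAE case follow directly from the PDPT layout: a PAE toplevel page directory pointer table holds only 4 entries of 8 bytes each, so it occupies 32 (0x20) bytes rather than a full page, and is therefore added on top of the whole-page tables. A one-line sanity check of that arithmetic, as a sketch:

```c
#include <stdint.h>

/* A PAE toplevel PDPT holds 4 entries, each an 8-byte pointer-plus-flags,
 * so the table occupies 32 bytes (0x20) rather than a full 4 KB page.
 * That is exactly the extra 0x20 added to the PAE reservation size.
 */
_Static_assert(4U * sizeof(uint64_t) == 0x20,
	       "PAE PDPT is 32 bytes, not a whole page");
```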