x86: use per-thread page tables
Previously, context switching on x86 with memory protection enabled involved walking the page tables, de-configuring all the partitions in the outgoing thread's memory domain, and then configuring all the partitions in the incoming thread's domain, on a global set of page tables.

We now have a much faster design. Each thread reserves, in its stack object, a number of pages to store page directories and page tables covering the system RAM area. Each thread also has a top-level PDPT configured to use the per-thread tables for system RAM and the global tables for the rest of the address space.

As a result, a context switch at most requires updating the CR3 register to point to the incoming thread's PDPT.

The x86_mmu_api test was making too many assumptions and has been adjusted to work with the new design.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent 8915e41b7b
commit 8014e075f4
10 changed files with 558 additions and 318 deletions
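For illustration, here is a minimal sketch of the fast path the message describes: each thread carries its own top-level PDPT, so switching address spaces collapses to a CR3 reload. The helper name, the structure layout, and the assumption that kernel RAM is identity-mapped (so a virtual address can be loaded into CR3 directly) are assumptions made for the sketch, not code from this commit; only struct x86_mmu_pdpt and the overall idea come from the change itself. The diff hunks shown further down are from one header among the ten changed files.

/* Sketch only -- not the code added by this commit. Assumes Zephyr's x86
 * MMU headers for struct x86_mmu_pdpt and an identity-mapped kernel, so
 * the PDPT's virtual address equals its physical address.
 */
#include <stdint.h>

struct thread_page_tables {
	/* Per-thread top-level table: points at per-thread page tables for
	 * system RAM and at the shared global tables for everything else.
	 */
	struct x86_mmu_pdpt pdpt;
	/* Per-thread page directories and page tables covering system RAM
	 * would be reserved here, inside the thread's stack object.
	 */
};

/* Hypothetical context-switch hook: no walking or rewriting of a global
 * page table set, just point CR3 at the incoming thread's PDPT.
 */
static inline void switch_to_thread_tables(struct thread_page_tables *incoming)
{
	uintptr_t pdpt_phys = (uintptr_t)&incoming->pdpt;

	__asm__ volatile("movl %0, %%cr3" : : "r" (pdpt_phys) : "memory");
}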
@@ -738,9 +738,18 @@ extern struct task_state_segment _main_tss;
 #endif
 
 #ifdef CONFIG_X86_MMU
-/* kernel's page table */
+/* Kernel's page table. Always active when threads are running in supervisor
+ * mode, or handling an interrupt.
+ *
+ * If KPTI is not enabled, this is used as a template to create per-thread
+ * page tables for when threads run in user mode.
+ */
 extern struct x86_mmu_pdpt z_x86_kernel_pdpt;
 #ifdef CONFIG_X86_KPTI
+/* Separate page tables for user mode threads. The top-level PDPT is never
+ * installed into the CPU; instead used as a template for creating per-thread
+ * page tables.
+ */
 extern struct x86_mmu_pdpt z_x86_user_pdpt;
 #define USER_PDPT z_x86_user_pdpt
 #else
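Reading the declarations above: z_x86_kernel_pdpt is always live in supervisor mode, and with KPTI a separate z_x86_user_pdpt serves as the template for user-mode threads, with USER_PDPT naming whichever template applies. A hypothetical helper that copies the appropriate template into a thread's reserved page-table area might look like the sketch below; the function, its arguments, and the use of memcpy() are assumptions for illustration, and only the two externs and the macro come from the diff.

#include <stdbool.h>
#include <string.h>

/* Hypothetical illustration of how the USER_PDPT macro keeps KPTI
 * conditionals out of callers: per-thread tables for user mode are seeded
 * from z_x86_user_pdpt when KPTI is enabled, otherwise from the kernel's
 * own PDPT, and USER_PDPT resolves to the right template at compile time.
 */
static void seed_thread_pdpt(struct x86_mmu_pdpt *thread_pdpt, bool user_thread)
{
	const struct x86_mmu_pdpt *template_pdpt =
		user_thread ? &USER_PDPT : &z_x86_kernel_pdpt;

	memcpy(thread_pdpt, template_pdpt, sizeof(*thread_pdpt));
}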
@@ -774,14 +783,16 @@ void z_x86_mmu_get_flags(struct x86_mmu_pdpt *pdpt, void *addr,
  * @param flags Value of bits to set in the page table entries
  * @param mask Mask indicating which particular bits in the page table entries to
  *             modify
+ * @param flush Whether to flush the TLB for the modified pages, only needed
+ *              when modifying the active page tables
  */
 void z_x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
-			 size_t size,
-			 x86_page_entry_data_t flags,
-			 x86_page_entry_data_t mask);
-
-void z_x86_reset_pages(void *start, size_t size);
-
+			 size_t size,
+			 x86_page_entry_data_t flags,
+			 x86_page_entry_data_t mask, bool flush);
+
+int z_x86_mmu_validate(struct x86_mmu_pdpt *pdpt, void *addr, size_t size,
+		       int write);
 #endif /* CONFIG_X86_MMU */
 
 #endif /* !_ASMLANGUAGE */
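The second hunk adds a flush parameter to z_x86_mmu_set_flags(), since the TLB only needs flushing when the active page tables are edited, which inactive per-thread copies are not. Below is a hedged usage sketch: the wrapper function, its parameters, and the assumption that z_x86_mmu_validate() returns 0 on success are illustrative, and only the two prototypes are taken from the diff.

#include <stdbool.h>
#include <stddef.h>

/* Illustrative call site only; the flag and mask values are left as
 * parameters because the real MMU bit macros are not shown in this hunk.
 * Prototypes of z_x86_mmu_set_flags() and z_x86_mmu_validate() come from
 * the diff above.
 */
static void update_region_flags(struct x86_mmu_pdpt *pdpt, void *region,
				size_t size, x86_page_entry_data_t flags,
				x86_page_entry_data_t mask, bool tables_active)
{
	/* Pass flush=true only when 'pdpt' is the live page table set; an
	 * inactive per-thread copy can be edited without TLB invalidation.
	 */
	z_x86_mmu_set_flags(pdpt, region, size, flags, mask, tables_active);

	/* Re-check write accessibility of the region (a return value of 0
	 * meaning success is an assumption for this sketch).
	 */
	(void)z_x86_mmu_validate(pdpt, region, size, 1);
}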