x86: use per-thread page tables
Previously, context switching on x86 with memory protection enabled involved walking the page tables, de-configuring all the partitions in the outgoing thread's memory domain, and then configuring all the partitions in the incoming thread's domain, on a global set of page tables. We now have a much faster design. Each thread has reserved in its stack object a number of pages to store page directories and page tables pertaining to the system RAM area. Each thread also has a toplevel PDPT which is configured to use the per-thread tables for system RAM, and the global tables for the rest of the address space. The result of this is on context switch, at most we just have to update the CR3 register to the incoming thread's PDPT. The x86_mmu_api test was making too many assumptions and has been adjusted to work with the new design. Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
parent
8915e41b7b
commit
8014e075f4
10 changed files with 558 additions and 318 deletions
|
@@ -49,7 +49,7 @@ static inline void kernel_arch_init(void)
|
|||
#endif
|
||||
#if CONFIG_X86_STACK_PROTECTION
|
||||
z_x86_mmu_set_flags(&z_x86_kernel_pdpt, _interrupt_stack, MMU_PAGE_SIZE,
|
||||
MMU_ENTRY_READ, MMU_PTE_RW_MASK);
|
||||
MMU_ENTRY_READ, MMU_PTE_RW_MASK, true);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@@ -76,18 +76,24 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
|
|||
|
||||
extern void k_cpu_atomic_idle(unsigned int key);
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry,
|
||||
void *p1, void *p2, void *p3,
|
||||
u32_t stack_end,
|
||||
u32_t stack_start);
|
||||
|
||||
/* Helper macros needed to be passed to x86_update_mem_domain_pages */
|
||||
#define X86_MEM_DOMAIN_SET_PAGES (0U)
|
||||
#define X86_MEM_DOMAIN_RESET_PAGES (1U)
|
||||
void z_x86_thread_pt_init(struct k_thread *thread);
|
||||
|
||||
extern void z_x86_mem_domain_pages_update(struct k_mem_domain *mem_domain,
|
||||
u32_t page_conf);
|
||||
void z_x86_apply_mem_domain(struct x86_mmu_pdpt *pdpt,
|
||||
struct k_mem_domain *mem_domain);
|
||||
|
||||
static inline struct x86_mmu_pdpt *z_x86_pdpt_get(struct k_thread *thread)
|
||||
{
|
||||
uintptr_t addr = thread->stack_info.start;
|
||||
|
||||
return (struct x86_mmu_pdpt *)(addr - sizeof(struct x86_mmu_pdpt));
|
||||
}
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
#include <stddef.h> /* For size_t */
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue