x86: implement interrupt stack trampoline
Upon hard/soft IRQ or exception entry/exit, handle transitions off or
onto the trampoline stack, which is the only stack that can be used on
the kernel side when the shadow page table is active. We swap page
tables when on this stack.

Adjustments to page tables are now as follows:

- Any adjustments for stack memory access are now always done to the
  user page tables
- Any adjustments for memory domains are now always done to the user
  page tables
- With KPTI, resetting a page now clears the present bit

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent eea0f93a83
commit 2cfeba8507
10 changed files with 275 additions and 50 deletions
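For orientation before the diff: KPTI keeps two page-table roots, and while the shadow (user) table is active the only usable kernel stack is the trampoline stack. The rule that resetting a page now clears the present bit can be modeled in plain, host-runnable C. This is a hypothetical sketch; none of the names below are Zephyr APIs except where a comment says what they stand in for.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy PTE; the real x86 PDPT/PD/PT layout is far richer */
    struct pte {
            bool present;
            bool user_ok;
    };

    static struct pte kernel_table[4]; /* stands in for z_x86_kernel_pdpt */
    static struct pte user_table[4];   /* stands in for z_x86_user_pdpt */

    #define KPTI 1 /* models CONFIG_X86_KPTI */

    static void reset_page(int i)
    {
    #if KPTI
            user_table[i].present = false;   /* page vanishes from user's view */
    #else
            kernel_table[i].user_ok = false; /* page stays mapped, ring 0 only */
    #endif
    }

    int main(void)
    {
            user_table[1] = (struct pte){ .present = true, .user_ok = true };
            reset_page(1);
            printf("page 1 visible to user mode: %d\n", user_table[1].present);
            return 0;
    }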
@@ -72,7 +72,9 @@ SECTION_FUNC(TEXT, _exception_enter)
 	cld
 
+#ifdef CONFIG_X86_KPTI
+	call z_x86_trampoline_to_kernel
+#endif
 	/*
 	 * Swap ecx and handler function on the current stack;
 	 */
@@ -210,7 +212,7 @@ nestedException:
 	addl	$4, %esp	/* "pop" error code */
 
 	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
-	iret
+	KPTI_IRET
 
 #if CONFIG_X86_KERNEL_OOPS
 SECTION_FUNC(TEXT, _kernel_oops_handler)
@@ -331,6 +331,12 @@ static void dump_page_fault(NANO_ESF *esf)
 	       cr2);
 
 #ifdef CONFIG_X86_MMU
+#ifdef CONFIG_X86_KPTI
+	if (err & US) {
+		dump_mmu_flags(&z_x86_user_pdpt, (void *)cr2);
+		return;
+	}
+#endif
 	dump_mmu_flags(&z_x86_kernel_pdpt, (void *)cr2);
 #endif
 }
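The `err & US` test relies on the architectural page-fault error code: bit 2 is set when the fault was taken at CPL 3, meaning the CPU was walking the shadow (user) table, so that is the table whose flags are worth dumping. For reference, the low error-code bits per the Intel SDM (the first two names are illustrative; `US` is presumably defined this way elsewhere in fault.c):

    #define PF_P  (1 << 0)	/* 0: page not present; 1: protection violation */
    #define PF_WR (1 << 1)	/* faulting access was a write */
    #define US    (1 << 2)	/* fault occurred while CPL == 3 (user mode) */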
@@ -376,9 +382,20 @@ static __noinit char _df_stack[8];
 
 static FUNC_NORETURN __used void _df_handler_top(void);
 
+#ifdef CONFIG_X86_KPTI
+extern char z_trampoline_stack_end[];
+#endif
+
 _GENERIC_SECTION(.tss)
 struct task_state_segment _main_tss = {
-	.ss0 = DATA_SEG
+	.ss0 = DATA_SEG,
+#ifdef CONFIG_X86_KPTI
+	/* Stack to land on when we get a soft/hard IRQ in user mode.
+	 * In a special kernel page that, unlike all other kernel pages,
+	 * is marked present in the user page table.
+	 */
+	.esp0 = (u32_t)&z_trampoline_stack_end
+#endif
 };
 
 /* Special TSS for handling double-faults with a known good stack */
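The `.esp0` field matters because, on any interrupt or exception that raises privilege from ring 3, the CPU loads SS:ESP from the `ss0`/`esp0` fields of the active TSS before pushing the return frame. Under KPTI that landing point must be the trampoline stack, since it lives in the one kernel page still present in the user table. A rough C model of the hardware's stack choice (hypothetical helper, illustration only):

    /* Which stack receives the interrupt frame? (hardware behavior, modeled) */
    static u32_t entry_stack(const struct task_state_segment *tss,
                             int interrupted_cpl, u32_t current_esp)
    {
            /* Privilege change: the stack comes from the TSS; with KPTI,
             * esp0 stays pinned to z_trampoline_stack_end as set above. */
            return (interrupted_cpl == 3) ? tss->esp0 : current_esp;
    }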
@@ -69,6 +69,20 @@
  * void _interrupt_enter(void *isr, void *isr_param);
  */
 SECTION_FUNC(TEXT, _interrupt_enter)
+	/*
+	 * Note that the processor has pushed both the EFLAGS register
+	 * and the logical return address (cs:eip) onto the stack prior
+	 * to invoking the handler specified in the IDT. The stack looks
+	 * like this:
+	 *
+	 * 24 SS (only on privilege level change)
+	 * 20 ESP (only on privilege level change)
+	 * 16 EFLAGS
+	 * 12 CS
+	 * 8  EIP
+	 * 4  isr_param
+	 * 0  isr <-- stack pointer
+	 */
 
 #ifdef CONFIG_EXECUTION_BENCHMARKING
 	pushl	%eax
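The same layout expressed as a C overlay, which can be easier to scan than the offset column (a sketch; the assembly indexes %esp directly):

    struct irq_entry_frame {	/* offsets from %esp at handler entry */
            void *isr;	/*  0: pushed by the IDT stub */
            void *isr_param;	/*  4 */
            u32_t eip;	/*  8 */
            u32_t cs;	/* 12 */
            u32_t eflags;	/* 16 */
            u32_t esp;	/* 20: only on privilege level change */
            u32_t ss;	/* 24: only on privilege level change */
    };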
@@ -91,20 +105,9 @@ SECTION_FUNC(TEXT, _interrupt_enter)
 
 	cld
 
-	/*
-	 * Note that the processor has pushed both the EFLAGS register
-	 * and the logical return address (cs:eip) onto the stack prior
-	 * to invoking the handler specified in the IDT. The stack looks
-	 * like this:
-	 *
-	 * EFLAGS
-	 * CS
-	 * EIP
-	 * isr_param
-	 * isr <-- stack pointer
-	 */
-
+#ifdef CONFIG_X86_KPTI
+	call z_x86_trampoline_to_kernel
+#endif
 
 	/*
 	 * Swap EAX with isr_param and EDX with isr.
 	 * Push ECX onto the stack
@@ -314,7 +317,7 @@ alreadyOnIntStack:
 	popl	%eax
 
 	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
-	iret
+	KPTI_IRET
 
 #endif /* CONFIG_PREEMPT_ENABLED */
 
@@ -350,7 +353,7 @@ nestedInterrupt:
 	popl	%edx
 	popl	%eax
 	/* Pop of EFLAGS will re-enable interrupts and restore direction flag */
-	iret
+	KPTI_IRET
 
 
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
@@ -132,22 +132,28 @@ void _x86_swap_update_page_tables(struct k_thread *incoming,
 				  struct k_thread *outgoing)
 {
 	/* Outgoing thread stack no longer accessible */
-	_x86_mmu_set_flags(&z_x86_kernel_pdpt,
-			   (void *)outgoing->stack_info.start,
-			   ROUND_UP(outgoing->stack_info.size, MMU_PAGE_SIZE),
-			   MMU_ENTRY_SUPERVISOR, MMU_PTE_US_MASK);
+	z_x86_reset_pages((void *)outgoing->stack_info.start,
+			  ROUND_UP(outgoing->stack_info.size, MMU_PAGE_SIZE));
 
 	/* Userspace can now access the incoming thread's stack */
-	_x86_mmu_set_flags(&z_x86_kernel_pdpt,
+	_x86_mmu_set_flags(&USER_PDPT,
 			   (void *)incoming->stack_info.start,
 			   ROUND_UP(incoming->stack_info.size, MMU_PAGE_SIZE),
-			   MMU_ENTRY_USER, MMU_PTE_US_MASK);
+			   MMU_ENTRY_PRESENT | K_MEM_PARTITION_P_RW_U_RW,
+			   K_MEM_PARTITION_PERM_MASK | MMU_PTE_P_MASK);
 
+#ifndef CONFIG_X86_KPTI
 	/* In case of privilege elevation, use the incoming thread's kernel
-	 * stack, the top of the thread stack is the bottom of the kernel stack
+	 * stack; the top of the thread stack is the bottom of the kernel
+	 * stack.
+	 *
+	 * If KPTI is enabled, then privilege elevation always lands on the
+	 * trampoline stack and the irq/syscall code has to manually transition
+	 * off of it to the thread's kernel stack after switching page
+	 * tables.
 	 */
 	_main_tss.esp0 = incoming->stack_info.start;
+#endif
 
 	/* If either thread defines different memory domains, efficiently
 	 * switch between them
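Note the new `#ifndef CONFIG_X86_KPTI` guard: without KPTI, `esp0` is retargeted at every swap so a privilege elevation lands directly on the incoming thread's kernel stack; with KPTI it stays pinned to the trampoline stack and the entry stubs hop off it themselves. Summarized as a sketch (hypothetical helper name):

    /* Illustrative summary of the esp0 policy after this change */
    static void update_elevation_stack(struct k_thread *incoming)
    {
    #ifndef CONFIG_X86_KPTI
            /* Ring 3 -> ring 0 lands straight on the thread's kernel stack */
            _main_tss.esp0 = incoming->stack_info.start;
    #endif
            /* With KPTI, esp0 keeps pointing at z_trampoline_stack_end and
             * z_x86_trampoline_to_kernel performs the hop on every entry. */
    }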
@@ -21,10 +21,174 @@ GTEXT(z_arch_user_string_nlen_fixup)
 /* Imports */
 GDATA(_k_syscall_table)
 
+#ifdef CONFIG_X86_KPTI
+/* Switch from the shadow to the kernel page table, switch to the interrupted
+ * thread's kernel stack, and copy all context from the trampoline stack.
+ *
+ * Assumes all registers are callee-saved since this gets called from other
+ * ASM code. Assumes a particular stack layout which is correct for
+ * _exception_enter and _interrupt_enter when invoked with a call instruction:
+ *
+ * 28 SS
+ * 24 ESP
+ * 20 EFLAGS
+ * 16 CS
+ * 12 EIP
+ * 8  isr_param or exc code
+ * 4  isr or exc handler
+ * 0  return address
+ */
+SECTION_FUNC(TEXT, z_x86_trampoline_to_kernel)
+	/* Check interrupted code segment to see if we came from ring 3
+	 * and hence are on the trampoline stack
+	 */
+	testb	$3, 16(%esp)	/* Offset of CS */
+	jz	1f
+
+	/* Stash these regs as we need to use them */
+	pushl	%esi
+	pushl	%edi
+
+	/* Switch to the kernel page table */
+	movl	$z_x86_kernel_pdpt, %esi
+	movl	%esi, %cr3
+
+	/* Save old trampoline stack pointer in %edi */
+	movl	%esp, %edi
+
+	/* %esp = _kernel->current->stack_info.start
+	 *
+	 * This is the lowest address of the user mode stack and the highest
+	 * address of the kernel stack; they are adjacent.
+	 * We want to transplant context here.
+	 */
+	movl	$_kernel, %esi
+	movl	_kernel_offset_to_current(%esi), %esi
+	movl	_thread_offset_to_stack_start(%esi), %esp
+
+	/* Transplant stack context and restore ESI/EDI, taking care to zero
+	 * or put uninteresting values where we stashed ESI/EDI, since the
+	 * trampoline page is insecure and there might be a context switch
+	 * on the way out instead of returning to the original thread
+	 * immediately.
+	 */
+	pushl	36(%edi)	/* SS */
+	pushl	32(%edi)	/* ESP */
+	pushl	28(%edi)	/* EFLAGS */
+	pushl	24(%edi)	/* CS */
+	pushl	20(%edi)	/* EIP */
+	pushl	16(%edi)	/* error code or isr parameter */
+	pushl	12(%edi)	/* exception/irq handler */
+	pushl	8(%edi)		/* return address */
+	movl	4(%edi), %esi	/* restore ESI */
+	movl	$0, 4(%edi)	/* Zero old ESI storage area */
+	xchgl	%edi, (%edi)	/* Exchange old EDI to restore it and put
+				 * the old sp in the storage area */
+
+	/* Trampoline stack should have nothing sensitive in it at this point */
+1:
+	ret
+
+/* Copy interrupt return stack context to the trampoline stack, switch back
+ * to the user page table, and only then 'iret'. We jump to this instead
+ * of calling 'iret' if KPTI is turned on.
+ *
+ * Stack layout is expected to be as follows:
+ *
+ * 16 SS
+ * 12 ESP
+ * 8  EFLAGS
+ * 4  CS
+ * 0  EIP
+ *
+ * This function is conditionally macroed to KPTI_IRET/KPTI_IRET_USER.
+ */
+SECTION_FUNC(TEXT, z_x86_trampoline_to_user)
+	/* Check interrupted code segment to see if we came from ring 3
+	 * and hence are on the trampoline stack
+	 */
+	testb	$3, 4(%esp)	/* Offset of CS */
+	jz	1f
+
+	/* Otherwise, fall through ... */
+
+SECTION_FUNC(TEXT, z_x86_trampoline_to_user_always)
+	/* Stash EDI, we need a free register */
+	pushl	%edi
+
+	/* Store old stack pointer and switch to the trampoline stack */
+	movl	%esp, %edi
+	movl	$z_trampoline_stack_end, %esp
+
+	/* Lock IRQs until we get out; we don't want anyone else using the
+	 * trampoline stack
+	 */
+	cli
+
+	/* Copy context */
+	pushl	20(%edi)	/* SS */
+	pushl	16(%edi)	/* ESP */
+	pushl	12(%edi)	/* EFLAGS */
+	pushl	8(%edi)		/* CS */
+	pushl	4(%edi)		/* EIP */
+	xchgl	%edi, (%edi)	/* Exchange old EDI to restore it and put the
+				 * trampoline stack address in its old storage
+				 * area */
+
+	/* Switch to the user page table */
+	pushl	%eax
+	movl	$z_x86_user_pdpt, %eax
+	movl	%eax, %cr3
+	popl	%eax
+	movl	$0, -4(%esp)	/* Delete stashed EAX data */
+
+	/* Trampoline stack should have nothing sensitive in it at this point */
+1:
+	iret
+#endif /* CONFIG_X86_KPTI */
+
 /* Landing site for syscall SW IRQ. Marshal arguments and call C function for
- * further processing. We're on the kernel stack for the invoking thread.
+ * further processing. We're on the kernel stack for the invoking thread,
+ * unless KPTI is enabled, in which case we're on the trampoline stack and
+ * need to get off it before enabling interrupts.
  */
 SECTION_FUNC(TEXT, _x86_syscall_entry_stub)
+#ifdef CONFIG_X86_KPTI
+	/* Stash these regs as we need to use them */
+	pushl	%esi
+	pushl	%edi
+
+	/* Switch to the kernel page table */
+	movl	$z_x86_kernel_pdpt, %esi
+	movl	%esi, %cr3
+
+	/* Save old trampoline stack pointer in %edi */
+	movl	%esp, %edi
+
+	/* %esp = _kernel->current->stack_info.start
+	 *
+	 * This is the lowest address of the user mode stack and the highest
+	 * address of the kernel stack; they are adjacent.
+	 * We want to transplant context here.
+	 */
+	movl	$_kernel, %esi
+	movl	_kernel_offset_to_current(%esi), %esi
+	movl	_thread_offset_to_stack_start(%esi), %esp
+
+	/* Transplant context according to the layout above. Variant of the
+	 * logic in z_x86_trampoline_to_kernel */
+	pushl	24(%edi)	/* SS */
+	pushl	20(%edi)	/* ESP */
+	pushl	16(%edi)	/* EFLAGS */
+	pushl	12(%edi)	/* CS */
+	pushl	8(%edi)		/* EIP */
+	movl	4(%edi), %esi	/* restore ESI */
+	movl	$0, 4(%edi)	/* Zero old ESI storage area */
+	xchgl	%edi, (%edi)	/* Exchange old EDI to restore it and put
+				 * the old sp in the storage area */
+
+	/* Trampoline stack should have nothing sensitive in it at this point */
+#endif /* CONFIG_X86_KPTI */
+
 	sti			/* re-enable interrupts */
 	cld			/* clear direction flag, restored on 'iret' */
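The closing `xchgl %edi, (%edi)` in both transplant paths does double duty: at that point %edi holds the old trampoline stack pointer, and the word it points to holds the stashed original EDI. In C terms (a model only; the authoritative version is the assembly above):

    static u32_t edi_reg; /* stands in for the %edi register */

    static void model_xchgl(void)
    {
            u32_t *slot = (u32_t *)edi_reg;	/* stash slot on the trampoline page */
            u32_t stashed = *slot;	/* original EDI saved by "pushl %edi" */

            *slot = edi_reg;	/* slot now holds only the old sp: harmless */
            edi_reg = stashed;	/* register restored, stash scrubbed */
    }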
@@ -74,7 +238,7 @@ _id_ok:
 	pop %ecx   /* Clean ECX and get arg6 off the stack */
 	pop %edx   /* Clean EDX and get ssf off the stack */
 #endif
-	iret
+	KPTI_IRET_USER
 
 _bad_syscall:
 	/* ESI had a bogus syscall value in it, replace with the bad syscall
@@ -237,4 +401,4 @@ SECTION_FUNC(TEXT, _x86_userspace_enter)
 #endif
 
 	/* We will land in _thread_entry() in user mode after this */
-	iret
+	KPTI_IRET_USER
@@ -89,7 +89,7 @@ int _arch_buffer_validate(void *addr, size_t size, int write)
 	}
 
 	struct x86_mmu_pd *pd_address =
-		X86_MMU_GET_PD_ADDR_INDEX(&z_x86_kernel_pdpt, pdpte);
+		X86_MMU_GET_PD_ADDR_INDEX(&USER_PDPT, pdpte);
 
 	/* Iterate for all the pde's the buffer might take up.
 	 * (depends on the size of the buffer and start address
@@ -192,6 +192,22 @@ void _x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
 }
 
 #ifdef CONFIG_X86_USERSPACE
+void z_x86_reset_pages(void *start, size_t size)
+{
+#ifdef CONFIG_X86_KPTI
+	/* Clear both present bit and access flags. Only applies
+	 * to threads running in user mode.
+	 */
+	_x86_mmu_set_flags(&z_x86_user_pdpt, start, size,
+			   MMU_ENTRY_NOT_PRESENT,
+			   K_MEM_PARTITION_PERM_MASK | MMU_PTE_P_MASK);
+#else
+	/* Mark as supervisor read-write, user mode no access */
+	_x86_mmu_set_flags(&z_x86_kernel_pdpt, start, size,
+			   K_MEM_PARTITION_P_RW_U_NA,
+			   K_MEM_PARTITION_PERM_MASK);
+#endif /* CONFIG_X86_KPTI */
+}
+
 /* Helper macros needed to be passed to x86_update_mem_domain_pages */
 #define X86_MEM_DOMAIN_SET_PAGES   (0U)
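A representative call site, mirroring the scheduler hunk earlier in this diff (illustrative):

    /* Revoke user access to the outgoing thread's stack; under KPTI the
     * region simply disappears from the user table. */
    z_x86_reset_pages((void *)outgoing->stack_info.start,
                      ROUND_UP(outgoing->stack_info.size, MMU_PAGE_SIZE));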
@@ -230,18 +246,22 @@ static inline void _x86_mem_domain_pages_update(struct k_mem_domain *mem_domain,
 		partitions_count++;
 		if (page_conf == X86_MEM_DOMAIN_SET_PAGES) {
 			/* Set the partition attributes */
-			_x86_mmu_set_flags(&z_x86_kernel_pdpt,
+			u64_t attr, mask;
+
+#if CONFIG_X86_KPTI
+			attr = partition.attr | MMU_ENTRY_PRESENT;
+			mask = K_MEM_PARTITION_PERM_MASK | MMU_PTE_P_MASK;
+#else
+			attr = partition.attr;
+			mask = K_MEM_PARTITION_PERM_MASK;
+#endif /* CONFIG_X86_KPTI */
+
+			_x86_mmu_set_flags(&USER_PDPT,
 					   (void *)partition.start,
-					   partition.size,
-					   partition.attr,
-					   K_MEM_PARTITION_PERM_MASK);
+					   partition.size, attr, mask);
 		} else {
-			/* Reset the pages to supervisor RW only */
-			_x86_mmu_set_flags(&z_x86_kernel_pdpt,
-					   (void *)partition.start,
-					   partition.size,
-					   K_MEM_PARTITION_P_RW_U_NA,
-					   K_MEM_PARTITION_PERM_MASK);
+			z_x86_reset_pages((void *)partition.start,
+					  partition.size);
 		}
 	}
 out:
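Concretely, for a read-write user partition the two branches compose like this (values illustrative, built from the constants the hunk uses):

    /* KPTI: make the partition both visible and permissioned in the user table */
    u64_t attr = K_MEM_PARTITION_P_RW_U_RW | MMU_ENTRY_PRESENT;
    u64_t mask = K_MEM_PARTITION_PERM_MASK | MMU_PTE_P_MASK;

    /* Non-KPTI: presence never changes, so only permission bits are touched */
    u64_t attr2 = K_MEM_PARTITION_P_RW_U_RW;	/* i.e. partition.attr */
    u64_t mask2 = K_MEM_PARTITION_PERM_MASK;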
@@ -277,12 +297,7 @@ void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
 		 "invalid partitions");
 
 	partition = domain->partitions[partition_id];
-	_x86_mmu_set_flags(&z_x86_kernel_pdpt, (void *)partition.start,
-			   partition.size,
-			   K_MEM_PARTITION_P_RW_U_NA,
-			   K_MEM_PARTITION_PERM_MASK);
+	z_x86_reset_pages((void *)partition.start, partition.size);
 
 out:
 	return;
 }
@@ -649,7 +649,12 @@ extern const NANO_ESF _default_esf;
 #ifdef CONFIG_X86_MMU
 /* kernel's page table */
 extern struct x86_mmu_pdpt z_x86_kernel_pdpt;
+#ifdef CONFIG_X86_KPTI
+extern struct x86_mmu_pdpt z_x86_user_pdpt;
+#define USER_PDPT z_x86_user_pdpt
+#else
+#define USER_PDPT z_x86_kernel_pdpt
+#endif
 /**
  * @brief Fetch page table flags for a particular page
  *
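The alias is what keeps the call sites in this diff free of `#ifdef`s; the same line does the right thing in both configurations (illustrative):

    /* KPTI on:  updates the shadow table z_x86_user_pdpt
     * KPTI off: updates z_x86_kernel_pdpt, exactly as before this commit */
    _x86_mmu_set_flags(&USER_PDPT, (void *)addr, size, flags, mask);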
@@ -684,6 +689,8 @@ void _x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
 			x86_page_entry_data_t flags,
 			x86_page_entry_data_t mask);
 
+void z_x86_reset_pages(void *start, size_t size);
+
 #endif /* CONFIG_X86_MMU */
 
 #endif /* !_ASMLANGUAGE */
@@ -74,6 +74,16 @@
 
 #endif /* CONFIG_RETPOLINE */
 
+#ifdef CONFIG_X86_KPTI
+GTEXT(z_x86_trampoline_to_user)
+GTEXT(z_x86_trampoline_to_kernel)
+
+#define KPTI_IRET	jmp z_x86_trampoline_to_user
+#define KPTI_IRET_USER	jmp z_x86_trampoline_to_user_always
+#else
+#define KPTI_IRET	iret
+#define KPTI_IRET_USER	iret
+#endif /* CONFIG_X86_KPTI */
 #endif /* _ASMLANGUAGE */
 
 #endif /* ZEPHYR_INCLUDE_ARCH_X86_ASM_H_ */
@@ -61,7 +61,8 @@
 #define _thread_offset_to_esf \
 	(___thread_t_arch_OFFSET + ___thread_arch_t_esf_OFFSET)
 
+#define _thread_offset_to_stack_start \
+	(___thread_t_stack_info_OFFSET + ___thread_stack_info_t_start_OFFSET)
 /* end - threads */
 
 #endif /* ZEPHYR_KERNEL_INCLUDE_OFFSETS_SHORT_H_ */
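This offset exists so the trampoline assembly can locate the current thread's kernel stack without calling into C; the load sequence in userspace.S above is the assembly spelling of roughly this (sketch):

    /* movl $_kernel, %esi
     * movl _kernel_offset_to_current(%esi), %esi
     * movl _thread_offset_to_stack_start(%esi), %esp   ...is equivalent to: */
    u32_t new_esp = _kernel.current->stack_info.start;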
@@ -27,7 +27,7 @@ void reset_flag(void);
 void reset_multi_pte_page_flag(void);
 void reset_multi_pde_flag(void);
 
-#define PDPT &z_x86_kernel_pdpt
+#define PDPT &USER_PDPT
 
 #define ADDR_PAGE_1 ((u8_t *)__bss_start + SKIP_SIZE * MMU_PAGE_SIZE)
 #define ADDR_PAGE_2 ((u8_t *)__bss_start + (SKIP_SIZE + 1) * MMU_PAGE_SIZE)