x86: use per-thread page tables

Previously, context switching on x86 with memory protection
enabled involved walking the page tables, de-configuring all
the partitions in the outgoing thread's memory domain, and
then configuring all the partitions in the incoming thread's
domain, on a global set of page tables.

We now have a much faster design. Each thread has reserved in
its stack object a number of pages to store page directories
and page tables pertaining to the system RAM area. Each
thread also has a top-level PDPT which is configured to use
the per-thread tables for system RAM, and the global tables
for the rest of the address space.

The result is that on context switch, at most we have to update
the CR3 register to point to the incoming thread's PDPT.
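
To make the fast path concrete, here is a condensed sketch of the
swap-time hook this commit introduces (z_x86_swap_update_page_tables()
in the diff below). It is an illustration only, not the merged code:
the KPTI special case and the _main_tss.esp0 update are omitted, the
sketch function name is made up, and the include is approximate; the
other identifiers come from the diff itself.

    #include <kernel.h> /* struct k_thread, K_USER; the x86 MMU helpers
                         * used below come from the arch headers touched
                         * in this commit
                         */

    /* Illustrative stand-in for z_x86_swap_update_page_tables() */
    static void swap_update_page_tables_sketch(struct k_thread *incoming)
    {
            /* User threads run on the per-thread tables stored in their
             * stack object; supervisor threads share the kernel tables.
             */
            struct x86_mmu_pdpt *pdpt =
                    ((incoming->base.user_options & K_USER) != 0U) ?
                    z_x86_pdpt_get(incoming) : &z_x86_kernel_pdpt;

            /* Loading CR3 flushes the entire TLB, so skip the write when
             * the active tables are already the right ones.
             */
            if (pdpt != z_x86_page_tables_get()) {
                    __asm__ volatile("movl %0, %%cr3" : : "r" (pdpt)
                                     : "memory");
            }
    }

Every per-thread PDPT shares the global tables for everything outside
system RAM, and the kernel's own mappings carry identical
supervisor-mode attributes in every copy, which is why the CR3 switch
is safe to do in the middle of __swap.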

The x86_mmu_api test was making too many assumptions and has
been adjusted to work with the new design.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie, 2019-07-29 18:22:30 -07:00, committed by Carles Cufí
commit 8014e075f4
10 changed files with 558 additions and 318 deletions

@@ -22,8 +22,8 @@
GTEXT(_x86_user_thread_entry_wrapper)
/* externs */
#ifdef CONFIG_X86_USERSPACE
GTEXT(_x86_swap_update_page_tables)
#if !defined(CONFIG_X86_KPTI) && defined(CONFIG_X86_USERSPACE)
GTEXT(z_x86_swap_update_page_tables)
#endif
GDATA(_k_neg_eagain)
@@ -148,21 +148,23 @@ SECTION_FUNC(TEXT, __swap)
* thread to be swapped in, and %edi still contains &_kernel. %edx
* has the pointer to the outgoing thread.
*/
#ifdef CONFIG_X86_USERSPACE
#if defined(CONFIG_X86_USERSPACE) && !defined(CONFIG_X86_KPTI)
#ifdef CONFIG_X86_IAMCU
push %eax
#else
push %edx
push %eax
#endif
call _x86_swap_update_page_tables
call z_x86_swap_update_page_tables
#ifdef CONFIG_X86_IAMCU
pop %eax
#else
pop %eax
pop %edx
#endif
/* Page tables updated. All memory access after this point needs to be
* to memory that has the same mappings and access attributes wrt
* supervisor mode!
*/
#endif
#ifdef CONFIG_EAGER_FP_SHARING

@@ -38,6 +38,141 @@ struct _x86_initial_frame {
void *p3;
};
#ifdef CONFIG_X86_USERSPACE
/* Nothing to do here if KPTI is enabled. We are in supervisor mode, so the
* active PDPT is the kernel's page tables. If the incoming thread is in user
* mode we are going to switch CR3 to the thread-specific tables when we go
* through z_x86_trampoline_to_user.
*
* We don't need to update _main_tss either, privilege elevation always lands
* on the trampoline stack and the irq/syscall code has to manually transition
* off of it to the thread's kernel stack after switching page tables.
*/
#ifndef CONFIG_X86_KPTI
/* Change to new set of page tables. ONLY intended for use from
* z_x86_swap_update_page_tables(). This changes CR3; no memory access
* afterwards is legal unless it is known for sure that the relevant
* mappings are identical wrt supervisor mode until we iret out.
*/
static inline void page_tables_set(struct x86_mmu_pdpt *pdpt)
{
__asm__ volatile("movl %0, %%cr3\n\t" : : "r" (pdpt) : "memory");
}
/* Switch to the incoming thread's page tables, and update the location
* of the privilege elevation stack.
*
* May be called ONLY during context switch and when supervisor
* threads drop synchronously to user mode. Hot code path!
*/
void z_x86_swap_update_page_tables(struct k_thread *incoming)
{
struct x86_mmu_pdpt *pdpt;
/* If we're a user thread, we want the active page tables to
* be the per-thread instance.
*
* However, if we're a supervisor thread, use the master
* kernel page tables instead.
*/
if ((incoming->base.user_options & K_USER) != 0) {
pdpt = z_x86_pdpt_get(incoming);
/* In case of privilege elevation, use the incoming
* thread's kernel stack. This area starts immediately
* before the PDPT.
*/
_main_tss.esp0 = (uintptr_t)pdpt;
} else {
pdpt = &z_x86_kernel_pdpt;
}
/* Check first that we actually need to do this, since setting
* CR3 involves an expensive full TLB flush.
*/
if (pdpt != z_x86_page_tables_get()) {
page_tables_set(pdpt);
}
}
#endif /* CONFIG_X86_KPTI */
static FUNC_NORETURN void drop_to_user(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
u32_t stack_end;
/* Transition will reset stack pointer to initial, discarding
* any old context since this is a one-way operation
*/
stack_end = STACK_ROUND_DOWN(_current->stack_info.start +
_current->stack_info.size);
z_x86_userspace_enter(user_entry, p1, p2, p3, stack_end,
_current->stack_info.start);
CODE_UNREACHABLE;
}
FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
/* Set up the kernel stack used during privilege elevation */
z_x86_mmu_set_flags(&z_x86_kernel_pdpt,
(void *)(_current->stack_info.start -
MMU_PAGE_SIZE),
MMU_PAGE_SIZE, MMU_ENTRY_WRITE, MMU_PTE_RW_MASK,
true);
/* Initialize per-thread page tables, since that wasn't done when
* the thread was initialized (K_USER was not set at creation time)
*/
z_x86_thread_pt_init(_current);
/* Apply memory domain configuration, if assigned */
if (_current->mem_domain_info.mem_domain != NULL) {
z_x86_apply_mem_domain(z_x86_pdpt_get(_current),
_current->mem_domain_info.mem_domain);
}
#ifndef CONFIG_X86_KPTI
/* We're synchronously dropping into user mode from a thread that
* used to be in supervisor mode. The K_USER flag has now been set, but
* we need to swap from the kernel's page tables to the per-thread page
* tables.
*
* Safe to update page tables from here, all tables are identity-
* mapped and memory areas used before the ring 3 transition all
* have the same attributes wrt supervisor mode access.
*/
z_x86_swap_update_page_tables(_current);
#endif
drop_to_user(user_entry, p1, p2, p3);
}
/* Implemented in userspace.S */
extern void z_x86_syscall_entry_stub(void);
/* Syscalls invoked by 'int 0x80'. Installed in the IDT at DPL=3 so that
* userspace can invoke it.
*/
NANO_CPU_INT_REGISTER(z_x86_syscall_entry_stub, -1, -1, 0x80, 3);
#endif /* CONFIG_X86_USERSPACE */
#if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
extern int z_float_disable(struct k_thread *thread);
int z_arch_float_disable(struct k_thread *thread)
{
#if defined(CONFIG_LAZY_FP_SHARING)
return z_float_disable(thread);
#else
return -ENOSYS;
#endif /* CONFIG_LAZY_FP_SHARING */
}
#endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */
/**
* @brief Create a new kernel execution thread
*
@@ -67,20 +202,22 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
stack_buf = Z_THREAD_STACK_BUFFER(stack);
z_new_thread_init(thread, stack_buf, stack_size, priority, options);
#if CONFIG_X86_USERSPACE
if ((options & K_USER) == 0U) {
/* Running in kernel mode, kernel stack region is also a guard
* page */
#ifdef CONFIG_X86_USERSPACE
/* Set MMU properties for the privilege mode elevation stack.
* If we're not starting in user mode, this functions as a guard
* area.
*/
z_x86_mmu_set_flags(&z_x86_kernel_pdpt,
(void *)(stack_buf - MMU_PAGE_SIZE),
MMU_PAGE_SIZE, MMU_ENTRY_READ,
MMU_PTE_RW_MASK);
}
(void *)(stack_buf - MMU_PAGE_SIZE), MMU_PAGE_SIZE,
((options & K_USER) == 0U) ? MMU_ENTRY_READ : MMU_ENTRY_WRITE,
MMU_PTE_RW_MASK, true);
#endif /* CONFIG_X86_USERSPACE */
#if CONFIG_X86_STACK_PROTECTION
/* Set guard area to read-only to catch stack overflows */
z_x86_mmu_set_flags(&z_x86_kernel_pdpt, stack + Z_X86_THREAD_PT_AREA,
MMU_PAGE_SIZE, MMU_ENTRY_READ, MMU_PTE_RW_MASK);
MMU_PAGE_SIZE, MMU_ENTRY_READ, MMU_PTE_RW_MASK,
true);
#endif
stack_high = (char *)STACK_ROUND_DOWN(stack_buf + stack_size);
@@ -96,11 +233,12 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
initial_frame->eflags = EFLAGS_INITIAL;
#ifdef CONFIG_X86_USERSPACE
if ((options & K_USER) != 0U) {
z_x86_thread_pt_init(thread);
#ifdef _THREAD_WRAPPER_REQUIRED
initial_frame->edi = (u32_t)z_arch_user_mode_enter;
initial_frame->edi = (u32_t)drop_to_user;
initial_frame->thread_entry = z_x86_thread_entry_wrapper;
#else
initial_frame->thread_entry = z_arch_user_mode_enter;
initial_frame->thread_entry = drop_to_user;
#endif /* _THREAD_WRAPPER_REQUIRED */
} else
#endif /* CONFIG_X86_USERSPACE */
@@ -121,94 +259,3 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
thread->arch.excNestCount = 0;
#endif /* CONFIG_LAZY_FP_SHARING */
}
#ifdef CONFIG_X86_USERSPACE
void _x86_swap_update_page_tables(struct k_thread *incoming,
struct k_thread *outgoing)
{
/* Outgoing thread stack no longer accessible */
z_x86_reset_pages((void *)outgoing->stack_info.start,
ROUND_UP(outgoing->stack_info.size, MMU_PAGE_SIZE));
/* Userspace can now access the incoming thread's stack */
z_x86_mmu_set_flags(&USER_PDPT,
(void *)incoming->stack_info.start,
ROUND_UP(incoming->stack_info.size, MMU_PAGE_SIZE),
MMU_ENTRY_PRESENT | K_MEM_PARTITION_P_RW_U_RW,
K_MEM_PARTITION_PERM_MASK | MMU_PTE_P_MASK);
#ifndef CONFIG_X86_KPTI
/* In case of privilege elevation, use the incoming thread's kernel
* stack, the top of the thread stack is the bottom of the kernel
* stack.
*
* If KPTI is enabled, then privilege elevation always lands on the
* trampoline stack and the irq/syscall code has to manually transition
* off of it to the thread's kernel stack after switching page
* tables.
*/
_main_tss.esp0 = incoming->stack_info.start;
#endif
/* If either thread defines different memory domains, efficiently
* switch between them
*/
if (incoming->mem_domain_info.mem_domain !=
outgoing->mem_domain_info.mem_domain){
/* Ensure that the outgoing mem domain configuration
* is set back to default state.
*/
z_x86_mem_domain_pages_update(outgoing->mem_domain_info.mem_domain,
X86_MEM_DOMAIN_RESET_PAGES);
z_x86_mem_domain_pages_update(incoming->mem_domain_info.mem_domain,
X86_MEM_DOMAIN_SET_PAGES);
}
}
FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
u32_t stack_end;
/* Transition will reset stack pointer to initial, discarding
* any old context since this is a one-way operation
*/
stack_end = STACK_ROUND_DOWN(_current->stack_info.start +
_current->stack_info.size);
/* Set up the kernel stack used during privilege elevation */
z_x86_mmu_set_flags(&z_x86_kernel_pdpt,
(void *)(_current->stack_info.start - MMU_PAGE_SIZE),
MMU_PAGE_SIZE, MMU_ENTRY_WRITE, MMU_PTE_RW_MASK);
z_x86_userspace_enter(user_entry, p1, p2, p3, stack_end,
_current->stack_info.start);
CODE_UNREACHABLE;
}
/* Implemented in userspace.S */
extern void z_x86_syscall_entry_stub(void);
/* Syscalls invoked by 'int 0x80'. Installed in the IDT at DPL=3 so that
* userspace can invoke it.
*/
NANO_CPU_INT_REGISTER(z_x86_syscall_entry_stub, -1, -1, 0x80, 3);
#endif /* CONFIG_X86_USERSPACE */
#if defined(CONFIG_FLOAT) && defined(CONFIG_FP_SHARING)
extern int z_float_disable(struct k_thread *thread);
int z_arch_float_disable(struct k_thread *thread)
{
#if defined(CONFIG_LAZY_FP_SHARING)
return z_float_disable(thread);
#else
return -ENOSYS;
#endif /* CONFIG_LAZY_FP_SHARING */
}
#endif /* CONFIG_FLOAT && CONFIG_FP_SHARING */

@@ -58,13 +58,14 @@ SECTION_FUNC(TEXT, z_x86_trampoline_to_kernel)
/* %esp = _kernel->current->stack_info.start
*
* This is the lowest address of the user mode stack, and highest
* address of the kernel stack, they are adjacent.
* We want to transplant context here.
* This is the lowest address of the user mode stack, the PDPT is
* immediately before it, and then the highest address of the kernel
* stack. We want to transplant context here.
*/
movl $_kernel, %esi
movl _kernel_offset_to_current(%esi), %esi
movl _thread_offset_to_stack_start(%esi), %esp
subl $Z_X86_PDPT_SIZE, %esp
/* Transplant stack context and restore ESI/EDI. Taking care to zero
* or put uninteresting values where we stashed ESI/EDI since the
@@ -134,9 +135,15 @@ SECTION_FUNC(TEXT, z_x86_trampoline_to_user_always)
xchgl %edi, (%edi) /* Exchange old edi to restore it and put
trampoline stack address in its old storage
area */
/* Switch to user page table */
/* Switch to user page table. The per-thread user page table is
* located at the highest addresses of the privilege mode elevation
* stack, immediately below the thread stack buffer.
*/
pushl %eax
movl $z_x86_user_pdpt, %eax
movl $_kernel, %eax
movl _kernel_offset_to_current(%eax), %eax
movl _thread_offset_to_stack_start(%eax), %eax
subl $Z_X86_PDPT_SIZE, %eax
movl %eax, %cr3
popl %eax
movl $0, -4(%esp) /* Delete stashed EAX data */
@@ -166,13 +173,14 @@ SECTION_FUNC(TEXT, z_x86_syscall_entry_stub)
/* %esp = _kernel->current->stack_info.start
*
* This is the lowest address of the user mode stack, and highest
* address of the kernel stack, they are adjacent.
* We want to transplant context here.
* This is the lowest address of the user mode stack, the PDPT is
* immediately before it, and then the highest address of the kernel
* stack. We want to transplant context here.
*/
movl $_kernel, %esi
movl _kernel_offset_to_current(%esi), %esi
movl _thread_offset_to_stack_start(%esi), %esp
subl $Z_X86_PDPT_SIZE, %esp
/* Transplant context according to layout above. Variant of logic
* in x86_trampoline_to_kernel */
@@ -324,6 +332,7 @@ SECTION_FUNC(TEXT, z_x86_userspace_enter)
* want to leak any information.
*/
mov %edi, %esp
subl $Z_X86_PDPT_SIZE, %esp
/* Stash some registers we are going to need to erase the user
* stack.

@@ -11,6 +11,7 @@
#include <kernel_structs.h>
#include <init.h>
#include <ctype.h>
#include <string.h>
/* Common regions for all x86 processors.
* Peripheral I/O ranges configured at the SOC level
@@ -180,8 +181,8 @@ void z_x86_mmu_get_flags(struct x86_mmu_pdpt *pdpt, void *addr,
}
}
int z_arch_buffer_validate(void *addr, size_t size, int write)
int z_x86_mmu_validate(struct x86_mmu_pdpt *pdpt, void *addr, size_t size,
int write)
{
u32_t start_pde_num;
u32_t end_pde_num;
@@ -212,12 +213,12 @@ int z_arch_buffer_validate(void *addr, size_t size, int write)
}
/* Ensure page directory pointer table entry is present */
if (X86_MMU_GET_PDPTE_INDEX(&USER_PDPT, pdpte)->p == 0) {
if (X86_MMU_GET_PDPTE_INDEX(pdpt, pdpte)->p == 0) {
goto out;
}
struct x86_mmu_pd *pd_address =
X86_MMU_GET_PD_ADDR_INDEX(&USER_PDPT, pdpte);
X86_MMU_GET_PD_ADDR_INDEX(pdpt, pdpte);
/* Iterate for all the pde's the buffer might take up.
* (depends on the size of the buffer and start address
@@ -298,10 +299,9 @@ static inline void tlb_flush_page(void *addr)
}
void z_x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
size_t size,
void z_x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr, size_t size,
x86_page_entry_data_t flags,
x86_page_entry_data_t mask)
x86_page_entry_data_t mask, bool flush)
{
union x86_mmu_pte *pte;
@@ -335,7 +335,9 @@ void z_x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
}
pte->value = (pte->value & ~mask) | cur_flags;
if (flush) {
tlb_flush_page((void *)addr);
}
size -= MMU_PAGE_SIZE;
addr += MMU_PAGE_SIZE;
@@ -343,150 +345,317 @@ void z_x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
}
#ifdef CONFIG_X86_USERSPACE
void z_x86_reset_pages(void *start, size_t size)
int z_arch_buffer_validate(void *addr, size_t size, int write)
{
#ifdef CONFIG_X86_KPTI
/* Clear both present bit and access flags. Only applies
* to threads running in user mode.
return z_x86_mmu_validate(z_x86_pdpt_get(_current), addr, size, write);
}
static uintptr_t thread_pd_create(uintptr_t pages,
struct x86_mmu_pdpt *thread_pdpt,
struct x86_mmu_pdpt *master_pdpt)
{
uintptr_t pos = pages, phys_addr = Z_X86_PD_START;
for (int i = 0; i < Z_X86_NUM_PD; i++, phys_addr += Z_X86_PD_AREA) {
union x86_mmu_pdpte *pdpte;
struct x86_mmu_pd *master_pd, *dest_pd;
/* Obtain PD in master tables for the address range and copy
* into the per-thread PD for this range
*/
z_x86_mmu_set_flags(&z_x86_user_pdpt, start, size,
MMU_ENTRY_NOT_PRESENT,
K_MEM_PARTITION_PERM_MASK | MMU_PTE_P_MASK);
#else
/* Mark as supervisor read-write, user mode no access */
z_x86_mmu_set_flags(&z_x86_kernel_pdpt, start, size,
K_MEM_PARTITION_P_RW_U_NA,
K_MEM_PARTITION_PERM_MASK);
#endif /* CONFIG_X86_KPTI */
}
master_pd = X86_MMU_GET_PD_ADDR(master_pdpt, phys_addr);
dest_pd = (struct x86_mmu_pd *)pos;
static inline void activate_partition(struct k_mem_partition *partition)
{
/* Set the partition attributes */
u64_t attr, mask;
(void)memcpy(dest_pd, master_pd, sizeof(struct x86_mmu_pd));
#if CONFIG_X86_KPTI
attr = partition->attr | MMU_ENTRY_PRESENT;
mask = K_MEM_PARTITION_PERM_MASK | MMU_PTE_P_MASK;
#else
attr = partition->attr;
mask = K_MEM_PARTITION_PERM_MASK;
#endif /* CONFIG_X86_KPTI */
z_x86_mmu_set_flags(&USER_PDPT,
(void *)partition->start,
partition->size, attr, mask);
}
/* Pass 1 to page_conf if reset of mem domain pages is needed else pass a 0*/
void z_x86_mem_domain_pages_update(struct k_mem_domain *mem_domain,
u32_t page_conf)
{
u32_t partition_index;
u32_t total_partitions;
struct k_mem_partition *partition;
u32_t partitions_count;
/* If mem_domain doesn't point to a valid location return.*/
if (mem_domain == NULL) {
goto out;
/* Update pointer in per-thread pdpt to point to the per-thread
* directory we just copied
*/
pdpte = X86_MMU_GET_PDPTE(thread_pdpt, phys_addr);
pdpte->pd = pos >> MMU_PAGE_SHIFT;
pos += MMU_PAGE_SIZE;
}
/* Get the total number of partitions*/
total_partitions = mem_domain->num_partitions;
return pos;
}
/* Iterate over all the partitions for the given mem_domain
* For x86: iterate over all the partitions and set the
* required flags in the correct MMU page tables.
/* thread_pdpt must be initialized, as well as all the page directories */
static uintptr_t thread_pt_create(uintptr_t pages,
struct x86_mmu_pdpt *thread_pdpt,
struct x86_mmu_pdpt *master_pdpt)
{
uintptr_t pos = pages, phys_addr = Z_X86_PT_START;
for (int i = 0; i < Z_X86_NUM_PT; i++, phys_addr += Z_X86_PT_AREA) {
union x86_mmu_pde_pt *pde;
struct x86_mmu_pt *master_pt, *dest_pt;
/* Same as we did with the directories, obtain PT in master
* tables for the address range and copy into per-thread PT
* for this range
*/
partitions_count = 0U;
for (partition_index = 0U;
partitions_count < total_partitions;
partition_index++) {
master_pt = X86_MMU_GET_PT_ADDR(master_pdpt, phys_addr);
dest_pt = (struct x86_mmu_pt *)pos;
(void)memcpy(dest_pt, master_pt, sizeof(struct x86_mmu_pd));
/* Get the partition info */
partition = &mem_domain->partitions[partition_index];
if (partition->size == 0U) {
/* And then wire this up to the relevant per-thread
* page directory entry
*/
pde = X86_MMU_GET_PDE(thread_pdpt, phys_addr);
pde->pt = pos >> MMU_PAGE_SHIFT;
pos += MMU_PAGE_SIZE;
}
return pos;
}
/* Initialize the page tables for a thread. Once this is done, they will
* contain the boot-time memory configuration for a user thread. There are
* no pre-conditions on the existing state of the per-thread tables.
*/
static void copy_page_tables(struct k_thread *thread,
struct x86_mmu_pdpt *master_pdpt)
{
uintptr_t pos, start;
struct x86_mmu_pdpt *thread_pdpt = z_x86_pdpt_get(thread);
__ASSERT(thread->stack_obj != NULL, "no stack object assigned");
__ASSERT(z_x86_page_tables_get() != thread_pdpt, "PDPT is active");
__ASSERT(((uintptr_t)thread_pdpt & 0x1f) == 0, "unaligned pdpt at %p",
thread_pdpt);
__ASSERT(((uintptr_t)thread_pdpt) == ((uintptr_t)thread->stack_obj +
Z_ARCH_THREAD_STACK_RESERVED -
sizeof(struct x86_mmu_pdpt)),
"misplaced pdpt\n");
__ASSERT(thread->stack_info.start == ((uintptr_t)thread->stack_obj +
Z_ARCH_THREAD_STACK_RESERVED),
"stack_info.start is wrong");
(void)memcpy(thread_pdpt, master_pdpt, sizeof(struct x86_mmu_pdpt));
/* pos represents the page we are working with in the reserved area
* in the stack buffer for per-thread tables. As we create tables in
* this area, pos is incremented to the next free page.
*
* The layout of the stack object, when this is done:
*
* +---------------------------+ <- thread->stack_obj
* | PDE(0) |
* +---------------------------+
* | ... |
* +---------------------------+
* | PDE(Z_X86_NUM_PD - 1) |
* +---------------------------+
* | PTE(0) |
* +---------------------------+
* | ... |
* +---------------------------+
* | PTE(Z_X86_NUM_PT - 1) |
* +---------------------------+ <- pos once this logic completes
* | Stack guard |
* +---------------------------+
* | Privilege elevation stack |
* | PDPT |
* +---------------------------+ <- thread->stack_info.start
* | Thread stack |
* | ... |
*
*/
start = (uintptr_t)(thread->stack_obj);
pos = thread_pd_create(start, thread_pdpt, master_pdpt);
pos = thread_pt_create(pos, thread_pdpt, master_pdpt);
__ASSERT(pos == (start + Z_X86_THREAD_PT_AREA),
"wrong amount of stack object memory used");
}
static void reset_mem_partition(struct x86_mmu_pdpt *thread_pdpt,
struct k_mem_partition *partition)
{
uintptr_t addr = partition->start;
size_t size = partition->size;
__ASSERT((addr & MMU_PAGE_MASK) == 0U, "unaligned address provided");
__ASSERT((size & MMU_PAGE_MASK) == 0U, "unaligned size provided");
while (size != 0) {
union x86_mmu_pte *thread_pte, *master_pte;
thread_pte = X86_MMU_GET_PTE(thread_pdpt, addr);
master_pte = X86_MMU_GET_PTE(&USER_PDPT, addr);
(void)memcpy(thread_pte, master_pte, sizeof(union x86_mmu_pte));
size -= MMU_PAGE_SIZE;
addr += MMU_PAGE_SIZE;
}
}
static void apply_mem_partition(struct x86_mmu_pdpt *pdpt,
struct k_mem_partition *partition)
{
x86_page_entry_data_t x86_attr;
x86_page_entry_data_t mask;
if (IS_ENABLED(CONFIG_X86_KPTI)) {
x86_attr = partition->attr | MMU_ENTRY_PRESENT;
mask = K_MEM_PARTITION_PERM_MASK | MMU_PTE_P_MASK;
} else {
x86_attr = partition->attr;
mask = K_MEM_PARTITION_PERM_MASK;
}
__ASSERT(partition->start >= DT_PHYS_RAM_ADDR,
"region at %08lx[%u] extends below system ram start 0x%08x",
partition->start, partition->size, DT_PHYS_RAM_ADDR);
__ASSERT(((partition->start + partition->size) <=
(DT_PHYS_RAM_ADDR + (DT_RAM_SIZE * 1024U))),
"region at %08lx[%u] end at %08lx extends beyond system ram end 0x%08x",
partition->start, partition->size,
partition->start + partition->size,
(DT_PHYS_RAM_ADDR + (DT_RAM_SIZE * 1024U)));
z_x86_mmu_set_flags(pdpt, (void *)partition->start, partition->size,
x86_attr, mask, false);
}
void z_x86_apply_mem_domain(struct x86_mmu_pdpt *pdpt,
struct k_mem_domain *mem_domain)
{
for (int i = 0, pcount = 0; pcount < mem_domain->num_partitions; i++) {
struct k_mem_partition *partition;
partition = &mem_domain->partitions[i];
if (partition->size == 0) {
continue;
}
partitions_count++;
if (page_conf == X86_MEM_DOMAIN_SET_PAGES) {
activate_partition(partition);
} else {
z_x86_reset_pages((void *)partition->start,
partition->size);
pcount++;
apply_mem_partition(pdpt, partition);
}
}
out:
return;
}
/* Load the partitions of the thread. */
void z_arch_mem_domain_thread_add(struct k_thread *thread)
{
if (_current != thread) {
return;
}
z_x86_mem_domain_pages_update(thread->mem_domain_info.mem_domain,
X86_MEM_DOMAIN_SET_PAGES);
}
/* Destroy or reset the mmu page tables when necessary.
* Needed when either swap takes place or k_mem_domain_destroy is called.
/* Called on creation of a user thread or when a supervisor thread drops to
* user mode.
*
* Sets up the per-thread page tables, such that when they are activated on
* context switch, everything is ready to go.
*/
void z_x86_thread_pt_init(struct k_thread *thread)
{
struct x86_mmu_pdpt *pdpt = z_x86_pdpt_get(thread);
/* USER_PDPT contains the page tables with the boot time memory
* policy. We use it as a template to set up the per-thread page
* tables.
*
* With KPTI, this is a distinct set of tables z_x86_user_pdpt from the
* kernel page tables in z_x86_kernel_pdpt; it has all non user
* accessible pages except the trampoline page marked as non-present.
* Without KPTI, they are the same object.
*/
copy_page_tables(thread, &USER_PDPT);
/* Enable access to the thread's own stack buffer */
z_x86_mmu_set_flags(pdpt, (void *)thread->stack_info.start,
ROUND_UP(thread->stack_info.size, MMU_PAGE_SIZE),
MMU_ENTRY_PRESENT | K_MEM_PARTITION_P_RW_U_RW,
MMU_PTE_P_MASK | K_MEM_PARTITION_PERM_MASK,
false);
}
/*
* Memory domain interface
*
* In all cases, if one of these APIs is called on a supervisor thread,
* we don't need to do anything. If the thread later drops into user
* mode, the per-thread page tables will be generated and the memory domain
* configuration applied.
*/
void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
u32_t partition_id)
{
sys_dnode_t *node, *next_node;
/* Removing a partition. Need to reset the relevant memory range
* to the defaults in USER_PDPT for each thread.
*/
SYS_DLIST_FOR_EACH_NODE_SAFE(&domain->mem_domain_q, node, next_node) {
struct k_thread *thread =
CONTAINER_OF(node, struct k_thread, mem_domain_info);
if ((thread->base.user_options & K_USER) == 0) {
continue;
}
reset_mem_partition(z_x86_pdpt_get(thread),
&domain->partitions[partition_id]);
}
}
void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
{
if (_current->mem_domain_info.mem_domain != domain) {
return;
}
for (int i = 0, pcount = 0; pcount < domain->num_partitions; i++) {
struct k_mem_partition *partition;
z_x86_mem_domain_pages_update(domain, X86_MEM_DOMAIN_RESET_PAGES);
partition = &domain->partitions[i];
if (partition->size == 0) {
continue;
}
pcount++;
z_arch_mem_domain_partition_remove(domain, i);
}
}
void z_arch_mem_domain_thread_remove(struct k_thread *thread)
{
if (_current != thread) {
struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;
/* Non-user threads don't have per-thread page tables set up */
if ((thread->base.user_options & K_USER) == 0) {
return;
}
z_arch_mem_domain_destroy(thread->mem_domain_info.mem_domain);
}
/* Reset/destroy one partition specified in the argument of the API. */
void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
u32_t partition_id)
{
for (int i = 0, pcount = 0; pcount < domain->num_partitions; i++) {
struct k_mem_partition *partition;
if (_current->mem_domain_info.mem_domain != domain) {
return;
partition = &domain->partitions[i];
if (partition->size == 0) {
continue;
}
pcount++;
__ASSERT_NO_MSG(domain != NULL);
__ASSERT(partition_id <= domain->num_partitions,
"invalid partitions");
partition = &domain->partitions[partition_id];
z_x86_reset_pages((void *)partition->start, partition->size);
reset_mem_partition(z_x86_pdpt_get(thread), partition);
}
}
/* Add one partition specified in the argument of the API. */
void z_arch_mem_domain_partition_add(struct k_mem_domain *domain,
u32_t partition_id)
{
struct k_mem_partition *partition;
sys_dnode_t *node, *next_node;
if (_current->mem_domain_info.mem_domain != domain) {
SYS_DLIST_FOR_EACH_NODE_SAFE(&domain->mem_domain_q, node, next_node) {
struct k_thread *thread =
CONTAINER_OF(node, struct k_thread, mem_domain_info);
if ((thread->base.user_options & K_USER) == 0) {
continue;
}
apply_mem_partition(z_x86_pdpt_get(thread),
&domain->partitions[partition_id]);
}
}
void z_arch_mem_domain_thread_add(struct k_thread *thread)
{
if ((thread->base.user_options & K_USER) == 0) {
return;
}
__ASSERT_NO_MSG(domain != NULL);
__ASSERT(partition_id <= domain->num_partitions,
"invalid partitions");
partition = &domain->partitions[partition_id];
activate_partition(partition);
z_x86_apply_mem_domain(z_x86_pdpt_get(thread),
thread->mem_domain_info.mem_domain);
}
int z_arch_mem_domain_max_partitions_get(void)

@@ -38,6 +38,10 @@ GEN_OFFSET_SYM(_thread_arch_t, excNestCount);
GEN_OFFSET_SYM(_thread_arch_t, preempFloatReg);
#ifdef CONFIG_USERSPACE
GEN_ABSOLUTE_SYM(Z_X86_PDPT_SIZE, sizeof(struct x86_mmu_pdpt));
#endif
/**
* size of the struct k_thread structure sans save area for floating
* point regs

@@ -49,7 +49,7 @@ static inline void kernel_arch_init(void)
#endif
#if CONFIG_X86_STACK_PROTECTION
z_x86_mmu_set_flags(&z_x86_kernel_pdpt, _interrupt_stack, MMU_PAGE_SIZE,
MMU_ENTRY_READ, MMU_PTE_RW_MASK);
MMU_ENTRY_READ, MMU_PTE_RW_MASK, true);
#endif
}
@@ -76,18 +76,24 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
extern void k_cpu_atomic_idle(unsigned int key);
#ifdef CONFIG_USERSPACE
extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3,
u32_t stack_end,
u32_t stack_start);
/* Helper macros needed to be passed to x86_update_mem_domain_pages */
#define X86_MEM_DOMAIN_SET_PAGES (0U)
#define X86_MEM_DOMAIN_RESET_PAGES (1U)
void z_x86_thread_pt_init(struct k_thread *thread);
extern void z_x86_mem_domain_pages_update(struct k_mem_domain *mem_domain,
u32_t page_conf);
void z_x86_apply_mem_domain(struct x86_mmu_pdpt *pdpt,
struct k_mem_domain *mem_domain);
static inline struct x86_mmu_pdpt *z_x86_pdpt_get(struct k_thread *thread)
{
uintptr_t addr = thread->stack_info.start;
return (struct x86_mmu_pdpt *)(addr - sizeof(struct x86_mmu_pdpt));
}
#endif /* CONFIG_USERSPACE */
#include <stddef.h> /* For size_t */
#ifdef __cplusplus

@@ -35,6 +35,7 @@
#ifndef _ASMLANGUAGE
#include <stdint.h>
#include <ia32/mmustructs.h>
/*
* The following structure defines the set of 'non-volatile' integer registers.
@@ -221,6 +222,24 @@ struct _thread_arch {
* struct without ill effect.
*/
tPreempFloatReg preempFloatReg; /* volatile float register storage */
#ifdef CONFIG_USERSPACE
/* Per-thread page directory pointer table when a thread is running
* in user mode.
*
* With KPTI enabled, any privilege elevation or ISR while that thread
* is running will switch to the master kernel pdpt at
* z_x86_kernel_pdpt; the memory domain policy will not apply at
* all.
*
* With KPTI disabled, this pdpt will be active at all times when
* the thread is running. This has implications for memory domain
* partitions that are read-only!!
*
* See #17833 for more discussion.
*/
__aligned(0x20) struct x86_mmu_pdpt user_pdpt;
#endif /* CONFIG_USERSPACE */
};
typedef struct _thread_arch _thread_arch_t;

@@ -526,11 +526,6 @@ struct x86_mmu_pt {
*/
void z_x86_dump_page_tables(struct x86_mmu_pdpt *pdpt);
static inline void z_x86_page_tables_set(struct x86_mmu_pdpt *pdpt)
{
__asm__ volatile("movl %0, %%cr3\n\t" : : "r" (pdpt));
}
static inline struct x86_mmu_pdpt *z_x86_page_tables_get(void)
{
struct x86_mmu_pdpt *ret;

@@ -738,9 +738,18 @@ extern struct task_state_segment _main_tss;
#endif
#ifdef CONFIG_X86_MMU
/* kernel's page table */
/* Kernel's page table. Always active when threads are running in supervisor
* mode, or handling an interrupt.
*
* If KPTI is not enabled, this is used as a template to create per-thread
* page tables for when threads run in user mode.
*/
extern struct x86_mmu_pdpt z_x86_kernel_pdpt;
#ifdef CONFIG_X86_KPTI
/* Separate page tables for user mode threads. The top-level PDPT is never
* installed into the CPU; instead used as a template for creating per-thread
* page tables.
*/
extern struct x86_mmu_pdpt z_x86_user_pdpt;
#define USER_PDPT z_x86_user_pdpt
#else
@@ -774,14 +783,16 @@ void z_x86_mmu_get_flags(struct x86_mmu_pdpt *pdpt, void *addr,
* @param flags Value of bits to set in the page table entries
* @param mask Mask indicating which particular bits in the page table entries to
* modify
* @param flush Whether to flush the TLB for the modified pages, only needed
* when modifying the active page tables
*/
void z_x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
size_t size,
x86_page_entry_data_t flags,
x86_page_entry_data_t mask);
void z_x86_reset_pages(void *start, size_t size);
x86_page_entry_data_t mask, bool flush);
int z_x86_mmu_validate(struct x86_mmu_pdpt *pdpt, void *addr, size_t size,
int write);
#endif /* CONFIG_X86_MMU */
#endif /* !_ASMLANGUAGE */

@@ -21,12 +21,11 @@ static int status;
#define BUFF_WRITEABLE ((u32_t) 0x1)
#define BUFF_USER ((u32_t) 0x2)
int z_arch_buffer_validate(void *addr, size_t size, int write);
void reset_flag(void);
void reset_multi_pte_page_flag(void);
void reset_multi_pde_flag(void);
#define PDPT &USER_PDPT
#define PDPT (&z_x86_kernel_pdpt)
#define ADDR_PAGE_1 ((u8_t *)__bss_start + SKIP_SIZE * MMU_PAGE_SIZE)
#define ADDR_PAGE_2 ((u8_t *)__bss_start + (SKIP_SIZE + 1) * MMU_PAGE_SIZE)
@@ -37,12 +36,16 @@ void reset_multi_pde_flag(void);
static void set_flags(void *ptr, size_t size, x86_page_entry_data_t flags,
x86_page_entry_data_t mask)
{
z_x86_mmu_set_flags(PDPT, ptr, size, flags, mask);
z_x86_mmu_set_flags(PDPT, ptr, size, flags, mask, true);
}
static int buffer_validate(void *addr, size_t size, int write)
{
return z_x86_mmu_validate(PDPT, addr, size, write);
}
/* if Failure occurs
* z_arch_buffer_validate return -EPERM
* buffer_validate return -EPERM
* else return 0.
* Below conditions will be tested accordingly
*
@@ -58,9 +61,7 @@ static int buffer_rw_read(void)
MMU_ENTRY_READ,
MMU_PDE_RW_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
BUFF_SIZE,
BUFF_WRITEABLE);
status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_WRITEABLE);
if (status != -EPERM) {
TC_PRINT("%s failed\n", __func__);
@@ -79,9 +80,7 @@ static int buffer_writeable_write(void)
MMU_ENTRY_WRITE,
MMU_PDE_RW_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
BUFF_SIZE,
BUFF_WRITEABLE);
status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_WRITEABLE);
if (status != 0) {
TC_PRINT("%s failed\n", __func__);
return TC_FAIL;
@@ -99,9 +98,7 @@ static int buffer_readable_read(void)
MMU_ENTRY_READ,
MMU_PDE_RW_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
BUFF_SIZE,
BUFF_READABLE);
status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_READABLE);
if (status != 0) {
TC_PRINT("%s failed\n", __func__);
return TC_FAIL;
@@ -119,10 +116,7 @@ static int buffer_readable_write(void)
MMU_ENTRY_WRITE,
MMU_PDE_RW_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
BUFF_SIZE,
BUFF_READABLE);
status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_READABLE);
if (status != 0) {
TC_PRINT("%s failed\n", __func__);
return TC_FAIL;
@@ -141,10 +135,8 @@ static int buffer_supervisor_rw(void)
MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
BUFF_SIZE,
BUFF_READABLE | BUFF_USER);
status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_READABLE |
BUFF_USER);
if (status != -EPERM) {
TC_PRINT("%s failed\n", __func__);
return TC_FAIL;
@@ -162,10 +154,7 @@ static int buffer_supervisor_w(void)
MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
BUFF_SIZE,
BUFF_WRITEABLE);
status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_WRITEABLE);
if (status != -EPERM) {
TC_PRINT("%s failed\n", __func__);
return TC_FAIL;
@@ -182,9 +171,8 @@ static int buffer_user_rw_user(void)
MMU_PAGE_SIZE,
MMU_ENTRY_WRITE | MMU_ENTRY_USER,
MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
BUFF_SIZE,
BUFF_WRITEABLE | BUFF_USER);
status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_WRITEABLE |
BUFF_USER);
if (status != 0) {
TC_PRINT("%s failed\n", __func__);
return TC_FAIL;
@@ -201,9 +189,8 @@ static int buffer_user_rw_supervisor(void)
MMU_PAGE_SIZE,
MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
BUFF_SIZE,
BUFF_WRITEABLE | BUFF_USER);
status = buffer_validate(ADDR_PAGE_1, BUFF_SIZE, BUFF_WRITEABLE |
BUFF_USER);
if (status != -EPERM) {
TC_PRINT("%s failed\n", __func__);
return TC_FAIL;
@@ -228,7 +215,7 @@ static int multi_page_buffer_user(void)
MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
status = buffer_validate(ADDR_PAGE_1,
2 * MMU_PAGE_SIZE,
BUFF_WRITEABLE | BUFF_USER);
if (status != -EPERM) {
@@ -254,8 +241,7 @@ static int multi_page_buffer_write_user(void)
MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR,
MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
2 * MMU_PAGE_SIZE,
status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE,
BUFF_WRITEABLE);
if (status != -EPERM) {
TC_PRINT("%s failed\n", __func__);
@@ -280,9 +266,8 @@ static int multi_page_buffer_read_user(void)
MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR,
MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
2 * MMU_PAGE_SIZE,
BUFF_READABLE | BUFF_USER);
status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE, BUFF_READABLE
| BUFF_USER);
if (status != -EPERM) {
TC_PRINT("%s failed\n", __func__);
return TC_FAIL;
@@ -306,8 +291,7 @@ static int multi_page_buffer_read(void)
MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR,
MMU_PTE_RW_MASK | MMU_PTE_US_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
2 * MMU_PAGE_SIZE,
status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE,
BUFF_WRITEABLE);
if (status != -EPERM) {
TC_PRINT("%s failed\n", __func__);
@@ -332,10 +316,8 @@ static int multi_pde_buffer_rw(void)
MMU_ENTRY_READ,
MMU_PDE_RW_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
2 * MMU_PAGE_SIZE,
status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE,
BUFF_WRITEABLE);
if (status != -EPERM) {
TC_PRINT("%s failed\n", __func__);
return TC_FAIL;
@@ -359,8 +341,7 @@ static int multi_pde_buffer_writeable_write(void)
MMU_ENTRY_WRITE,
MMU_PDE_RW_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
2 * MMU_PAGE_SIZE,
status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE,
BUFF_WRITEABLE);
if (status != 0) {
TC_PRINT("%s failed\n", __func__);
@@ -385,8 +366,7 @@ static int multi_pde_buffer_readable_read(void)
MMU_ENTRY_READ,
MMU_PDE_RW_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
2 * MMU_PAGE_SIZE,
status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE,
BUFF_READABLE);
if (status != 0) {
TC_PRINT("%s failed\n", __func__);
@@ -411,10 +391,8 @@ static int multi_pde_buffer_readable_write(void)
MMU_ENTRY_WRITE,
MMU_PDE_RW_MASK);
status = z_arch_buffer_validate(ADDR_PAGE_1,
2 * MMU_PAGE_SIZE,
status = buffer_validate(ADDR_PAGE_1, 2 * MMU_PAGE_SIZE,
BUFF_READABLE);
if (status != 0) {
TC_PRINT("%s failed\n", __func__);
return TC_FAIL;
@@ -462,7 +440,7 @@ void reset_multi_pde_flag(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_multi_pde_buffer_readable_write(void)
{
@@ -474,7 +452,7 @@ void test_multi_pde_buffer_readable_write(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_multi_pde_buffer_readable_read(void)
{
@@ -486,7 +464,7 @@ void test_multi_pde_buffer_readable_read(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_multi_pde_buffer_writeable_write(void)
{
@@ -498,7 +476,7 @@ void test_multi_pde_buffer_writeable_write(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_multi_pde_buffer_rw(void)
{
@@ -510,7 +488,7 @@ void test_multi_pde_buffer_rw(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_buffer_rw_read(void)
{
@@ -522,7 +500,7 @@ void test_buffer_rw_read(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_buffer_writeable_write(void)
{
@@ -534,7 +512,7 @@ void test_buffer_writeable_write(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_buffer_readable_read(void)
{
@@ -546,7 +524,7 @@ void test_buffer_readable_read(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_buffer_readable_write(void)
{
@@ -558,7 +536,7 @@ void test_buffer_readable_write(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_buffer_supervisor_rw(void)
{
@@ -570,7 +548,7 @@ void test_buffer_supervisor_rw(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_buffer_supervisor_w(void)
{
@@ -582,7 +560,7 @@ void test_buffer_supervisor_w(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_buffer_user_rw_user(void)
{
@@ -594,7 +572,7 @@ void test_buffer_user_rw_user(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_buffer_user_rw_supervisor(void)
{
@@ -606,7 +584,7 @@ void test_buffer_user_rw_supervisor(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_multi_page_buffer_user(void)
{
@@ -618,7 +596,7 @@ void test_multi_page_buffer_user(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_multi_page_buffer_write_user(void)
{
@@ -630,7 +608,7 @@ void test_multi_page_buffer_write_user(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_multi_page_buffer_read_user(void)
{
@@ -642,7 +620,7 @@ void test_multi_page_buffer_read(void)
*
* @ingroup kernel_memprotect_tests
*
* @see z_arch_buffer_validate(), z_x86_mmu_set_flags()
* @see z_x86_mmu_validate(), z_x86_mmu_set_flags()
*/
void test_multi_page_buffer_read(void)
{