x86: implement userspace APIs

- _arch_user_mode_enter() implemented
- _arch_is_user_context() implemented
- _new_thread() will honor K_USER option if passed in
- System call triggering macros implemented
- _thread_entry_wrapper moved and now looks for the next function to
call in EDI

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Authored by Andrew Boie on 2017-08-30 14:06:30 -07:00, committed by Andrew Boie
commit 424e993b41
11 changed files with 510 additions and 101 deletions
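
As context for the K_USER and system-call changes below, here is a minimal, hedged sketch of how an application could ask for a user-mode thread, assuming the k_thread_create() and K_THREAD_STACK_DEFINE() APIs as they existed around this release; user_fn, my_stack and the priority value are illustrative only:

#include <kernel.h>

#define MY_STACK_SIZE 1024
K_THREAD_STACK_DEFINE(my_stack, MY_STACK_SIZE);   /* illustrative name */
static struct k_thread my_thread;                 /* illustrative name */

/* Entry point that ends up running in ring 3 once the thread is started */
static void user_fn(void *p1, void *p2, void *p3)
{
	/* System calls made from here go through the 'int $0x80' path below */
}

void start_user_thread(void)
{
	/* K_USER asks _new_thread() to route the first swap-in through
	 * _arch_user_mode_enter() instead of jumping straight to _thread_entry
	 */
	k_thread_create(&my_thread, my_stack, MY_STACK_SIZE,
			user_fn, NULL, NULL, NULL,
			5, K_USER, K_NO_WAIT);
}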


@ -35,12 +35,14 @@ config CPU_ATOM
select CMOV
select CPU_HAS_FPU
select ARCH_HAS_STACK_PROTECTION if X86_MMU
select ARCH_HAS_USERSPACE if X86_MMU
help
This option signifies the use of a CPU from the Atom family.
config CPU_MINUTEIA
# Hidden
select ARCH_HAS_STACK_PROTECTION if X86_MMU
select ARCH_HAS_USERSPACE if X86_MMU
bool
help
This option signifies the use of a CPU from the Minute IA family.
@ -88,6 +90,15 @@ config X86_STACK_PROTECTION
bounds of the current process stack are overflowed. This is done
by preceding all stack areas with a 4K guard page.
config X86_USERSPACE
bool
default y if USERSPACE
select THREAD_STACK_INFO
help
This option enables APIs to drop a thread's privileges down to ring 3,
supporting user-level threads that are protected from each other and
from crashing the kernel.
menu "Floating Point Options"
depends on CPU_HAS_FPU


@ -21,7 +21,7 @@ obj-$(CONFIG_IRQ_OFFLOAD) += irq_offload.o
obj-$(CONFIG_FP_SHARING) += float.o
obj-$(CONFIG_REBOOT_RST_CNT) += reboot_rst_cnt.o
obj-$(CONFIG_X86_MMU) += x86_mmu.o
obj-$(CONFIG_X86_USERSPACE) += userspace.o
obj-$(CONFIG_DEBUG_INFO) += debug/
obj-$(CONFIG_REBOOT_RST_CNT) += reboot_rst_cnt.o


@ -13,6 +13,7 @@
#include <arch/x86/asm.h>
#include <kernel_arch_data.h>
#include <arch/cpu.h>
/* exports (private APIs) */


@ -24,8 +24,13 @@
/* exports (internal APIs) */
GTEXT(__swap)
GTEXT(_x86_thread_entry_wrapper)
GTEXT(_x86_user_thread_entry_wrapper)
/* externs */
#ifdef CONFIG_X86_USERSPACE
GTEXT(_x86_swap_update_page_tables)
#endif
GDATA(_k_neg_eagain)
/**
@ -133,8 +138,8 @@ SECTION_FUNC(TEXT, __swap)
/* save esp into k_thread structure */
movl _kernel_offset_to_current(%edi), %ecx
movl %esp, _thread_offset_to_esp(%ecx)
movl _kernel_offset_to_current(%edi), %edx
movl %esp, _thread_offset_to_esp(%edx)
#ifdef CONFIG_KERNEL_EVENT_LOGGER_CONTEXT_SWITCH
/* Register the context switch */
@ -144,8 +149,25 @@ SECTION_FUNC(TEXT, __swap)
/*
* At this point, the %eax register contains the 'k_thread *' of the
* thread to be swapped in, and %edi still contains &_kernel.
* thread to be swapped in, and %edi still contains &_kernel. %edx
* has the pointer to the outgoing thread.
*/
#ifdef CONFIG_X86_USERSPACE
#ifdef CONFIG_X86_IAMCU
push %eax
#else
push %edx
push %eax
#endif
call _x86_swap_update_page_tables
#ifdef CONFIG_X86_IAMCU
pop %eax
#else
pop %eax
pop %edx
#endif
#endif
#ifdef CONFIG_FP_SHARING
/*
@ -368,3 +390,81 @@ skipIntLatencyStop:
time_read_not_needed:
#endif
ret
#if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO) \
|| defined(CONFIG_X86_IAMCU)
/**
*
* @brief Adjust stack/parameters before invoking thread entry function
*
* This function adjusts the initial stack frame created by _new_thread() such
* that the GDB stack frame unwinders recognize it as the outermost frame in
* the thread's stack. For targets that use the IAMCU calling convention, the
* first three arguments are popped into eax, edx, and ecx. The function then
* jumps to _thread_entry().
*
* GDB normally stops unwinding a stack when it detects that it has
* reached a function called main(). Kernel tasks, however, do not have
* a main() function, and there does not appear to be a simple way of stopping
* the unwinding of the stack.
*
* SYS V Systems:
*
* Given the initial thread created by _new_thread(), GDB expects to find a
* return address on the stack immediately above the thread entry routine
* _thread_entry, in the location occupied by the initial EFLAGS.
* GDB attempts to examine the memory at this return address, which typically
* results in an invalid access to page 0 of memory.
*
* This function overwrites the initial EFLAGS with zero. When GDB subsequently
* attempts to examine memory at address zero, the PeekPoke driver detects
* an invalid access to address zero and returns an error, which causes the
* GDB stack unwinder to stop somewhat gracefully.
*
* The initial EFLAGS cannot be overwritten until after _Swap() has swapped in
* the new thread for the first time. This routine is called by _Swap() the
* first time that the new thread is swapped in, and it jumps to
* _thread_entry after it has done its work.
*
* IAMCU Systems:
*
* There is no EFLAGS on the stack when we get here. _thread_entry() takes
* four arguments, and we need to pop off the first three into the
* appropriate registers. Instead of using the 'call' instruction, we push
* a NULL return address onto the stack and jump into _thread_entry,
* ensuring the stack won't be unwound further. Placing some kind of return
* address on the stack is mandatory so this isn't conditionally compiled.
*
* __________________
* | param3 | <------ Top of the stack
* |__________________|
* | param2 | Stack Grows Down
* |__________________| |
* | param1 | V
* |__________________|
* | pEntry | <---- ESP when invoked by _Swap() on IAMCU
* |__________________|
* | initial EFLAGS | <---- ESP when invoked by _Swap() on Sys V
* |__________________| (Zeroed by this routine on Sys V)
*
* The address of the thread entry function needs to be in %edi when this is
* invoked. It will either be _thread_entry, or if userspace is enabled,
* _arch_user_mode_enter if this is a user thread.
*
* @return this routine does NOT return.
*/
SECTION_FUNC(TEXT, _x86_thread_entry_wrapper)
#ifdef CONFIG_X86_IAMCU
/* IAMCU calling convention has first 3 arguments supplied in
* registers not the stack
*/
pop %eax
pop %edx
pop %ecx
push $0 /* Null return address */
#elif defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO)
mov $0, (%esp) /* zero initial EFLAGS location */
#endif
jmp *%edi
#endif
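
In C terms the wrapper's job reduces to one indirect call through whatever _new_thread() stored in the frame's EDI slot. A conceptual model only, not kernel code (k_thread_entry_t is re-declared here so the fragment stands alone):

typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);

/* 'next' is the value _new_thread() placed in the initial frame's EDI slot:
 * _thread_entry for kernel threads, or _arch_user_mode_enter for K_USER
 * threads; both take the same four arguments.
 */
static void thread_entry_wrapper_model(
	void (*next)(k_thread_entry_t entry, void *p1, void *p2, void *p3),
	k_thread_entry_t entry, void *p1, void *p2, void *p3)
{
	next(entry, p1, p2, p3); /* the C analogue of 'jmp *%edi' */
}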


@ -21,90 +21,23 @@
#include <kernel_structs.h>
#include <wait_q.h>
#include <mmustructs.h>
#include <misc/printk.h>
/* forward declaration */
#if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO) \
|| defined(CONFIG_X86_IAMCU)
extern void _thread_entry_wrapper(k_thread_entry_t entry,
void *p1, void *p2, void *p3);
/**
*
* @brief Adjust stack/parameters before invoking _thread_entry
*
* This function adjusts the initial stack frame created by _new_thread() such
* that the GDB stack frame unwinders recognize it as the outermost frame in
* the thread's stack. For targets that use the IAMCU calling convention, the
* first three arguments are popped into eax, edx, and ecx. The function then
* jumps to _thread_entry().
*
* GDB normally stops unwinding a stack when it detects that it has
* reached a function called main(). Kernel tasks, however, do not have
* a main() function, and there does not appear to be a simple way of stopping
* the unwinding of the stack.
*
* SYS V Systems:
*
* Given the initial thread created by _new_thread(), GDB expects to find a
* return address on the stack immediately above the thread entry routine
* _thread_entry, in the location occupied by the initial EFLAGS.
* GDB attempts to examine the memory at this return address, which typically
* results in an invalid access to page 0 of memory.
*
* This function overwrites the initial EFLAGS with zero. When GDB subsequently
* attempts to examine memory at address zero, the PeekPoke driver detects
* an invalid access to address zero and returns an error, which causes the
* GDB stack unwinder to stop somewhat gracefully.
*
* The initial EFLAGS cannot be overwritten until after _Swap() has swapped in
* the new thread for the first time. This routine is called by _Swap() the
* first time that the new thread is swapped in, and it jumps to
* _thread_entry after it has done its work.
*
* IAMCU Systems:
*
* There is no EFLAGS on the stack when we get here. _thread_entry() takes
* four arguments, and we need to pop off the first three into the
* appropriate registers. Instead of using the 'call' instruction, we push
* a NULL return address onto the stack and jump into _thread_entry,
* ensuring the stack won't be unwound further. Placing some kind of return
* address on the stack is mandatory so this isn't conditionally compiled.
*
* __________________
* | param3 | <------ Top of the stack
* |__________________|
* | param2 | Stack Grows Down
* |__________________| |
* | param1 | V
* |__________________|
* | pEntry | <---- ESP when invoked by _Swap() on IAMCU
* |__________________|
* | initial EFLAGS | <---- ESP when invoked by _Swap() on Sys V
* |__________________| (Zeroed by this routine on Sys V)
*
*
*
* @return this routine does NOT return.
/* Some configurations require that the stack/registers be adjusted before
* _thread_entry. See discussion in swap.S for _x86_thread_entry_wrapper()
*/
__asm__("\t.globl _thread_entry\n"
"\t.section .text\n"
"_thread_entry_wrapper:\n" /* should place this func .S file and use
* SECTION_FUNC
*/
#ifdef CONFIG_X86_IAMCU
/* IAMCU calling convention has first 3 arguments supplied in
* registers not the stack
*/
"\tpopl %eax\n"
"\tpopl %edx\n"
"\tpopl %ecx\n"
"\tpushl $0\n" /* Null return address */
#elif defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO)
"\tmovl $0, (%esp)\n" /* zero initialEFLAGS location */
#if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO) || \
defined(CONFIG_X86_IAMCU)
#define WRAPPER_REQUIRED
#endif
"\tjmp _thread_entry\n");
#endif /* CONFIG_GDB_INFO || CONFIG_DEBUG_INFO) || CONFIG_X86_IAMCU */
#ifdef WRAPPER_REQUIRED
extern void _x86_thread_entry_wrapper(k_thread_entry_t entry,
void *p1, void *p2, void *p3);
#endif /* WRAPPER_REQUIRED */
/* Initial thread stack frame, such that everything is laid out as expected
* for when _Swap() switches to it for the first time.
@ -149,12 +82,23 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t stack,
struct _x86_initial_frame *initial_frame;
_ASSERT_VALID_PRIO(priority, entry);
stack_buf = K_THREAD_STACK_BUFFER(stack);
_new_thread_init(thread, stack_buf, stack_size, priority, options);
#if CONFIG_X86_USERSPACE
if (!(options & K_USER)) {
/* Running in kernel mode, kernel stack region is also a guard
* page */
_x86_mmu_set_flags((void *)(stack_buf - MMU_PAGE_SIZE),
MMU_PAGE_SIZE, MMU_ENTRY_NOT_PRESENT,
MMU_PTE_P_MASK);
}
#endif /* CONFIG_X86_USERSPACE */
#if CONFIG_X86_STACK_PROTECTION
_x86_mmu_set_flags(stack, MMU_PAGE_SIZE, MMU_ENTRY_NOT_PRESENT,
MMU_PTE_P_MASK);
#endif
stack_buf = K_THREAD_STACK_BUFFER(stack);
_new_thread_init(thread, stack_buf, stack_size, priority, options);
stack_high = (char *)STACK_ROUND_DOWN(stack_buf + stack_size);
@ -168,13 +112,24 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t stack,
initial_frame->p3 = parameter3;
/* initial EFLAGS; only modify IF and IOPL bits */
initial_frame->eflags = (EflagsGet() & ~EFLAGS_MASK) | EFLAGS_INITIAL;
#if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO) \
|| defined(CONFIG_X86_IAMCU)
/* Adjust the stack before _thread_entry() is invoked */
initial_frame->_thread_entry = _thread_entry_wrapper;
#ifdef CONFIG_X86_USERSPACE
if (options & K_USER) {
#ifdef WRAPPER_REQUIRED
initial_frame->edi = (u32_t)_arch_user_mode_enter;
initial_frame->_thread_entry = _x86_thread_entry_wrapper;
#else
initial_frame->_thread_entry = _thread_entry;
initial_frame->_thread_entry = _arch_user_mode_enter;
#endif /* WRAPPER_REQUIRED */
} else
#endif /* CONFIG_X86_USERSPACE */
{
#ifdef WRAPPER_REQUIRED
initial_frame->edi = (u32_t)_thread_entry;
initial_frame->_thread_entry = _x86_thread_entry_wrapper;
#else
initial_frame->_thread_entry = _thread_entry;
#endif
}
/* Remaining _x86_initial_frame members can be garbage, _thread_entry()
* doesn't care about their state when execution begins
*/
@ -188,3 +143,64 @@ void _new_thread(struct k_thread *thread, k_thread_stack_t stack,
thread_monitor_init(thread);
#endif
}
#ifdef CONFIG_X86_USERSPACE
void _x86_swap_update_page_tables(struct k_thread *incoming,
struct k_thread *outgoing)
{
/* Outgoing thread stack no longer accessible */
_x86_mmu_set_flags((void *)outgoing->stack_info.start,
ROUND_UP(outgoing->stack_info.size, MMU_PAGE_SIZE),
MMU_ENTRY_SUPERVISOR, MMU_PTE_US_MASK);
/* Userspace can now access the incoming thread's stack */
_x86_mmu_set_flags((void *)incoming->stack_info.start,
ROUND_UP(incoming->stack_info.size, MMU_PAGE_SIZE),
MMU_ENTRY_USER, MMU_PTE_US_MASK);
/* In case of privilege elevation, use the incoming thread's kernel
* stack; the top of the thread stack is the bottom of the kernel stack
*/
_main_tss.esp0 = incoming->stack_info.start;
/* TODO: if either thread defines different memory domains, efficiently
* switch between them
*/
}
FUNC_NORETURN void _arch_user_mode_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3)
{
u32_t stack_end;
/* Transition will reset stack pointer to initial, discarding
* any old context since this is a one-way operation
*/
stack_end = STACK_ROUND_DOWN(_current->stack_info.start +
_current->stack_info.size);
/* Set up the kernel stack used during privilege elevation */
_x86_mmu_set_flags((void *)(_current->stack_info.start - MMU_PAGE_SIZE),
MMU_PAGE_SIZE,
(MMU_ENTRY_PRESENT | MMU_ENTRY_WRITE |
MMU_ENTRY_SUPERVISOR),
(MMU_PTE_P_MASK | MMU_PTE_RW_MASK |
MMU_PTE_US_MASK));
_x86_userspace_enter(user_entry, p1, p2, p3, stack_end,
_current->stack_info.start);
CODE_UNREACHABLE;
}
/* Implemented in userspace.S */
extern void _x86_syscall_entry_stub(void);
/* Syscalls invoked by 'int 0x80'. Installed in the IDT at DPL=3 so that
* userspace can invoke it.
*/
NANO_CPU_INT_REGISTER(_x86_syscall_entry_stub, -1, -1, 0x80, 3);
#endif /* CONFIG_X86_USERSPACE */
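
The handler registered above at vector 0x80 is _x86_syscall_entry_stub (added in userspace.S below); it marshals the syscall arguments onto the stack and calls _k_syscall_entry(), which this commit does not implement. The following is therefore only a hedged sketch of the signature implied by that marshalling; DUMMY_CALL_ID and handle_dummy() are hypothetical placeholders:

#include <kernel.h>
#include <errno.h>

#define DUMMY_CALL_ID 42                      /* hypothetical call number */

static u32_t handle_dummy(u32_t a, u32_t b)   /* hypothetical implementation */
{
	return a + b;
}

/* cdecl argument order implied by the stub's pushes: call_id is pushed first
 * and arg1 last, so they arrive as ordinary C arguments in this order.
 */
u32_t _k_syscall_entry(u32_t arg1, u32_t arg2, u32_t arg3,
		       u32_t arg4, u32_t arg5, u32_t call_id)
{
	switch (call_id) {
	case DUMMY_CALL_ID:
		return handle_dummy(arg1, arg2);
	default:
		return (u32_t)-ENOSYS;        /* unknown system call */
	}
}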

arch/x86/core/userspace.S (new file, 148 lines)

@ -0,0 +1,148 @@
/*
* Copyright (c) 2017 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel_structs.h>
#include <arch/x86/asm.h>
#include <arch/cpu.h>
#include <offsets_short.h>
/* Exports */
GTEXT(_x86_syscall_entry_stub)
GTEXT(_x86_userspace_enter)
/* Imports */
GTEXT(_k_syscall_entry)
/* Landing site for syscall SW IRQ. Marshal arguments and call C function for
* further processing.
*/
SECTION_FUNC(TEXT, _x86_syscall_entry_stub)
push %esi /* call_id */
push %edi /* arg5 */
push %ebx /* arg4 */
#ifndef CONFIG_X86_IAMCU
push %ecx /* arg3 */
push %edx /* arg2 */
push %eax /* arg1 */
#endif
call _k_syscall_entry
/* EAX now contains return value. Pop or xor everything else to prevent
* information leak from kernel mode.
*/
#ifndef CONFIG_X86_IAMCU
pop %edx /* old EAX value, discard it */
pop %edx
pop %ecx
#else
xor %edx, %edx
xor %ecx, %ecx
#endif
pop %ebx
pop %edi
pop %esi
iret
/* FUNC_NORETURN void _x86_userspace_enter(k_thread_entry_t user_entry,
* void *p1, void *p2, void *p3,
* u32_t stack_end,
* u32_t stack_start)
*
* A one-way trip to userspace.
*/
SECTION_FUNC(TEXT, _x86_userspace_enter)
pop %esi /* Discard return address on stack */
/* Fetch parameters on the stack */
#ifndef CONFIG_X86_IAMCU
pop %eax /* user_entry */
pop %edx /* p1 */
pop %ecx /* p2 */
#endif
pop %esi /* p3 */
pop %ebx /* stack_end (high address) */
pop %edi /* stack_start (low address) */
/* Move to the kernel stack for this thread, so we can erase the
* user stack. The kernel stack is the page immediately before
* the user stack.
*
* For security reasons, we must erase the entire user stack.
* We don't know what previous contexts it was used in and do not
* want to leak any information.
*/
mov %edi, %esp
/* Stash some registers we are going to need to erase the user
* stack.
*/
push %ecx
push %edi
push %eax
/* Compute size of user stack and put in ECX */
mov %ebx, %ecx
sub %edi, %ecx
#ifdef CONFIG_INIT_STACKS
mov $0xAAAAAAAA, %eax
#else
xor %eax, %eax
#endif
/* Fill ECX bytes of memory, 4 bytes at a time, starting at ES:EDI,
* with whatever is in EAX. Stack sizes are always at least 4-byte
* aligned.
*/
cld
rep stosl
/* Restore registers */
pop %eax
pop %edi
pop %ecx
/* Now set stack pointer to the base of the user stack. Now that this
* is set we won't need EBX any more.
*/
mov %ebx, %esp
/* Set segment registers (except CS and SS which are done in
* a special way by 'iret' below)
*/
mov $USER_DATA_SEG, %bx
mov %bx, %ds
mov %bx, %es
/* Push arguments to _thread_entry() */
push %esi /* p3 */
#ifndef CONFIG_X86_IAMCU
push %ecx /* p2 */
push %edx /* p1 */
push %eax /* user_entry */
#endif
/* NULL return address */
push $0
/* Save stack pointer at this position, this is where it will be
* when we land in _thread_entry()
*/
mov %esp, %edi
/* Inter-privilege 'iret' pops all of these. Need to fake an interrupt
* return to enter user mode as far calls cannot change privilege
* level
*/
push $USER_DATA_SEG /* SS */
push %edi /* ESP */
pushfl /* EFLAGS */
push $USER_CODE_SEG /* CS */
push $_thread_entry /* EIP */
/* We will land in _thread_entry() in user mode after this */
iret
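
The five pushes before this iret fabricate an inter-privilege return frame. Laid out as a struct, with members in ascending address order starting at ESP when the iret executes, it would look roughly like this sketch (u32_t as in <zephyr/types.h>):

#include <zephyr/types.h>

/* Sketch of the frame the inter-privilege 'iret' above consumes */
struct x86_iret_frame {
	u32_t eip;     /* $_thread_entry: first instruction executed in ring 3 */
	u32_t cs;      /* $USER_CODE_SEG, DPL=3 */
	u32_t eflags;  /* current EFLAGS, copied by 'pushfl' */
	u32_t esp;     /* user stack pointer stashed in EDI beforehand */
	u32_t ss;      /* $USER_DATA_SEG, DPL=3 */
};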


@ -40,12 +40,6 @@
#endif
/* GDT layout */
#define CODE_SEG 0x08
#define DATA_SEG 0x10
#define MAIN_TSS 0x18
#define DF_TSS 0x20
/* increase to 16 bytes (or more?) to support SSE/SSE2 instructions? */
#define STACK_ALIGN_SIZE 4


@ -82,6 +82,11 @@ static inline void _IntLibInit(void)
/* the _idt_base_address symbol is generated via a linker script */
extern unsigned char _idt_base_address[];
extern FUNC_NORETURN void _x86_userspace_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3,
u32_t stack_end,
u32_t stack_start);
#include <stddef.h> /* For size_t */
#ifdef __cplusplus


@ -23,6 +23,7 @@
#ifndef _ASMLANGUAGE
#include <arch/x86/asm_inline.h>
#include <arch/x86/addr_types.h>
#include <arch/x86/segmentation.h>
#endif
#ifdef __cplusplus
@ -34,6 +35,14 @@ extern "C" {
#define OCTET_TO_SIZEOFUNIT(X) (X)
#define SIZEOFUNIT_TO_OCTET(X) (X)
/* GDT layout */
#define CODE_SEG 0x08
#define DATA_SEG 0x10
#define MAIN_TSS 0x18
#define DF_TSS 0x20
#define USER_CODE_SEG 0x2b /* at dpl=3 */
#define USER_DATA_SEG 0x33 /* at dpl=3 */
/**
* Macro used internally by NANO_CPU_INT_REGISTER and NANO_CPU_INT_REGISTER_ASM.
* Not meant to be used explicitly by platform, driver or application code.
@ -530,12 +539,119 @@ extern FUNC_NORETURN void _SysFatalErrorHandler(unsigned int reason,
#ifdef CONFIG_X86_STACK_PROTECTION
extern struct task_state_segment _main_tss;
#ifdef CONFIG_X86_USERSPACE
/* Syscall invocation macros. x86-specific machine constraints used to ensure
* args land in the proper registers, see implementation of
* _x86_syscall_entry_stub in userspace.S
*/
static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
: "S" (call_id), "a" (arg1), "d" (arg2),
"c" (arg3), "b" (arg4), "D" (arg5));
return ret;
}
static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
: "S" (call_id), "a" (arg1), "d" (arg2),
"c" (arg3), "b" (arg4));
return ret;
}
static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
: "S" (call_id), "a" (arg1), "d" (arg2), "c" (arg3));
return ret;
}
static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
: "S" (call_id), "a" (arg1), "d" (arg2));
return ret;
}
static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
: "S" (call_id), "a" (arg1));
return ret;
}
static inline u32_t _arch_syscall_invoke0(u32_t call_id)
{
u32_t ret;
__asm__ volatile("int $0x80"
: "=a" (ret)
: "S" (call_id));
return ret;
}
static inline int _arch_is_user_context(void)
{
int cs;
/* On x86, read the CS register (which cannot be manually set) */
__asm__ volatile ("mov %%cs, %[cs_val]" : [cs_val] "=r" (cs));
return cs == USER_CODE_SEG;
}
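/* Editorial sketch, not part of this commit: how a wrapper might combine the
 * invocation macros above with _arch_is_user_context(). DUMMY_CALL_ID and
 * _impl_dummy_op() are hypothetical names.
 */
static inline u32_t dummy_op(u32_t a, u32_t b)
{
	if (_arch_is_user_context()) {
		/* Ring 3: trap into the kernel via 'int $0x80' */
		return _arch_syscall_invoke2(a, b, DUMMY_CALL_ID);
	}
	/* Already in supervisor mode: call the implementation directly */
	return _impl_dummy_op(a, b);
}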
/* With userspace enabled, stacks are arranged as follows:
*
* High memory addresses
* +---------------+
* | Thread stack |
* +---------------+
* | Kernel stack |
* +---------------+
* | Guard page |
* +---------------+
* Low Memory addresses
*
* Kernel stacks are fixed at 4K. All the pages containing the thread stack
* are marked as user-accessible.
* All threads start in supervisor mode, and the kernel stack/guard page
* are both marked non-present in the MMU.
* If a thread drops down to user mode, the kernel stack page will be marked
* as present, supervisor-only, and the _main_tss.esp0 field updated to point
* to the top of it.
* All context switches will save/restore the esp0 field in the TSS.
*/
#define _STACK_GUARD_SIZE (MMU_PAGE_SIZE * 2)
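/* Editorial worked example, not in this commit: with 4 KiB pages and a 4 KiB
 * stack declared for a thread, the region described above is assumed to end
 * up laid out like this (addresses purely illustrative):
 *
 *   0x00103000-0x00103fff  thread (user) stack; stack_info.start = 0x00103000
 *   0x00102000-0x00102fff  kernel stack; _main_tss.esp0 = 0x00103000
 *   0x00101000-0x00101fff  guard page, always marked not-present
 *
 * which is why _STACK_GUARD_SIZE grows to two pages here: one page of guard
 * plus the fixed 4 KiB kernel stack below the user stack.
 */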
#else /* !CONFIG_X86_USERSPACE */
#define _STACK_GUARD_SIZE MMU_PAGE_SIZE
#endif /* CONFIG_X86_USERSPACE */
#define _STACK_BASE_ALIGN MMU_PAGE_SIZE
#else
#else /* !CONFIG_X86_STACK_PROTECTION */
#define _STACK_GUARD_SIZE 0
#define _STACK_BASE_ALIGN STACK_ALIGN
#endif
#endif /* CONFIG_X86_STACK_PROTECTION */
#define _ARCH_THREAD_STACK_DEFINE(sym, size) \
struct _k_thread_stack_element __noinit \


@ -242,9 +242,13 @@ SECTIONS
#else /* LINKER_PASS2 */
#ifdef CONFIG_X86_STACK_PROTECTION
#define GDT_NUM_ENTRIES 5
#ifdef CONFIG_X86_USERSPACE
#define GDT_NUM_ENTRIES 7
#else
#define GDT_NUM_ENTRIES 5
#endif /* CONFIG_X86_USERSPACE */
#else /* CONFIG_X86_STACK_PROTECTION */
#define GDT_NUM_ENTRIES 3
#define GDT_NUM_ENTRIES 3
#endif /* CONFIG_X86_STACK_PROTECTION */
. += GDT_NUM_ENTRIES * 8;


@ -135,7 +135,12 @@ def main():
if "CONFIG_X86_STACK_PROTECTION" in syms:
stackprot = True
num_entries = 5
if "CONFIG_X86_USERSPACE" in syms:
userspace = True
num_entries = 7
else:
userspace = False
num_entries = 5
else:
stackprot = False
num_entries = 3
@ -165,6 +170,15 @@ def main():
# Selector 0x20: double-fault TSS
fp.write(create_tss_entry(df_tss, 0x67, 0))
if userspace:
# Selector 0x28: code descriptor, dpl = 3
fp.write(create_code_data_entry(0, 0xFFFFF, 3,
FLAGS_GRAN, ACCESS_EX | ACCESS_RW))
# Selector 0x30: data descriptor, dpl = 3
fp.write(create_code_data_entry(0, 0xFFFFF, 3,
FLAGS_GRAN, ACCESS_RW))
if __name__ == "__main__":
main()
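
The two descriptors added here occupy GDT entries 5 and 6 (byte offsets 0x28 and 0x30). The USER_CODE_SEG (0x2b) and USER_DATA_SEG (0x33) values defined in the arch header are those offsets with the requested privilege level set to 3, per the x86 selector encoding. A small self-contained C sketch of that encoding:

#include <assert.h>
#include <stdint.h>

/* x86 segment selector: bits 3..15 index, bit 2 table indicator, bits 0..1 RPL */
static uint16_t gdt_selector(unsigned int index, unsigned int rpl)
{
	return (uint16_t)((index << 3) | rpl);
}

int main(void)
{
	assert(gdt_selector(5, 3) == 0x2b); /* USER_CODE_SEG */
	assert(gdt_selector(6, 3) == 0x33); /* USER_DATA_SEG */
	return 0;
}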