x86: use a struct to specify stack layout

Makes the code that defines stacks, and the code that references
areas within the stack object, much clearer.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Author: Andrew Boie
Date: 2019-08-01 15:06:40 -07:00
Committed by: Carles Cufí
Commit: 0add92523c

4 changed files with 61 additions and 46 deletions
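For context, a minimal before/after sketch of the pointer arithmetic this
change replaces (simplified from the hunks below, assuming the types they
define; not verbatim kernel code):

	/* Before: regions of the stack object located by offset math */
	void *priv_stack =
		(void *)(_current->stack_info.start - MMU_PAGE_SIZE);

	/* After: a struct overlays the stack object and names each region */
	struct z_x86_thread_stack_header *header =
		(struct z_x86_thread_stack_header *)_current->stack_obj;
	void *priv_stack = &header->privilege_stack;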


@@ -115,10 +115,11 @@ static FUNC_NORETURN void drop_to_user(k_thread_entry_t user_entry,
 FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
 					  void *p1, void *p2, void *p3)
 {
+	struct z_x86_thread_stack_header *header =
+		(struct z_x86_thread_stack_header *)_current->stack_obj;
+
 	/* Set up the kernel stack used during privilege elevation */
-	z_x86_mmu_set_flags(&z_x86_kernel_pdpt,
-			    (void *)(_current->stack_info.start -
-				     MMU_PAGE_SIZE),
+	z_x86_mmu_set_flags(&z_x86_kernel_pdpt, &header->privilege_stack,
 			    MMU_PAGE_SIZE, MMU_ENTRY_WRITE, MMU_PTE_RW_MASK,
 			    true);
 
@@ -197,6 +198,10 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	char *stack_buf;
 	char *stack_high;
 	struct _x86_initial_frame *initial_frame;
+#if defined(CONFIG_X86_USERSPACE) || defined(CONFIG_X86_STACK_PROTECTION)
+	struct z_x86_thread_stack_header *header =
+		(struct z_x86_thread_stack_header *)stack;
+#endif
 
 	Z_ASSERT_VALID_PRIO(priority, entry);
 	stack_buf = Z_THREAD_STACK_BUFFER(stack);
@@ -207,15 +212,15 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	 * If we're not starting in user mode, this functions as a guard
 	 * area.
 	 */
-	z_x86_mmu_set_flags(&z_x86_kernel_pdpt,
-			    (void *)(stack_buf - MMU_PAGE_SIZE), MMU_PAGE_SIZE,
+	z_x86_mmu_set_flags(&z_x86_kernel_pdpt, &header->privilege_stack,
+			    MMU_PAGE_SIZE,
 			    ((options & K_USER) == 0U) ? MMU_ENTRY_READ : MMU_ENTRY_WRITE,
 			    MMU_PTE_RW_MASK, true);
 #endif /* CONFIG_X86_USERSPACE */
 
 #if CONFIG_X86_STACK_PROTECTION
 	/* Set guard area to read-only to catch stack overflows */
-	z_x86_mmu_set_flags(&z_x86_kernel_pdpt, stack + Z_X86_THREAD_PT_AREA,
+	z_x86_mmu_set_flags(&z_x86_kernel_pdpt, &header->guard_page,
 			    MMU_PAGE_SIZE, MMU_ENTRY_READ, MMU_PTE_RW_MASK,
 			    true);
 #endif
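Note: the hunk above assumes the privilege stack page is the page
immediately below the thread's stack buffer, matching the old
"stack_buf - MMU_PAGE_SIZE" arithmetic. A hypothetical compile-time check
of that property (not part of this commit; assumes CONFIG_USERSPACE is
enabled, with BUILD_ASSERT from Zephyr's toolchain headers and offsetof
from stddef.h):

	/* privilege_stack plus the kernel data sharing its page must end
	 * exactly where the reserved header ends, i.e. at the page boundary
	 * where the thread's usable stack buffer begins.
	 */
	BUILD_ASSERT(offsetof(struct z_x86_thread_stack_header,
			      privilege_stack) + MMU_PAGE_SIZE ==
		     sizeof(struct z_x86_thread_stack_header));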


@@ -418,18 +418,13 @@ static void copy_page_tables(struct k_thread *thread,
 {
 	uintptr_t pos, start;
 	struct x86_mmu_pdpt *thread_pdpt = z_x86_pdpt_get(thread);
+	struct z_x86_thread_stack_header *header =
+		(struct z_x86_thread_stack_header *)thread->stack_obj;
 
 	__ASSERT(thread->stack_obj != NULL, "no stack object assigned");
 	__ASSERT(z_x86_page_tables_get() != thread_pdpt, "PDPT is active");
 	__ASSERT(((uintptr_t)thread_pdpt & 0x1f) == 0, "unaligned pdpt at %p",
 		 thread_pdpt);
-	__ASSERT(((uintptr_t)thread_pdpt) == ((uintptr_t)thread->stack_obj +
-					      Z_ARCH_THREAD_STACK_RESERVED -
-					      sizeof(struct x86_mmu_pdpt)),
-		 "misplaced pdpt\n");
-	__ASSERT(thread->stack_info.start == ((uintptr_t)thread->stack_obj +
-					      Z_ARCH_THREAD_STACK_RESERVED),
-		 "stack_info.start is wrong");
 
 	(void)memcpy(thread_pdpt, master_pdpt, sizeof(struct x86_mmu_pdpt));
 
@@ -461,7 +456,7 @@ static void copy_page_tables(struct k_thread *thread,
 	 * |  ...            |
 	 *
 	 */
-	start = (uintptr_t)(thread->stack_obj);
+	start = (uintptr_t)(&header->page_tables);
 	pos = thread_pd_create(start, thread_pdpt, master_pdpt);
 	pos = thread_pt_create(pos, thread_pdpt, master_pdpt);
 
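The new "start" value is unchanged in practice: page_tables is the first
member of the header, so its address equals the base of the stack object
that the old code used. A hypothetical check (not in the commit;
CONFIG_USERSPACE assumed):

	BUILD_ASSERT(offsetof(struct z_x86_thread_stack_header,
			      page_tables) == 0);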


@@ -89,9 +89,10 @@ void z_x86_apply_mem_domain(struct x86_mmu_pdpt *pdpt,
 static inline struct x86_mmu_pdpt *z_x86_pdpt_get(struct k_thread *thread)
 {
-	uintptr_t addr = thread->stack_info.start;
+	struct z_x86_thread_stack_header *header =
+		(struct z_x86_thread_stack_header *)thread->stack_obj;
 
-	return (struct x86_mmu_pdpt *)(addr - sizeof(struct x86_mmu_pdpt));
+	return &header->kernel_data.pdpt;
 }
 #endif /* CONFIG_USERSPACE */
 
 #include <stddef.h> /* For size_t */
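The new return value matches the old arithmetic because the PDPT occupies
the very end of the reserved header, immediately below stack_info.start;
the __ASSERTs removed in the previous file encoded exactly this. A
hypothetical check (not in the commit; assumes sizeof(struct x86_mmu_pdpt)
is 32 bytes, i.e. four 8-byte PAE entries, so z_x86_kernel_stack_data adds
no padding):

	BUILD_ASSERT(offsetof(struct z_x86_thread_stack_header,
			      kernel_data.pdpt) +
		     sizeof(struct x86_mmu_pdpt) ==
		     sizeof(struct z_x86_thread_stack_header));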


@@ -638,7 +638,26 @@ extern struct task_state_segment _main_tss;
 
 #define Z_X86_THREAD_PT_AREA	(Z_X86_NUM_TABLE_PAGES * MMU_PAGE_SIZE)
 
-#if defined(CONFIG_HW_STACK_PROTECTION) && defined(CONFIG_USERSPACE)
+#if defined(CONFIG_HW_STACK_PROTECTION) || defined(CONFIG_USERSPACE)
+#define Z_X86_STACK_BASE_ALIGN	MMU_PAGE_SIZE
+#else
+#define Z_X86_STACK_BASE_ALIGN	STACK_ALIGN
+#endif
+
+#ifdef CONFIG_USERSPACE
+/* If user mode enabled, expand any stack size to fill a page since that is
+ * the access control granularity and we don't want other kernel data to
+ * unintentionally fall in the latter part of the page
+ */
+#define Z_X86_STACK_SIZE_ALIGN	MMU_PAGE_SIZE
+#else
+#define Z_X86_STACK_SIZE_ALIGN	1
+#endif
+
+struct z_x86_kernel_stack_data {
+	struct x86_mmu_pdpt pdpt;
+} __aligned(0x20);
+
 /* With both hardware stack protection and userspace enabled, stacks are
  * arranged as follows:
  *
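A small worked example of the size alignment defined above (hypothetical,
not in the commit; assumes MMU_PAGE_SIZE is 4096):

	/* With CONFIG_USERSPACE, a 1500-byte stack request is padded to a
	 * whole page, so no unrelated kernel data shares its last page.
	 */
	BUILD_ASSERT(ROUND_UP(1500, MMU_PAGE_SIZE) == 4096);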
@@ -671,53 +690,48 @@ extern struct task_state_segment _main_tss;
  * _main_tss.esp0 always points to the trampoline stack, which handles the
  * page table switch to the kernel PDPT and transplants context to the
  * privileged mode stack.
- *
- * TODO: The stack object layout is getting rather complex. We should define
- * its layout in a struct definition, rather than doing math in the kernel
- * code to find the parts we want or to obtain sizes.
  */
-#define Z_ARCH_THREAD_STACK_RESERVED	(MMU_PAGE_SIZE * (2 + Z_X86_NUM_TABLE_PAGES))
-#define _STACK_BASE_ALIGN	MMU_PAGE_SIZE
-#elif defined(CONFIG_HW_STACK_PROTECTION) || defined(CONFIG_USERSPACE)
-/* If only one of HW stack protection or userspace is enabled, then the
- * stack will be preceded by one page which is a guard page or a kernel mode
- * stack, respectively.
- */
-#define Z_ARCH_THREAD_STACK_RESERVED	(MMU_PAGE_SIZE * (1 + Z_X86_NUM_TABLE_PAGES))
-#define _STACK_BASE_ALIGN	MMU_PAGE_SIZE
-#else /* Neither feature */
-#define Z_ARCH_THREAD_STACK_RESERVED	0
-#define _STACK_BASE_ALIGN	STACK_ALIGN
-#endif
+struct z_x86_thread_stack_header {
+#ifdef CONFIG_USERSPACE
+	char page_tables[Z_X86_THREAD_PT_AREA];
+#endif
+
+#ifdef CONFIG_HW_STACK_PROTECTION
+	char guard_page[MMU_PAGE_SIZE];
+#endif
 
 #ifdef CONFIG_USERSPACE
-/* If user mode enabled, expand any stack size to fill a page since that is
- * the access control granularity and we don't want other kernel data to
- * unintentionally fall in the latter part of the page
- */
-#define _STACK_SIZE_ALIGN	MMU_PAGE_SIZE
-#else
-#define _STACK_SIZE_ALIGN	1
+	char privilege_stack[MMU_PAGE_SIZE -
+		sizeof(struct z_x86_kernel_stack_data)];
+
+	struct z_x86_kernel_stack_data kernel_data;
 #endif
+} __packed __aligned(Z_X86_STACK_SIZE_ALIGN);
+
+#define Z_ARCH_THREAD_STACK_RESERVED \
+	((u32_t)sizeof(struct z_x86_thread_stack_header))
 
 #define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
 	struct _k_thread_stack_element __noinit \
-		__aligned(_STACK_BASE_ALIGN) \
-		sym[ROUND_UP((size), _STACK_SIZE_ALIGN) + Z_ARCH_THREAD_STACK_RESERVED]
+		__aligned(Z_X86_STACK_BASE_ALIGN) \
+		sym[ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN) + \
+			Z_ARCH_THREAD_STACK_RESERVED]
 
 #define Z_ARCH_THREAD_STACK_LEN(size) \
 	(ROUND_UP((size), \
-		  MAX(_STACK_BASE_ALIGN, _STACK_SIZE_ALIGN)) + \
+		  MAX(Z_X86_STACK_BASE_ALIGN, \
+		      Z_X86_STACK_SIZE_ALIGN)) + \
 	 Z_ARCH_THREAD_STACK_RESERVED)
 
 #define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
 	struct _k_thread_stack_element __noinit \
-		__aligned(_STACK_BASE_ALIGN) \
+		__aligned(Z_X86_STACK_BASE_ALIGN) \
 		sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
 
 #define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
-	struct _k_thread_stack_element __aligned(_STACK_BASE_ALIGN) \
-	sym[ROUND_UP((size), _STACK_SIZE_ALIGN) + Z_ARCH_THREAD_STACK_RESERVED]
+	struct _k_thread_stack_element __aligned(Z_X86_STACK_BASE_ALIGN) \
+	sym[ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN) + \
+		Z_ARCH_THREAD_STACK_RESERVED]
 
 #define Z_ARCH_THREAD_STACK_SIZEOF(sym) \
 	(sizeof(sym) - Z_ARCH_THREAD_STACK_RESERVED)
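Finally, a hedged usage sketch of the resulting layout (illustrative only:
my_stack is a made-up symbol, and CONFIG_USERSPACE plus
CONFIG_HW_STACK_PROTECTION are assumed enabled):

	/* Defines a stack object with Z_ARCH_THREAD_STACK_RESERVED header
	 * bytes prepended to the (page-rounded) 1024-byte stack buffer.
	 */
	Z_ARCH_THREAD_STACK_DEFINE(my_stack, 1024);

	/* Regions formerly found by offset arithmetic are now members: */
	struct z_x86_thread_stack_header *hdr =
		(struct z_x86_thread_stack_header *)my_stack;
	/* hdr->page_tables      - per-thread paging structures
	 * hdr->guard_page       - read-only page catching stack overflows
	 * hdr->privilege_stack  - stack used during privilege elevation
	 * hdr->kernel_data.pdpt - this thread's page directory pointer table
	 */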