x86: use a struct to specify stack layout
Makes the code that defines stacks, and code referencing areas within the
stack object, much clearer.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent 8014e075f4
commit 0add92523c

4 changed files with 61 additions and 46 deletions
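The commit message is terse, so a minimal host-side sketch of the pattern the diff applies may help. This is not Zephyr code; the names and region sizes below are invented for illustration. The idea is to overlay a struct on the stack object so that named members replace hand-computed page offsets:

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Illustrative stand-in for z_x86_thread_stack_header: each reserved
 * region becomes a named member instead of an offset convention. */
struct stack_header {
	char page_tables[2 * PAGE_SIZE];
	char guard_page[PAGE_SIZE];
	char privilege_stack[PAGE_SIZE];
};

static char stack_obj[sizeof(struct stack_header) + PAGE_SIZE];

int main(void)
{
	struct stack_header *hdr = (struct stack_header *)stack_obj;

	/* Before: callers computed "object + N pages" by hand.
	 * After: the compiler derives the same address from the layout. */
	printf("by arithmetic: %p\n", (void *)(stack_obj + 3 * PAGE_SIZE));
	printf("by member:     %p\n", (void *)hdr->privilege_stack);
	return 0;
}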
@@ -115,10 +115,11 @@ static FUNC_NORETURN void drop_to_user(k_thread_entry_t user_entry,
 FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
 					  void *p1, void *p2, void *p3)
 {
+	struct z_x86_thread_stack_header *header =
+		(struct z_x86_thread_stack_header *)_current->stack_obj;
+
 	/* Set up the kernel stack used during privilege elevation */
-	z_x86_mmu_set_flags(&z_x86_kernel_pdpt,
-			    (void *)(_current->stack_info.start -
-				     MMU_PAGE_SIZE),
+	z_x86_mmu_set_flags(&z_x86_kernel_pdpt, &header->privilege_stack,
 			    MMU_PAGE_SIZE, MMU_ENTRY_WRITE, MMU_PTE_RW_MASK,
 			    true);
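The hunk above replaces the arithmetic `_current->stack_info.start - MMU_PAGE_SIZE` with the named member `&header->privilege_stack`. A hedged, host-compilable sketch of why the two addresses coincide, using simplified stand-ins (the sizes and member names are illustrative, not the real kernel definitions):

#include <stddef.h>

#define MMU_PAGE_SIZE 4096u

struct kernel_stack_data {              /* stand-in for z_x86_kernel_stack_data */
	char pdpt[32];
};

struct thread_stack_header {            /* stand-in for z_x86_thread_stack_header */
	char page_tables[2 * MMU_PAGE_SIZE];
	char guard_page[MMU_PAGE_SIZE];
	char privilege_stack[MMU_PAGE_SIZE - sizeof(struct kernel_stack_data)];
	struct kernel_stack_data kernel_data;
};

/* stack_info.start points just past the reserved header, so the privilege
 * stack begins exactly one page below it -- the very address the removed
 * arithmetic computed by hand. */
_Static_assert(offsetof(struct thread_stack_header, privilege_stack) ==
	       sizeof(struct thread_stack_header) - MMU_PAGE_SIZE,
	       "privilege stack is the page preceding the thread stack");

int main(void) { return 0; }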
@@ -197,6 +198,10 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	char *stack_buf;
 	char *stack_high;
 	struct _x86_initial_frame *initial_frame;
+#if defined(CONFIG_X86_USERSPACE) || defined(CONFIG_X86_STACK_PROTECTION)
+	struct z_x86_thread_stack_header *header =
+		(struct z_x86_thread_stack_header *)stack;
+#endif
 
 	Z_ASSERT_VALID_PRIO(priority, entry);
 	stack_buf = Z_THREAD_STACK_BUFFER(stack);
@@ -207,15 +212,15 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	 * If we're not starting in user mode, this functions as a guard
 	 * area.
 	 */
-	z_x86_mmu_set_flags(&z_x86_kernel_pdpt,
-			    (void *)(stack_buf - MMU_PAGE_SIZE), MMU_PAGE_SIZE,
+	z_x86_mmu_set_flags(&z_x86_kernel_pdpt, &header->privilege_stack,
+			    MMU_PAGE_SIZE,
 			    ((options & K_USER) == 0U) ? MMU_ENTRY_READ : MMU_ENTRY_WRITE,
 			    MMU_PTE_RW_MASK, true);
 #endif /* CONFIG_X86_USERSPACE */
 
 #if CONFIG_X86_STACK_PROTECTION
 	/* Set guard area to read-only to catch stack overflows */
-	z_x86_mmu_set_flags(&z_x86_kernel_pdpt, stack + Z_X86_THREAD_PT_AREA,
+	z_x86_mmu_set_flags(&z_x86_kernel_pdpt, &header->guard_page,
 			    MMU_PAGE_SIZE, MMU_ENTRY_READ, MMU_PTE_RW_MASK,
 			    true);
 #endif
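One subtlety in the first call above: a thread created without K_USER never elevates privilege, so its privilege-elevation page can be mapped read-only and act as an extra guard. A self-contained sketch of that flag selection (the constants are illustrative stand-ins, not Zephyr's real values):

#include <stdio.h>

#define K_USER          (1u << 2)   /* illustrative option bit */
#define MMU_ENTRY_READ  0x0u        /* illustrative PTE permission values */
#define MMU_ENTRY_WRITE 0x2u

/* Supervisor-only threads never use the privilege elevation stack, so its
 * page doubles as a read-only guard; user threads need it writable. */
static unsigned int privilege_page_perms(unsigned int options)
{
	return ((options & K_USER) == 0u) ? MMU_ENTRY_READ : MMU_ENTRY_WRITE;
}

int main(void)
{
	printf("supervisor thread: %#x\n", privilege_page_perms(0u));
	printf("user thread:       %#x\n", privilege_page_perms(K_USER));
	return 0;
}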
@@ -418,18 +418,13 @@ static void copy_page_tables(struct k_thread *thread,
 {
 	uintptr_t pos, start;
 	struct x86_mmu_pdpt *thread_pdpt = z_x86_pdpt_get(thread);
+	struct z_x86_thread_stack_header *header =
+		(struct z_x86_thread_stack_header *)thread->stack_obj;
 
 	__ASSERT(thread->stack_obj != NULL, "no stack object assigned");
 	__ASSERT(z_x86_page_tables_get() != thread_pdpt, "PDPT is active");
 	__ASSERT(((uintptr_t)thread_pdpt & 0x1f) == 0, "unaligned pdpt at %p",
 		 thread_pdpt);
-	__ASSERT(((uintptr_t)thread_pdpt) == ((uintptr_t)thread->stack_obj +
-					      Z_ARCH_THREAD_STACK_RESERVED -
-					      sizeof(struct x86_mmu_pdpt)),
-		 "misplaced pdpt\n");
-	__ASSERT(thread->stack_info.start == ((uintptr_t)thread->stack_obj +
-					      Z_ARCH_THREAD_STACK_RESERVED),
-		 "stack_info.start is wrong");
 
 	(void)memcpy(thread_pdpt, master_pdpt, sizeof(struct x86_mmu_pdpt));
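The two deleted __ASSERTs checked layout invariants at run time; with the struct, those invariants hold by construction and could even be stated at compile time. A hedged sketch using simplified stand-in types (not the real Zephyr definitions):

#include <stddef.h>

#define MMU_PAGE_SIZE 4096u

struct x86_mmu_pdpt {                  /* stand-in: the real PDPT is 4 entries */
	char entries[32];
};

struct thread_stack_header {           /* stand-in layout from the new header */
	char page_tables[2 * MMU_PAGE_SIZE];
	char guard_page[MMU_PAGE_SIZE];
	char privilege_stack[MMU_PAGE_SIZE - sizeof(struct x86_mmu_pdpt)];
	struct x86_mmu_pdpt pdpt;
};

#define STACK_RESERVED sizeof(struct thread_stack_header)

/* Deleted assert #1, "misplaced pdpt": the PDPT must end exactly where the
 * reserved area ends.  With the struct, that is true by construction. */
_Static_assert(offsetof(struct thread_stack_header, pdpt) ==
	       STACK_RESERVED - sizeof(struct x86_mmu_pdpt), "misplaced pdpt");

/* Deleted assert #2, "stack_info.start is wrong": the usable stack begins
 * at stack_obj + STACK_RESERVED, i.e. immediately after this header. */

int main(void) { return 0; }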
@@ -461,7 +456,7 @@ static void copy_page_tables(struct k_thread *thread,
 	 * | ... |
 	 *
 	 */
-	start = (uintptr_t)(thread->stack_obj);
+	start = (uintptr_t)(&header->page_tables);
 	pos = thread_pd_create(start, thread_pdpt, master_pdpt);
 	pos = thread_pt_create(pos, thread_pdpt, master_pdpt);
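The old and new `start` lines compute the same address only because page_tables is the first member of the header (offset 0); the struct form records that dependency instead of leaving it implicit. A small stand-alone sketch of the invariant (types are stand-ins):

#include <stddef.h>

#define MMU_PAGE_SIZE 4096u
#define PT_AREA       (2u * MMU_PAGE_SIZE)  /* stand-in for Z_X86_THREAD_PT_AREA */

struct thread_stack_header {
	char page_tables[PT_AREA];          /* first member, so offset 0 today */
	char guard_page[MMU_PAGE_SIZE];
};

/* (uintptr_t)stack_obj equals (uintptr_t)&header->page_tables only because
 * of this zero offset; naming the member makes the assumption checkable. */
_Static_assert(offsetof(struct thread_stack_header, page_tables) == 0,
	       "page tables sit at the very start of the stack object");

int main(void) { return 0; }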
@@ -89,9 +89,10 @@ void z_x86_apply_mem_domain(struct x86_mmu_pdpt *pdpt,
 
 static inline struct x86_mmu_pdpt *z_x86_pdpt_get(struct k_thread *thread)
 {
-	uintptr_t addr = thread->stack_info.start;
+	struct z_x86_thread_stack_header *header =
+		(struct z_x86_thread_stack_header *)thread->stack_obj;
 
-	return (struct x86_mmu_pdpt *)(addr - sizeof(struct x86_mmu_pdpt));
+	return &header->kernel_data.pdpt;
 }
 #endif /* CONFIG_USERSPACE */
 #include <stddef.h> /* For size_t */
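Returning `&header->kernel_data.pdpt` is safe only because the layout keeps the PDPT 32-byte aligned, which is what the `__ASSERT` on the low five address bits in copy_page_tables() checks and what `__aligned(0x20)` on z_x86_kernel_stack_data provides. A hedged sketch of that alignment property with simplified types (`__attribute__((aligned))` stands in for Zephyr's `__aligned`):

#include <stddef.h>
#include <stdint.h>

#define MMU_PAGE_SIZE 4096u

struct x86_mmu_pdpt {                     /* stand-in: 4 eight-byte entries */
	uint64_t entry[4];
};

struct kernel_stack_data {
	struct x86_mmu_pdpt pdpt;
} __attribute__((aligned(0x20)));         /* mirrors __aligned(0x20) */

struct thread_stack_header {
	char page_tables[2 * MMU_PAGE_SIZE];
	char guard_page[MMU_PAGE_SIZE];
	char privilege_stack[MMU_PAGE_SIZE - sizeof(struct kernel_stack_data)];
	struct kernel_stack_data kernel_data;
};

/* A page-aligned stack object plus a 32-byte-aligned offset keeps the PDPT
 * on the 0x20 boundary the hardware requires. */
_Static_assert((offsetof(struct thread_stack_header, kernel_data) & 0x1f) == 0,
	       "PDPT must be 32-byte aligned within the stack object");

int main(void) { return 0; }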
@@ -638,7 +638,26 @@ extern struct task_state_segment _main_tss;
 
 #define Z_X86_THREAD_PT_AREA	(Z_X86_NUM_TABLE_PAGES * MMU_PAGE_SIZE)
 
-#if defined(CONFIG_HW_STACK_PROTECTION) && defined(CONFIG_USERSPACE)
+#if defined(CONFIG_HW_STACK_PROTECTION) || defined(CONFIG_USERSPACE)
+#define Z_X86_STACK_BASE_ALIGN	MMU_PAGE_SIZE
+#else
+#define Z_X86_STACK_BASE_ALIGN	STACK_ALIGN
+#endif
+
+#ifdef CONFIG_USERSPACE
+/* If user mode enabled, expand any stack size to fill a page since that is
+ * the access control granularity and we don't want other kernel data to
+ * unintentionally fall in the latter part of the page
+ */
+#define Z_X86_STACK_SIZE_ALIGN	MMU_PAGE_SIZE
+#else
+#define Z_X86_STACK_SIZE_ALIGN	1
+#endif
+
+struct z_x86_kernel_stack_data {
+	struct x86_mmu_pdpt pdpt;
+} __aligned(0x20);
+
 /* With both hardware stack protection and userspace enabled, stacks are
  * arranged as follows:
  *
@@ -671,53 +690,48 @@ extern struct task_state_segment _main_tss;
  * _main_tss.esp0 always points to the trampoline stack, which handles the
  * page table switch to the kernel PDPT and transplants context to the
  * privileged mode stack.
  *
- * TODO: The stack object layout is getting rather complex. We should define
- * its layout in a struct definition, rather than doing math in the kernel
- * code to find the parts we want or to obtain sizes.
  */
-#define Z_ARCH_THREAD_STACK_RESERVED (MMU_PAGE_SIZE * (2 + Z_X86_NUM_TABLE_PAGES))
-#define _STACK_BASE_ALIGN MMU_PAGE_SIZE
-#elif defined(CONFIG_HW_STACK_PROTECTION) || defined(CONFIG_USERSPACE)
-/* If only one of HW stack protection or userspace is enabled, then the
- * stack will be preceded by one page which is a guard page or a kernel mode
- * stack, respectively.
- */
-#define Z_ARCH_THREAD_STACK_RESERVED (MMU_PAGE_SIZE * (1 + Z_X86_NUM_TABLE_PAGES))
-#define _STACK_BASE_ALIGN MMU_PAGE_SIZE
-#else /* Neither feature */
-#define Z_ARCH_THREAD_STACK_RESERVED 0
-#define _STACK_BASE_ALIGN STACK_ALIGN
-#endif
-#ifdef CONFIG_USERSPACE
-/* If user mode enabled, expand any stack size to fill a page since that is
- * the access control granularity and we don't want other kernel data to
- * unintentionally fall in the latter part of the page
- */
-#define _STACK_SIZE_ALIGN MMU_PAGE_SIZE
-#else
-#define _STACK_SIZE_ALIGN 1
-#endif
+struct z_x86_thread_stack_header {
+#ifdef CONFIG_USERSPACE
+	char page_tables[Z_X86_THREAD_PT_AREA];
+#endif
+
+#ifdef CONFIG_HW_STACK_PROTECTION
+	char guard_page[MMU_PAGE_SIZE];
+#endif
+
+#ifdef CONFIG_USERSPACE
+	char privilege_stack[MMU_PAGE_SIZE -
+		sizeof(struct z_x86_kernel_stack_data)];
+
+	struct z_x86_kernel_stack_data kernel_data;
+#endif
+} __packed __aligned(Z_X86_STACK_SIZE_ALIGN);
+
+#define Z_ARCH_THREAD_STACK_RESERVED \
+	((u32_t)sizeof(struct z_x86_thread_stack_header))
 
 #define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
 	struct _k_thread_stack_element __noinit \
-		__aligned(_STACK_BASE_ALIGN) \
-		sym[ROUND_UP((size), _STACK_SIZE_ALIGN) + Z_ARCH_THREAD_STACK_RESERVED]
+		__aligned(Z_X86_STACK_BASE_ALIGN) \
+		sym[ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN) + \
+			Z_ARCH_THREAD_STACK_RESERVED]
 
 #define Z_ARCH_THREAD_STACK_LEN(size) \
 		(ROUND_UP((size), \
-			  MAX(_STACK_BASE_ALIGN, _STACK_SIZE_ALIGN)) + \
+			  MAX(Z_X86_STACK_BASE_ALIGN, \
+			      Z_X86_STACK_SIZE_ALIGN)) + \
 		 Z_ARCH_THREAD_STACK_RESERVED)
 
 #define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
 	struct _k_thread_stack_element __noinit \
-		__aligned(_STACK_BASE_ALIGN) \
+		__aligned(Z_X86_STACK_BASE_ALIGN) \
 		sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
 
 #define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
-	struct _k_thread_stack_element __aligned(_STACK_BASE_ALIGN) \
-		sym[ROUND_UP((size), _STACK_SIZE_ALIGN) + Z_ARCH_THREAD_STACK_RESERVED]
+	struct _k_thread_stack_element __aligned(Z_X86_STACK_BASE_ALIGN) \
+		sym[ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN) + \
+			Z_ARCH_THREAD_STACK_RESERVED]
 
 #define Z_ARCH_THREAD_STACK_SIZEOF(sym) \
 	(sizeof(sym) - Z_ARCH_THREAD_STACK_RESERVED)
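As a usage note, the macro arithmetic stays symmetric: the definition macros add Z_ARCH_THREAD_STACK_RESERVED on top of the rounded request, and Z_ARCH_THREAD_STACK_SIZEOF subtracts it back, so callers see only their usable stack. A host-side sketch of that accounting (MMU_PAGE_SIZE and the reserved size below are illustrative, not the real values):

#include <stdio.h>

#define MMU_PAGE_SIZE        4096u
#define STACK_SIZE_ALIGN     MMU_PAGE_SIZE          /* userspace enabled   */
#define STACK_RESERVED       (4u * MMU_PAGE_SIZE)   /* illustrative header */
#define ROUND_UP(x, align)   ((((x) + (align) - 1) / (align)) * (align))

/* Mirrors Z_ARCH_THREAD_STACK_DEFINE's array size... */
#define STACK_OBJ_SIZE(size) (ROUND_UP((size), STACK_SIZE_ALIGN) + STACK_RESERVED)
/* ...and Z_ARCH_THREAD_STACK_SIZEOF's recovery of the usable portion. */
#define STACK_USABLE(objsz)  ((objsz) - STACK_RESERVED)

int main(void)
{
	unsigned int request = 1000u;   /* caller asks for 1000 bytes */
	unsigned int obj = STACK_OBJ_SIZE(request);

	/* 1000 rounds up to 4096; adding the header gives 20480 total, and
	 * SIZEOF reports the rounded 4096 back, never the raw 1000. */
	printf("object: %u bytes, usable: %u bytes\n", obj, STACK_USABLE(obj));
	return 0;
}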