x86: unify stack macro definitions
Introduce a new header thread_stack.h which does the right thing for both 32-bit and 64-bit x86. Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
parent
10a1b25da6
commit
a9e0d14c57
4 changed files with 232 additions and 203 deletions
|
@ -16,6 +16,7 @@
|
|||
#include <stdbool.h>
|
||||
#include <irq.h>
|
||||
#include <arch/x86/mmustructs.h>
|
||||
#include <arch/x86/thread_stack.h>
|
||||
|
||||
static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
|
||||
{
|
||||
|
|
|
@ -54,18 +54,8 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
/* interrupt/exception/error related definitions */
|
||||
|
||||
/*
|
||||
* The TCS must be aligned to the same boundary as that used by the floating
|
||||
* point register set. This applies even for threads that don't initially
|
||||
* use floating point, since it is possible to enable floating point support
|
||||
* later on.
|
||||
*/
|
||||
|
||||
#define STACK_ALIGN FP_REG_SET_ALIGN
|
||||
|
||||
typedef struct s_isrList {
|
||||
/** Address of ISR/stub */
|
||||
void *fnc;
|
||||
|
@ -353,176 +343,6 @@ extern void k_float_enable(struct k_thread *thread, unsigned int options);
|
|||
extern struct task_state_segment _main_tss;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/* We need a set of page tables for each thread in the system which runs in
|
||||
* user mode. For each thread, we have:
|
||||
*
|
||||
* - a toplevel PDPT
|
||||
* - a set of page directories for the memory range covered by system RAM
|
||||
 * - a set of page tables for the memory range covered by system RAM
|
||||
*
|
||||
* Directories and tables for memory ranges outside of system RAM will be
|
||||
* shared and not thread-specific.
|
||||
*
|
||||
* NOTE: We are operating under the assumption that memory domain partitions
|
||||
* will not be configured which grant permission to address ranges outside
|
||||
* of system RAM.
|
||||
*
|
||||
* Each of these page tables will be programmed to reflect the memory
|
||||
* permission policy for that thread, which will be the union of:
|
||||
*
|
||||
* - The boot time memory regions (text, rodata, and so forth)
|
||||
* - The thread's stack buffer
|
||||
* - Partitions in the memory domain configuration (if a member of a
|
||||
* memory domain)
|
||||
*
|
||||
 * The PDPT is a fairly small singleton on x86 PAE (32 bytes) and also must
|
||||
* be aligned to 32 bytes, so we place it at the highest addresses of the
|
||||
* page reserved for the privilege elevation stack.
|
||||
*
|
||||
* The page directories and tables require page alignment so we put them as
|
||||
* additional fields in the stack object, using the below macros to compute how
|
||||
* many pages we need.
|
||||
*/
|
||||
|
||||
/* Define a range [Z_X86_PT_START, Z_X86_PT_END) which is the memory range
|
||||
* covered by all the page tables needed for system RAM
|
||||
*/
|
||||
#define Z_X86_PT_START ((u32_t)ROUND_DOWN(DT_PHYS_RAM_ADDR, Z_X86_PT_AREA))
|
||||
#define Z_X86_PT_END ((u32_t)ROUND_UP(DT_PHYS_RAM_ADDR + \
|
||||
(DT_RAM_SIZE * 1024U), \
|
||||
Z_X86_PT_AREA))
|
||||
|
||||
/* Number of page tables needed to cover system RAM. Depends on the specific
|
||||
* bounds of system RAM, but roughly 1 page table per 2MB of RAM */
|
||||
#define Z_X86_NUM_PT ((Z_X86_PT_END - Z_X86_PT_START) / Z_X86_PT_AREA)
|
||||
|
||||
/* Same semantics as above, but for the page directories needed to cover
|
||||
* system RAM.
|
||||
*/
|
||||
#define Z_X86_PD_START ((u32_t)ROUND_DOWN(DT_PHYS_RAM_ADDR, Z_X86_PD_AREA))
|
||||
#define Z_X86_PD_END ((u32_t)ROUND_UP(DT_PHYS_RAM_ADDR + \
|
||||
(DT_RAM_SIZE * 1024U), \
|
||||
Z_X86_PD_AREA))
|
||||
/* Number of page directories needed to cover system RAM. Depends on the
|
||||
* specific bounds of system RAM, but roughly 1 page directory per 1GB of RAM */
|
||||
#define Z_X86_NUM_PD ((Z_X86_PD_END - Z_X86_PD_START) / Z_X86_PD_AREA)
|
||||
|
||||
/* Number of pages we need to reserve in the stack for per-thread page tables */
|
||||
#define Z_X86_NUM_TABLE_PAGES (Z_X86_NUM_PT + Z_X86_NUM_PD)
|
||||
#else
|
||||
/* If we're not implementing user mode, then the MMU tables don't get changed
|
||||
* on context switch and we don't need any per-thread page tables
|
||||
*/
|
||||
#define Z_X86_NUM_TABLE_PAGES 0U
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
#define Z_X86_THREAD_PT_AREA (Z_X86_NUM_TABLE_PAGES * MMU_PAGE_SIZE)
|
||||
|
||||
#if defined(CONFIG_HW_STACK_PROTECTION) || defined(CONFIG_USERSPACE)
|
||||
#define Z_X86_STACK_BASE_ALIGN MMU_PAGE_SIZE
|
||||
#else
|
||||
#define Z_X86_STACK_BASE_ALIGN STACK_ALIGN
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/* If user mode enabled, expand any stack size to fill a page since that is
|
||||
* the access control granularity and we don't want other kernel data to
|
||||
* unintentionally fall in the latter part of the page
|
||||
*/
|
||||
#define Z_X86_STACK_SIZE_ALIGN MMU_PAGE_SIZE
|
||||
#else
|
||||
#define Z_X86_STACK_SIZE_ALIGN 1
|
||||
#endif
|
||||
|
||||
struct z_x86_kernel_stack_data {
|
||||
/* For 32-bit, a single four-entry page directory pointer table, that
|
||||
* needs to be aligned to 32 bytes.
|
||||
*/
|
||||
struct x86_page_tables ptables;
|
||||
} __aligned(0x20);
|
||||
|
||||
/* With both hardware stack protection and userspace enabled, stacks are
|
||||
* arranged as follows:
|
||||
*
|
||||
* High memory addresses
|
||||
* +-----------------------------------------+
|
||||
* | Thread stack (varies) |
|
||||
* +-----------------------------------------+
|
||||
* | PDPT (32 bytes) |
|
||||
* | Privilege elevation stack (4064 bytes) |
|
||||
* +-----------------------------------------+
|
||||
* | Guard page (4096 bytes) |
|
||||
* +-----------------------------------------+
|
||||
* | User page tables (Z_X86_THREAD_PT_AREA) |
|
||||
* +-----------------------------------------+
|
||||
* Low Memory addresses
|
||||
*
|
||||
* Privilege elevation stacks are fixed-size. All the pages containing the
|
||||
* thread stack are marked as user-accessible. The guard page is marked
|
||||
* read-only to catch stack overflows in supervisor mode.
|
||||
*
|
||||
* If a thread starts in supervisor mode, the page containing the PDPT and
|
||||
* privilege elevation stack is also marked read-only.
|
||||
*
|
||||
* If a thread starts in, or drops down to user mode, the privilege stack page
|
||||
 * will be marked as present, supervisor-only. The PDPT will be initialized and
|
||||
* used as the active page tables when that thread is active.
|
||||
*
|
||||
 * If KPTI is not enabled, the _main_tss.esp0 field will always be updated
 * to point to the top of the privilege elevation stack. Otherwise
|
||||
* _main_tss.esp0 always points to the trampoline stack, which handles the
|
||||
* page table switch to the kernel PDPT and transplants context to the
|
||||
* privileged mode stack.
|
||||
*/
|
||||
struct z_x86_thread_stack_header {
|
||||
#ifdef CONFIG_USERSPACE
|
||||
char page_tables[Z_X86_THREAD_PT_AREA];
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HW_STACK_PROTECTION
|
||||
char guard_page[MMU_PAGE_SIZE];
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
char privilege_stack[MMU_PAGE_SIZE -
|
||||
sizeof(struct z_x86_kernel_stack_data)];
|
||||
|
||||
struct z_x86_kernel_stack_data kernel_data;
|
||||
#endif
|
||||
} __packed __aligned(Z_X86_STACK_SIZE_ALIGN);
|
||||
|
||||
#define Z_ARCH_THREAD_STACK_RESERVED \
|
||||
((u32_t)sizeof(struct z_x86_thread_stack_header))
|
||||
|
||||
#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
|
||||
struct _k_thread_stack_element __noinit \
|
||||
__aligned(Z_X86_STACK_BASE_ALIGN) \
|
||||
sym[ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN) + \
|
||||
Z_ARCH_THREAD_STACK_RESERVED]
|
||||
|
||||
#define Z_ARCH_THREAD_STACK_LEN(size) \
|
||||
(ROUND_UP((size), \
|
||||
MAX(Z_X86_STACK_BASE_ALIGN, \
|
||||
Z_X86_STACK_SIZE_ALIGN)) + \
|
||||
Z_ARCH_THREAD_STACK_RESERVED)
|
||||
|
||||
#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
|
||||
struct _k_thread_stack_element __noinit \
|
||||
__aligned(Z_X86_STACK_BASE_ALIGN) \
|
||||
sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
|
||||
|
||||
#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
|
||||
struct _k_thread_stack_element __aligned(Z_X86_STACK_BASE_ALIGN) \
|
||||
sym[ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN) + \
|
||||
Z_ARCH_THREAD_STACK_RESERVED]
|
||||
|
||||
#define Z_ARCH_THREAD_STACK_SIZEOF(sym) \
|
||||
(sizeof(sym) - Z_ARCH_THREAD_STACK_RESERVED)
|
||||
|
||||
#define Z_ARCH_THREAD_STACK_BUFFER(sym) \
|
||||
((char *)((sym) + Z_ARCH_THREAD_STACK_RESERVED))
|
||||
|
||||
#if CONFIG_X86_KERNEL_OOPS
|
||||
#define Z_ARCH_EXCEPT(reason_p) do { \
|
||||
__asm__ volatile( \
|
||||
|
|
|
@ -7,9 +7,7 @@
|
|||
#define ZEPHYR_INCLUDE_ARCH_X86_INTEL64_ARCH_H_
|
||||
|
||||
#include <arch/x86/intel64/thread.h>
|
||||
|
||||
#define STACK_ALIGN 16
|
||||
#define STACK_SIZE_ALIGN 16
|
||||
#include <arch/x86/thread_stack.h>
|
||||
|
||||
#if CONFIG_ISR_STACK_SIZE != (CONFIG_ISR_SUBSTACK_SIZE * CONFIG_ISR_DEPTH)
|
||||
#error "Check ISR stack configuration (CONFIG_ISR_*)"
|
||||
|
@ -20,26 +18,6 @@
|
|||
#endif
|
||||
|
||||
#ifndef _ASMLANGUAGE
|
||||
|
||||
#define Z_ARCH_THREAD_STACK_LEN(size) (ROUND_UP((size), STACK_SIZE_ALIGN))
|
||||
|
||||
#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
|
||||
struct _k_thread_stack_element __noinit \
|
||||
__aligned(STACK_ALIGN) \
|
||||
sym[Z_ARCH_THREAD_STACK_LEN(size)]
|
||||
|
||||
#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
|
||||
struct _k_thread_stack_element __noinit \
|
||||
__aligned(STACK_ALIGN) \
|
||||
sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]
|
||||
|
||||
#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
|
||||
struct _k_thread_stack_element __aligned(STACK_ALIGN) \
|
||||
sym[Z_ARCH_THREAD_STACK_LEN(size)]
|
||||
|
||||
#define Z_ARCH_THREAD_STACK_SIZEOF(sym) sizeof(sym)
|
||||
#define Z_ARCH_THREAD_STACK_BUFFER(sym) ((char *) sym)
|
||||
|
||||
static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
|
||||
{
|
||||
unsigned long key;
|
||||
|
|
230
include/arch/x86/thread_stack.h
Normal file
230
include/arch/x86/thread_stack.h
Normal file
|
@ -0,0 +1,230 @@
|
|||
/*
|
||||
* Copyright (c) 2019 Intel Corporation
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
#ifndef ZEPHYR_INCLUDE_ARCH_X86_THREAD_STACK_H
|
||||
#define ZEPHYR_INCLUDE_ARCH_X86_THREAD_STACK_H
|
||||
|
||||
#include <arch/x86/mmustructs.h>
|
||||
|
||||
#ifdef CONFIG_X86_64
/* Minimum alignment of a thread stack, in bytes.
 * NOTE(review): 16 bytes on 64-bit matches the x86-64 ABI stack alignment
 * requirement; 32-bit uses word alignment here — confirm this is sufficient
 * for the FP register save area on targets with hardware floating point.
 */
#define STACK_ALIGN 16UL
#else
#define STACK_ALIGN 4UL
#endif
|
||||
|
||||
#ifdef CONFIG_USERSPACE
/* We need a set of page tables for each thread in the system which runs in
 * user mode. For each thread, we have:
 *
 *  - On 32-bit
 *    - a toplevel PDPT
 *  - On 64-bit
 *    - a toplevel PML4
 *    - a set of PDPTs for the memory range covered by system RAM
 *  - On all modes:
 *    - a set of page directories for the memory range covered by system RAM
 *    - a set of page tables for the memory range covered by system RAM
 *
 * Directories and tables for memory ranges outside of system RAM will be
 * shared and not thread-specific.
 *
 * NOTE: We are operating under the assumption that memory domain partitions
 * will not be configured which grant permission to address ranges outside
 * of system RAM.
 *
 * Each of these page tables will be programmed to reflect the memory
 * permission policy for that thread, which will be the union of:
 *
 *  - The boot time memory regions (text, rodata, and so forth)
 *  - The thread's stack buffer
 *  - Partitions in the memory domain configuration (if a member of a
 *    memory domain)
 *
 * The PDPT is a fairly small singleton on x86 PAE (32 bytes) and also must
 * be aligned to 32 bytes, so we place it at the highest addresses of the
 * page reserved for the privilege elevation stack. On 64-bit all table
 * entities up to and including the PML4 are page-sized.
 *
 * The page directories and tables require page alignment so we put them as
 * additional fields in the stack object, using the below macros to compute how
 * many pages we need.
 */

/* Define a range [Z_X86_PT_START, Z_X86_PT_END) which is the memory range
 * covered by all the page tables needed for system RAM
 */
#define Z_X86_PT_START	((u32_t)ROUND_DOWN(DT_PHYS_RAM_ADDR, Z_X86_PT_AREA))
#define Z_X86_PT_END	((u32_t)ROUND_UP(DT_PHYS_RAM_ADDR + \
					 (DT_RAM_SIZE * 1024UL), \
					 Z_X86_PT_AREA))

/* Number of page tables needed to cover system RAM. Depends on the specific
 * bounds of system RAM, but roughly 1 page table per 2MB of RAM
 */
#define Z_X86_NUM_PT	((Z_X86_PT_END - Z_X86_PT_START) / Z_X86_PT_AREA)

/* Same semantics as above, but for the page directories needed to cover
 * system RAM.
 */
#define Z_X86_PD_START	((u32_t)ROUND_DOWN(DT_PHYS_RAM_ADDR, Z_X86_PD_AREA))
#define Z_X86_PD_END	((u32_t)ROUND_UP(DT_PHYS_RAM_ADDR + \
					 (DT_RAM_SIZE * 1024UL), \
					 Z_X86_PD_AREA))
/* Number of page directories needed to cover system RAM. Depends on the
 * specific bounds of system RAM, but roughly 1 page directory per 1GB of RAM
 */
#define Z_X86_NUM_PD	((Z_X86_PD_END - Z_X86_PD_START) / Z_X86_PD_AREA)
|
||||
|
||||
#ifdef CONFIG_X86_64
/* Same semantics as above, but for the page directory pointer tables needed
 * to cover system RAM. On 32-bit there is just one 4-entry PDPT.
 *
 * Both bounds must be computed with the same granularity (Z_X86_PDPT_AREA):
 * rounding the start down with a different area size than the end would
 * produce a misaligned range and an incorrect Z_X86_NUM_PDPT count.
 */
#define Z_X86_PDPT_START	((u32_t)ROUND_DOWN(DT_PHYS_RAM_ADDR, \
						   Z_X86_PDPT_AREA))
#define Z_X86_PDPT_END	((u32_t)ROUND_UP(DT_PHYS_RAM_ADDR + \
					 (DT_RAM_SIZE * 1024UL), \
					 Z_X86_PDPT_AREA))
/* Number of PDPTs needed to cover system RAM. Depends on the
 * specific bounds of system RAM, but roughly 1 PDPT per 512GB of RAM
 */
#define Z_X86_NUM_PDPT	((Z_X86_PDPT_END - Z_X86_PDPT_START) / Z_X86_PDPT_AREA)

/* All pages needed for page tables, using computed values plus one more for
 * the top-level PML4
 */
#define Z_X86_NUM_TABLE_PAGES	(Z_X86_NUM_PT + Z_X86_NUM_PD + \
				 Z_X86_NUM_PDPT + 1)
#else /* !CONFIG_X86_64 */
/* Number of pages we need to reserve in the stack for per-thread page tables */
#define Z_X86_NUM_TABLE_PAGES	(Z_X86_NUM_PT + Z_X86_NUM_PD)
#endif /* CONFIG_X86_64 */
|
||||
#else /* !CONFIG_USERSPACE */
|
||||
/* If we're not implementing user mode, then the MMU tables don't get changed
|
||||
* on context switch and we don't need any per-thread page tables
|
||||
*/
|
||||
#define Z_X86_NUM_TABLE_PAGES 0UL
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
||||
#define Z_X86_THREAD_PT_AREA (Z_X86_NUM_TABLE_PAGES * MMU_PAGE_SIZE)
|
||||
|
||||
#if defined(CONFIG_HW_STACK_PROTECTION) || defined(CONFIG_USERSPACE)
|
||||
#define Z_X86_STACK_BASE_ALIGN MMU_PAGE_SIZE
|
||||
#else
|
||||
#define Z_X86_STACK_BASE_ALIGN STACK_ALIGN
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_USERSPACE
|
||||
/* If user mode enabled, expand any stack size to fill a page since that is
|
||||
* the access control granularity and we don't want other kernel data to
|
||||
* unintentionally fall in the latter part of the page
|
||||
*/
|
||||
#define Z_X86_STACK_SIZE_ALIGN MMU_PAGE_SIZE
|
||||
#else
|
||||
#define Z_X86_STACK_SIZE_ALIGN STACK_ALIGN
|
||||
#endif
|
||||
|
||||
#ifndef _ASMLANGUAGE
|
||||
|
||||
#ifndef CONFIG_X86_64
/* Kernel data co-located at the top of the privilege elevation stack page
 * (32-bit only; see struct z_x86_thread_stack_header below).
 */
struct z_x86_kernel_stack_data {
	/* For 32-bit, a single four-entry page directory pointer table, that
	 * needs to be aligned to 32 bytes.
	 *
	 * On 64-bit, all the page table entities up to and including the PML4
	 * are page-aligned and we just reserve room for them in
	 * Z_X86_THREAD_PT_AREA, so this struct is not needed there.
	 */
	struct x86_page_tables ptables;
} __aligned(0x20);
#endif /* !CONFIG_X86_64 */
|
||||
|
||||
/* With both hardware stack protection and userspace enabled, stacks are
 * arranged as follows:
 *
 * High memory addresses
 * +-----------------------------------------+
 * | Thread stack (varies)                   |
 * +-----------------------------------------+
 * | PDPT (32 bytes, 32-bit only)            |
 * | Privilege elevation stack               |
 * |      (4064 or 4096 bytes)               |
 * +-----------------------------------------+
 * | Guard page (4096 bytes)                 |
 * +-----------------------------------------+
 * | User page tables (Z_X86_THREAD_PT_AREA) |
 * +-----------------------------------------+
 * Low Memory addresses
 *
 * Privilege elevation stacks are fixed-size. All the pages containing the
 * thread stack are marked as user-accessible. The guard page is marked
 * read-only to catch stack overflows in supervisor mode.
 *
 * If a thread starts in supervisor mode, the page containing the PDPT and/or
 * privilege elevation stack is also marked read-only.
 *
 * If a thread starts in, or drops down to user mode, the privilege stack page
 * will be marked as present, supervisor-only. The page tables will be
 * initialized and used as the active page tables when that thread is active.
 *
 * If KPTI is not enabled, the _main_tss.esp0 field will always be updated
 * to point to the top of the privilege elevation stack. Otherwise
 * _main_tss.esp0 always points to the trampoline stack, which handles the
 * page table switch to the kernel PDPT and transplants context to the
 * privileged mode stack.
 */
struct z_x86_thread_stack_header {
#ifdef CONFIG_USERSPACE
	/* Per-thread page tables; see diagram above */
	char page_tables[Z_X86_THREAD_PT_AREA];
#endif

#ifdef CONFIG_HW_STACK_PROTECTION
	/* Read-only page catching supervisor-mode stack overflows */
	char guard_page[MMU_PAGE_SIZE];
#endif

#ifdef CONFIG_USERSPACE
#ifdef CONFIG_X86_64
	char privilege_stack[MMU_PAGE_SIZE];
#else
	/* On 32-bit the PDPT (kernel_data) shares this page, so the stack
	 * is shrunk by its size to keep the page exactly MMU_PAGE_SIZE.
	 */
	char privilege_stack[MMU_PAGE_SIZE -
		sizeof(struct z_x86_kernel_stack_data)];

	struct z_x86_kernel_stack_data kernel_data;
#endif /* CONFIG_X86_64 */
#endif /* CONFIG_USERSPACE */
} __packed __aligned(Z_X86_STACK_BASE_ALIGN);
|
||||
|
||||
/* Bytes reserved at the lowest addresses of every stack object for the
 * header (page tables, guard page, privilege stack) described above.
 */
#define Z_ARCH_THREAD_STACK_RESERVED \
	((u32_t)sizeof(struct z_x86_thread_stack_header))

/* Declare a stack object providing at least 'size' usable bytes plus the
 * reserved header, aligned for MMU-based protection when enabled.
 */
#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
	struct _k_thread_stack_element __noinit \
		__aligned(Z_X86_STACK_BASE_ALIGN) \
		sym[ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN) + \
			Z_ARCH_THREAD_STACK_RESERVED]

/* Total size of one stack-array element; rounds up so that every member of
 * an array declared with Z_ARCH_THREAD_STACK_ARRAY_DEFINE stays aligned.
 */
#define Z_ARCH_THREAD_STACK_LEN(size) \
	(ROUND_UP((size), \
		  MAX(Z_X86_STACK_BASE_ALIGN, \
		      Z_X86_STACK_SIZE_ALIGN)) + \
	Z_ARCH_THREAD_STACK_RESERVED)

/* Declare an array of 'nmemb' stack objects, each padded per
 * Z_ARCH_THREAD_STACK_LEN().
 */
#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
	struct _k_thread_stack_element __noinit \
		__aligned(Z_X86_STACK_BASE_ALIGN) \
		sym[nmemb][Z_ARCH_THREAD_STACK_LEN(size)]

/* As Z_ARCH_THREAD_STACK_DEFINE(), but without __noinit, for use as a
 * struct member.
 */
#define Z_ARCH_THREAD_STACK_MEMBER(sym, size) \
	struct _k_thread_stack_element __aligned(Z_X86_STACK_BASE_ALIGN) \
		sym[ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN) + \
			Z_ARCH_THREAD_STACK_RESERVED]

/* Usable (non-reserved) portion of a stack object, in bytes */
#define Z_ARCH_THREAD_STACK_SIZEOF(sym) \
	(sizeof(sym) - Z_ARCH_THREAD_STACK_RESERVED)

/* Pointer to the start of the usable stack buffer, past the reserved header */
#define Z_ARCH_THREAD_STACK_BUFFER(sym) \
	((char *)((sym) + Z_ARCH_THREAD_STACK_RESERVED))
|
||||
|
||||
#endif /* !_ASMLANGUAGE */
|
||||
#endif /* ZEPHYR_INCLUDE_ARCH_X86_THREAD_STACK_H */
|
Loading…
Add table
Add a link
Reference in a new issue