kernel: add Z_STACK_PTR_ALIGN ARCH_STACK_PTR_ALIGN

This operation is formally defined as rounding down a potential
stack pointer value to meet CPU and ABI requirements.

This was previously defined ad-hoc as STACK_ROUND_DOWN().

A new architecture constant ARCH_STACK_PTR_ALIGN is added.
Z_STACK_PTR_ALIGN() is defined in terms of it. This used to
be inconsistently specified as STACK_ALIGN or STACK_ALIGN_SIZE;
in the latter case, STACK_ALIGN meant something else, typically
a required alignment for the base of a stack buffer.

STACK_ROUND_UP() is only used in practice by RISC-V; it has been
deleted everywhere else.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
Andrew Boie 2020-04-19 15:06:31 -07:00 committed by Anas Nashif
commit 618426d6e7
30 changed files with 69 additions and 87 deletions

View file

@ -90,19 +90,19 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
thread->arch.priv_stack_start =
(u32_t)(stackEnd + STACK_GUARD_SIZE);
stackAdjEnd = (char *)STACK_ROUND_DOWN(stackEnd +
stackAdjEnd = (char *)Z_STACK_PTR_ALIGN(stackEnd +
ARCH_THREAD_STACK_RESERVED);
/* reserve 4 bytes for the start of user sp */
stackAdjEnd -= 4;
(*(u32_t *)stackAdjEnd) = STACK_ROUND_DOWN(
(*(u32_t *)stackAdjEnd) = Z_STACK_PTR_ALIGN(
(u32_t)stackEnd - offset);
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
/* reserve stack space for the userspace local data struct */
thread->userspace_local_data =
(struct _thread_userspace_local_data *)
STACK_ROUND_DOWN(stackEnd -
Z_STACK_PTR_ALIGN(stackEnd -
sizeof(*thread->userspace_local_data) - offset);
/* update the start of user sp */
(*(u32_t *)stackAdjEnd) = (u32_t) thread->userspace_local_data;
@ -127,12 +127,12 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
/* reserve stack space for the userspace local data struct */
stackAdjEnd = (char *)STACK_ROUND_DOWN(stackEnd
stackAdjEnd = (char *)Z_STACK_PTR_ALIGN(stackEnd
- sizeof(*thread->userspace_local_data) - offset);
thread->userspace_local_data =
(struct _thread_userspace_local_data *)stackAdjEnd;
#else
stackAdjEnd = (char *)STACK_ROUND_DOWN(stackEnd - offset);
stackAdjEnd = (char *)Z_STACK_PTR_ALIGN(stackEnd - offset);
#endif
}
@ -168,7 +168,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
stackAdjEnd = stackEnd;
pInitCtx = (struct init_stack_frame *)(
STACK_ROUND_DOWN(stackAdjEnd) -
Z_STACK_PTR_ALIGN(stackAdjEnd) -
sizeof(struct init_stack_frame));
pInitCtx->status32 = 0U;

View file

@ -168,12 +168,4 @@ typedef struct _callee_saved_stack _callee_saved_stack_t;
#endif /* _ASMLANGUAGE */
/* stacks */
#define STACK_ALIGN_SIZE 4
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
#endif /* ZEPHYR_ARCH_ARC_INCLUDE_KERNEL_ARCH_DATA_H_ */

View file

@ -48,7 +48,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
/* Reserve space on top of stack for local data. */
u32_t p_local_data = STACK_ROUND_DOWN(pStackMem + stackSize
u32_t p_local_data = Z_STACK_PTR_ALIGN(pStackMem + stackSize
- sizeof(*thread->userspace_local_data));
thread->userspace_local_data =
@ -100,7 +100,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* stack frame (state context), because no FP operations have been
* performed yet for this thread.
*/
pInitCtx = (struct __esf *)(STACK_ROUND_DOWN(stackEnd -
pInitCtx = (struct __esf *)(Z_STACK_PTR_ALIGN(stackEnd -
(char *)top_of_stack_offset - sizeof(struct __basic_sf)));
#if defined(CONFIG_USERSPACE)
@ -401,7 +401,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread,
start_of_main_stack =
Z_THREAD_STACK_BUFFER(main_stack) + main_stack_size;
start_of_main_stack = (char *)STACK_ROUND_DOWN(start_of_main_stack);
start_of_main_stack = (char *)Z_STACK_PTR_ALIGN(start_of_main_stack);
_current = main_thread;
#ifdef CONFIG_TRACING

View file

@ -73,7 +73,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
z_new_thread_init(thread, pStackMem, stackSize);
pInitCtx = (struct init_stack_frame *)(STACK_ROUND_DOWN(stackEnd -
pInitCtx = (struct init_stack_frame *)(Z_STACK_PTR_ALIGN(stackEnd -
sizeof(struct init_stack_frame)));
pInitCtx->entry_point = (u64_t)pEntry;

View file

@ -24,11 +24,6 @@
#include <linker/sections.h>
#include <arch/cpu.h>
/* stacks */
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
#if defined(CONFIG_CPU_CORTEX_M)
#include <aarch32/cortex_m/stack.h>
#include <aarch32/cortex_m/exc.h>

View file

@ -102,7 +102,8 @@ SECTION_FUNC(TEXT, __start)
#ifdef CONFIG_INIT_STACKS
/* Pre-populate all bytes in z_interrupt_stacks with 0xAA
* init.c enforces that the z_interrupt_stacks pointer
* and CONFIG_ISR_STACK_SIZE are a multiple of STACK_ALIGN (4) */
* and CONFIG_ISR_STACK_SIZE are a multiple of ARCH_STACK_PTR_ALIGN (4)
*/
movhi r1, %hi(z_interrupt_stacks)
ori r1, r1, %lo(z_interrupt_stacks)
movhi r2, %hi(CONFIG_ISR_STACK_SIZE)

View file

@ -40,7 +40,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
/* Initial stack frame data, stored at the base of the stack */
iframe = (struct init_stack_frame *)
STACK_ROUND_DOWN(stack_memory + stack_size - sizeof(*iframe));
Z_STACK_PTR_ALIGN(stack_memory + stack_size - sizeof(*iframe));
/* Setup the initial stack frame */
iframe->entry_point = thread_func;

View file

@ -25,13 +25,6 @@
#include <linker/sections.h>
#include <arch/cpu.h>
/* stacks */
#define STACK_ALIGN_SIZE 4
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
#ifndef _ASMLANGUAGE
#include <kernel.h>

View file

@ -40,7 +40,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* "initial stack frame"
*/
thread_status = (posix_thread_status_t *)
STACK_ROUND_DOWN(stack_memory + stack_size
Z_STACK_PTR_ALIGN(stack_memory + stack_size
- sizeof(*thread_status));
/* z_thread_entry() arguments */

View file

@ -14,8 +14,4 @@
#ifndef ZEPHYR_ARCH_POSIX_INCLUDE_KERNEL_ARCH_DATA_H_
#define ZEPHYR_ARCH_POSIX_INCLUDE_KERNEL_ARCH_DATA_H_
/* stacks */
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
#endif /* ZEPHYR_ARCH_POSIX_INCLUDE_KERNEL_ARCH_DATA_H_ */

View file

@ -25,7 +25,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
/* Initial stack frame for thread */
stack_init = (struct __esf *)
STACK_ROUND_DOWN(stack_memory +
Z_STACK_PTR_ALIGN(stack_memory +
stack_size - sizeof(struct __esf));
/* Setup the initial stack frame */

View file

@ -65,7 +65,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, u16_t cs)
* The normal stack buffer is what we will check.
*/
start = _current->stack_info.start;
end = STACK_ROUND_DOWN(_current->stack_info.start +
end = Z_STACK_PTR_ALIGN(_current->stack_info.start +
_current->stack_info.size);
} else {
/* User thread was doing a syscall, check kernel stack bounds */

View file

@ -136,7 +136,7 @@ __csSet:
* interrupts are disabled until the first context switch.
*
* kernel/init.c enforces that the z_interrupt_stacks pointer and
* the ISR stack size are some multiple of STACK_ALIGN, which
* the ISR stack size are some multiple of ARCH_STACK_PTR_ALIGN, which
* is at least 4.
*
* This is also used to call the _sys_resume_from_deep_sleep()

View file

@ -88,7 +88,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
swap_entry = z_thread_entry;
#endif
stack_high = (char *)STACK_ROUND_DOWN(stack_buf + stack_size);
stack_high = (char *)Z_STACK_PTR_ALIGN(stack_buf + stack_size);
/* Create an initial context on the stack expected by z_swap() */
initial_frame = (struct _x86_initial_frame *)

View file

@ -73,7 +73,7 @@ FUNC_NORETURN static void drop_to_user(k_thread_entry_t user_entry,
/* Transition will reset stack pointer to initial, discarding
* any old context since this is a one-way operation
*/
stack_end = STACK_ROUND_DOWN(_current->stack_info.start +
stack_end = Z_STACK_PTR_ALIGN(_current->stack_info.start +
_current->stack_info.size);
z_x86_userspace_enter(user_entry, p1, p2, p3, stack_end,

View file

@ -35,10 +35,6 @@ static inline bool arch_is_in_isr(void)
#endif
}
/* stack alignment related macros: STACK_ALIGN is defined in arch.h */
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN)
struct multiboot_info;
extern FUNC_NORETURN void z_x86_prep_c(void *arg);

View file

@ -24,10 +24,6 @@
#include <linker/sections.h>
#include <arch/cpu.h>
/* stacks */
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
#if !defined(_ASMLANGUAGE) && !defined(__ASSEMBLER__)
#include <kernel.h> /* public kernel API */
#include <zephyr/types.h>

View file

@ -18,11 +18,6 @@
extern "C" {
#endif
/* stack alignment related macros: STACK_ALIGN_SIZE is defined above */
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN_SIZE)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN_SIZE)
extern void FatalErrorHandler(void);
extern void ReservedInterruptHandler(unsigned int intNo);
extern void z_xtensa_fatal_error(unsigned int reason, const z_arch_esf_t *esf);

View file

@ -42,6 +42,8 @@
extern "C" {
#endif
#define ARCH_STACK_PTR_ALIGN 4
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
#if defined(CONFIG_ARC_CORE_MPU)
#if CONFIG_ARC_MPU_VER == 2

View file

@ -46,16 +46,16 @@ extern "C" {
#endif
/**
* @brief Declare the STACK_ALIGN_SIZE
* @brief Declare the ARCH_STACK_PTR_ALIGN
*
* Denotes the required alignment of the stack pointer on public API
* boundaries
*
*/
#ifdef CONFIG_STACK_ALIGN_DOUBLE_WORD
#define STACK_ALIGN_SIZE 8
#define ARCH_STACK_PTR_ALIGN 8
#else
#define STACK_ALIGN_SIZE 4
#define ARCH_STACK_PTR_ALIGN 4
#endif
/**
@ -70,7 +70,7 @@ extern "C" {
#if defined(CONFIG_USERSPACE)
#define Z_THREAD_MIN_STACK_ALIGN CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
#else
#define Z_THREAD_MIN_STACK_ALIGN STACK_ALIGN_SIZE
#define Z_THREAD_MIN_STACK_ALIGN ARCH_STACK_PTR_ALIGN
#endif
/**
@ -97,7 +97,7 @@ extern "C" {
* | Some thread data | <---- Defined when thread is created
* | ... |
* |---------------------| <---- Actual initial stack ptr
* | Initial Stack Ptr | aligned to STACK_ALIGN_SIZE
* | Initial Stack Ptr | aligned to ARCH_STACK_PTR_ALIGN
* | ... |
* | ... |
* | ... |
@ -174,7 +174,7 @@ extern "C" {
* the MPU Stack Guard feature).
*/
#if defined(CONFIG_USERSPACE)
#define Z_PRIVILEGE_STACK_ALIGN MAX(STACK_ALIGN_SIZE, Z_MPU_GUARD_ALIGN)
#define Z_PRIVILEGE_STACK_ALIGN MAX(ARCH_STACK_PTR_ALIGN, Z_MPU_GUARD_ALIGN)
#endif
/**

View file

@ -35,14 +35,13 @@ extern "C" {
#endif
/**
* @brief Declare the STACK_ALIGN_SIZE
* @brief Declare the ARCH_STACK_PTR_ALIGN
*
* Denotes the required alignment of the stack pointer on public API
* boundaries
*
*/
#define STACK_ALIGN 16
#define STACK_ALIGN_SIZE STACK_ALIGN
#define ARCH_STACK_PTR_ALIGN 16
#ifdef __cplusplus
}

View file

@ -24,7 +24,7 @@
#include <arch/common/sys_io.h>
#include <arch/common/ffs.h>
#define STACK_ALIGN 4
#define ARCH_STACK_PTR_ALIGN 4
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>

View file

@ -33,11 +33,9 @@ extern "C" {
#endif
#ifdef CONFIG_64BIT
#define STACK_ALIGN 8
#define STACK_ALIGN_SIZE 8
#define ARCH_STACK_PTR_ALIGN 8
#else
#define STACK_ALIGN 4
#define STACK_ALIGN_SIZE 4
#define ARCH_STACK_PTR_ALIGN 4
#endif
struct __esf {

View file

@ -26,7 +26,7 @@
#include <devicetree.h>
/* stacks, for RISCV architecture stack should be 16byte-aligned */
#define STACK_ALIGN 16
#define ARCH_STACK_PTR_ALIGN 16
#ifdef CONFIG_64BIT
#define RV_OP_LOADREG ld
@ -65,8 +65,7 @@
extern "C" {
#endif
#define STACK_ROUND_UP(x) ROUND_UP(x, STACK_ALIGN)
#define STACK_ROUND_DOWN(x) ROUND_DOWN(x, STACK_ALIGN)
#define STACK_ROUND_UP(x) ROUND_UP(x, ARCH_STACK_PTR_ALIGN)
/* macros convert value of its argument to a string */
#define DO_TOSTR(s) #s

View file

@ -13,7 +13,7 @@
#error "Check ISR stack configuration (CONFIG_ISR_*)"
#endif
#if CONFIG_ISR_SUBSTACK_SIZE % STACK_ALIGN
#if CONFIG_ISR_SUBSTACK_SIZE % ARCH_STACK_PTR_ALIGN
#error "CONFIG_ISR_SUBSTACK_SIZE must be a multiple of 16"
#endif

View file

@ -9,9 +9,9 @@
#include <arch/x86/mmustructs.h>
#ifdef CONFIG_X86_64
#define STACK_ALIGN 16UL
#define ARCH_STACK_PTR_ALIGN 16UL
#else
#define STACK_ALIGN 4UL
#define ARCH_STACK_PTR_ALIGN 4UL
#endif
#ifdef CONFIG_USERSPACE
@ -112,7 +112,7 @@
#if defined(CONFIG_HW_STACK_PROTECTION) || defined(CONFIG_USERSPACE)
#define Z_X86_STACK_BASE_ALIGN MMU_PAGE_SIZE
#else
#define Z_X86_STACK_BASE_ALIGN STACK_ALIGN
#define Z_X86_STACK_BASE_ALIGN ARCH_STACK_PTR_ALIGN
#endif
#ifdef CONFIG_USERSPACE
@ -122,7 +122,7 @@
*/
#define Z_X86_STACK_SIZE_ALIGN MMU_PAGE_SIZE
#else
#define Z_X86_STACK_SIZE_ALIGN STACK_ALIGN
#define Z_X86_STACK_SIZE_ALIGN ARCH_STACK_PTR_ALIGN
#endif
#ifndef _ASMLANGUAGE

View file

@ -27,7 +27,7 @@
#include <xtensa/config/core.h>
#include <arch/common/addr_types.h>
#define STACK_ALIGN 16
#define ARCH_STACK_PTR_ALIGN 16
/* Xtensa GPRs are often designated by two different names */
#define sys_define_gpr_with_alias(name1, name2) union { u32_t name1, name2; }

View file

@ -108,9 +108,16 @@ static inline u32_t arch_k_cycle_get_32(void);
* @see K_THREAD_STACK_RESERVED
*/
/**
* @def ARCH_STACK_PTR_ALIGN
*
* Required alignment of the CPU's stack pointer register value, dictated by
* hardware constraints and the ABI calling convention.
*
* @see Z_STACK_PTR_ALIGN
*/
/** @} */
/**
* @addtogroup arch-pm
* @{

View file

@ -15,6 +15,7 @@
#if !defined(_ASMLANGUAGE)
#include <arch/cpu.h>
#include <sys/util.h>
/* Using typedef deliberately here, this is quite intended to be an opaque
* type.
@ -42,6 +43,16 @@ struct __packed z_thread_stack_element {
* @see z_thread_stack_element
*/
/**
* @brief Properly align a CPU stack pointer value
*
* Take the provided value and round it down such that the value is aligned
* to the CPU and ABI requirements. This is not used for any memory protection
* hardware requirements.
*/
#define Z_STACK_PTR_ALIGN(ptr) ROUND_DOWN((ptr), ARCH_STACK_PTR_ALIGN)
/**
* @brief Obtain an extern reference to a stack
*
@ -74,8 +85,8 @@ static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
*
* This declares a region of memory suitable for use as a thread's stack.
*
* This is the generic, historical definition. Align to STACK_ALIGN and put in
* 'noinit' section so that it isn't zeroed at boot
* This is the generic, historical definition. Align to ARCH_STACK_PTR_ALIGN
* and put in 'noinit' section so that it isn't zeroed at boot
*
* The declared symbol will always be a k_thread_stack_t which can be passed to
* k_thread_create(), but should otherwise not be manipulated. If the buffer
@ -96,7 +107,8 @@ static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
* @param size Size of the stack memory region
*/
#define K_THREAD_STACK_DEFINE(sym, size) \
struct z_thread_stack_element __noinit __aligned(STACK_ALIGN) sym[size]
struct z_thread_stack_element __noinit \
__aligned(ARCH_STACK_PTR_ALIGN) sym[size]
/**
* @brief Calculate size of stacks to be allocated in a stack array
@ -116,8 +128,8 @@ static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
* Create an array of equally sized stacks. See K_THREAD_STACK_DEFINE
* definition for additional details and constraints.
*
* This is the generic, historical definition. Align to STACK_ALIGN and put in
* 'noinit' section so that it isn't zeroed at boot
* This is the generic, historical definition. Align to ARCH_STACK_PTR_ALIGN
* and put in 'noinit' section so that it isn't zeroed at boot
*
* @param sym Thread stack symbol name
* @param nmemb Number of stacks to declare
@ -125,7 +137,8 @@ static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
*/
#define K_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
struct z_thread_stack_element __noinit \
__aligned(STACK_ALIGN) sym[nmemb][K_THREAD_STACK_LEN(size)]
__aligned(ARCH_STACK_PTR_ALIGN) \
sym[nmemb][K_THREAD_STACK_LEN(size)]
/**
* @brief Declare an embedded stack memory region
@ -140,7 +153,7 @@ static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
* @param size Size of the stack memory region
*/
#define K_THREAD_STACK_MEMBER(sym, size) \
struct z_thread_stack_element __aligned(STACK_ALIGN) sym[size]
struct z_thread_stack_element __aligned(ARCH_STACK_PTR_ALIGN) sym[size]
/**
* @brief Return the size in bytes of a stack memory region

View file

@ -491,7 +491,7 @@ void z_setup_new_thread(struct k_thread *new_thread,
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
#ifndef CONFIG_THREAD_USERSPACE_LOCAL_DATA_ARCH_DEFER_SETUP
/* reserve space on top of stack for local data */
stack_size = STACK_ROUND_DOWN(stack_size
stack_size = Z_STACK_PTR_ALIGN(stack_size
- sizeof(*new_thread->userspace_local_data));
#endif
#endif