arch: arm64: Refactor the stack relevant macros

Refactor the stack-related macros in preparation for introducing the stack
guard, and add comments describing the resulting stack layout.

Signed-off-by: Jaxson Han <jaxson.han@arm.com>
Commit d3ec98806d by Jaxson Han, 2023-01-20 11:46:31 +08:00; committed by Carles Cufí.
3 changed files with 42 additions and 10 deletions
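For context: the stack guard this change prepares for is a no-access region placed at the low end of a stack, so an overflow faults in the MMU/MPU immediately instead of silently corrupting whatever lies below. The snippet below illustrates that idea on a hosted OS with POSIX mmap/mprotect; it is an analogy to the mechanism, not code from this commit.

    /* Guard-page illustration (POSIX, not Zephyr). */
    #include <stdio.h>
    #include <stdint.h>
    #include <sys/mman.h>

    #define PAGE_SZ     4096
    #define STACK_PAGES 4

    int main(void)
    {
        /* One extra page at the bottom acts as the guard. */
        uint8_t *base = mmap(NULL, (STACK_PAGES + 1) * PAGE_SZ,
                             PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED) {
            return 1;
        }

        /* Revoke all access to the lowest page: any touch -> SIGSEGV. */
        if (mprotect(base, PAGE_SZ, PROT_NONE) != 0) {
            return 1;
        }

        printf("guard:  %p..%p (no access)\n", (void *)base,
               (void *)(base + PAGE_SZ));
        printf("stack:  %p..%p (grows down)\n", (void *)(base + PAGE_SZ),
               (void *)(base + (STACK_PAGES + 1) * PAGE_SZ));

        /* base[0] = 0; would now fault instead of corrupting memory. */
        return 0;
    }

On arm64 Zephyr the same effect is obtained by mapping the guard region no-access in the MMU, or with an MPU region, as the thread.c comments below describe.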

--- a/arch/arm64/core/reset.S
+++ b/arch/arm64/core/reset.S
@@ -7,6 +7,7 @@
#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
+#include <offsets.h>
#include "boot.h"
#include "macro_priv.inc"
@@ -149,7 +150,7 @@ resetwait:
primary_core:
#endif
/* load primary stack and entry point */
-ldr x24, =(z_interrupt_stacks + CONFIG_ISR_STACK_SIZE)
+ldr x24, =(z_interrupt_stacks + __z_interrupt_stack_SIZEOF)
ldr x25, =z_arm64_prep_c
2:
/* Prepare for calling C code */
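The two hunks in this file belong together: once a guard is reserved inside the interrupt stack object, CONFIG_ISR_STACK_SIZE (the usable size from Kconfig) no longer equals the size of the whole object, so the initial SP must be derived from the generated __z_interrupt_stack_SIZEOF constant instead, which is what the new #include <offsets.h> provides. Below is a sketch of the generator side, assuming Zephyr's gen_offset.h conventions; the exact file emitting this symbol is not shown in the diff.

    /* Offsets-generator sketch: emit the full stack object size as an
     * absolute assembler symbol that reset.S can add to the array base.
     * Assumes z_interrupt_stacks is declared by kernel_internal.h. */
    #include <gen_offset.h>
    #include <kernel_internal.h>

    GEN_ABS_SYM_BEGIN(_OffsetAbsSyms)

    /* sizeof() of one element covers the usable ISR stack plus any
     * reserved space (e.g. the upcoming guard), so the computed SP
     * lands at the true top of the object. */
    GEN_ABSOLUTE_SYM(__z_interrupt_stack_SIZEOF, sizeof(z_interrupt_stacks[0]));

    GEN_ABS_SYM_END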

--- a/arch/arm64/core/thread.c
+++ b/arch/arm64/core/thread.c
@@ -28,19 +28,35 @@
* normal execution. When an exception is taken or a syscall is called the
* stack pointer switches to SP_EL1 and the execution starts using the
* privileged portion of the user stack without touching SP_EL0. This portion
- * is marked as not user accessible in the MMU.
+ * is marked as not user accessible in the MMU/MPU.
*
+ * - a stack guard region will be added below the kernel stack when
+ *   ARM64_STACK_PROTECTION is enabled. In this case, SP_EL0 always points
+ *   to the safe exception stack in the kernel space. For a kernel thread,
+ *   SP_EL0 does not change; it keeps pointing to the safe exception stack.
+ *   For a userspace thread, SP_EL0 switches from the user stack to the safe
+ *   exception stack when entering EL1 mode, and is restored to the user
+ *   stack when returning to userspace (EL0).
+ *
* Kernel threads:
*
* High memory addresses
*
  *   +---------------+ <- stack_ptr
  * E |      ESF      |
  * L |<<<<<<<<<<<<<<<| <- SP_EL1
  * 1 |               |
- *   +---------------+
+ *   +---------------+ <- stack limit
+ *   |  Stack guard  | } Z_ARM64_STACK_GUARD_SIZE (protected by MMU/MPU)
+ *   +---------------+ <- stack_obj
*
* Low Memory addresses
*
*
* User threads:
*
* High memory addresses
*
  *   +---------------+ <- stack_ptr
  * E |               |
  * L |<<<<<<<<<<<<<<<| <- SP_EL0
@@ -49,7 +65,11 @@
  * E |      ESF      |           |  Privileged portion of the stack
  * L +>>>>>>>>>>>>>>>+ <- SP_EL1 |_ used during exceptions and syscalls
  * 1 |               |           |  of size ARCH_THREAD_STACK_RESERVED
- *   +---------------+ <- stack_obj..|
+ *   +---------------+ <- stack limit|
+ *   |  Stack guard  | } Z_ARM64_STACK_GUARD_SIZE (protected by MMU/MPU)
+ *   +---------------+ <- stack_obj
*
* Low Memory addresses
*
* When a kernel thread switches to user mode, the SP_EL0 and SP_EL1
* values are reset accordingly in arch_user_mode_enter().
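To make the diagrams concrete, here is a small self-contained sketch (hypothetical sizes and helper names, not code from the commit) that computes the boundaries of a user-thread stack object with the guard enabled:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-ins for Z_ARM64_STACK_GUARD_SIZE and
     * CONFIG_PRIVILEGED_STACK_SIZE. */
    #define GUARD_SIZE 4096u
    #define PRIV_SIZE  1024u

    int main(void)
    {
        uintptr_t stack_obj   = 0x80000000u;             /* object base     */
        uintptr_t stack_limit = stack_obj + GUARD_SIZE;  /* guard ends here */
        uintptr_t info_start  = stack_limit + PRIV_SIZE; /* user stack base */

        printf("guard      [%#lx, %#lx) no access\n",
               (unsigned long)stack_obj, (unsigned long)stack_limit);
        printf("privileged [%#lx, %#lx) SP_EL1 during exceptions/syscalls\n",
               (unsigned long)stack_limit, (unsigned long)info_start);
        printf("user stack starts at %#lx, the SP_EL0 domain\n",
               (unsigned long)info_start);
        return 0;
    }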

--- a/include/zephyr/arch/arm64/thread_stack.h
+++ b/include/zephyr/arch/arm64/thread_stack.h
@@ -7,11 +7,11 @@
#ifndef ZEPHYR_INCLUDE_ARCH_ARM64_THREAD_STACK_H_
#define ZEPHYR_INCLUDE_ARCH_ARM64_THREAD_STACK_H_
+#include <zephyr/arch/arm64/mm.h>
 #define ARCH_STACK_PTR_ALIGN 16
-#if CONFIG_USERSPACE
-#include <zephyr/arch/arm64/mm.h>
+#if defined(CONFIG_USERSPACE) || defined(CONFIG_ARM64_STACK_PROTECTION)
#define Z_ARM64_STACK_BASE_ALIGN MEM_DOMAIN_ALIGN_AND_SIZE
#define Z_ARM64_STACK_SIZE_ALIGN MEM_DOMAIN_ALIGN_AND_SIZE
#else
@@ -19,6 +19,14 @@
#define Z_ARM64_STACK_SIZE_ALIGN ARCH_STACK_PTR_ALIGN
#endif
+#if defined(CONFIG_ARM64_STACK_PROTECTION)
+#define Z_ARM64_STACK_GUARD_SIZE MEM_DOMAIN_ALIGN_AND_SIZE
+#define Z_ARM64_K_STACK_BASE_ALIGN MEM_DOMAIN_ALIGN_AND_SIZE
+#else
+#define Z_ARM64_STACK_GUARD_SIZE 0
+#define Z_ARM64_K_STACK_BASE_ALIGN ARCH_STACK_PTR_ALIGN
+#endif
/*
* [ see also comments in arch/arm64/core/thread.c ]
*
@@ -36,6 +44,8 @@
  * |                   |
  * +-------------------+ <- thread.stack_info.start
  * | Privileged stack  | } K_(THREAD|KERNEL)_STACK_RESERVED
+ * +-------------------+ <- thread stack limit (updated on every context switch)
+ * |    Stack guard    | } Z_ARM64_STACK_GUARD_SIZE (protected by MMU/MPU)
  * +-------------------+ <- thread.stack_obj
*
* Low Memory addresses
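The macros in the final hunk below size these objects: ARCH_THREAD_STACK_SIZE_ADJUST rounds the requested stack size up to Z_ARM64_STACK_SIZE_ALIGN with ROUND_UP, so the user-accessible portion fills whole MMU/MPU granules. A standalone mirror of that rounding, with an assumed 4 KiB alignment unit:

    #include <stdint.h>

    #define ALIGN_UNIT 4096u  /* assumed MEM_DOMAIN_ALIGN_AND_SIZE */

    /* Same arithmetic as Zephyr's ROUND_UP() used by
     * ARCH_THREAD_STACK_SIZE_ADJUST(). */
    static inline uint32_t round_up(uint32_t size, uint32_t align)
    {
        return (size + align - 1u) / align * align;
    }

    int main(void)
    {
        /* A 3000-byte request becomes one full 4 KiB granule before the
         * reserved (privileged + guard) area is added on top. */
        return round_up(3000u, ALIGN_UNIT) == 4096u ? 0 : 1;
    }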
@@ -45,11 +55,12 @@
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_ARM64_STACK_BASE_ALIGN
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
ROUND_UP((size), Z_ARM64_STACK_SIZE_ALIGN)
-#define ARCH_THREAD_STACK_RESERVED CONFIG_PRIVILEGED_STACK_SIZE
+#define ARCH_THREAD_STACK_RESERVED CONFIG_PRIVILEGED_STACK_SIZE + \
+	Z_ARM64_STACK_GUARD_SIZE
/* kernel stack */
-#define ARCH_KERNEL_STACK_RESERVED 0
-#define ARCH_KERNEL_STACK_OBJ_ALIGN ARCH_STACK_PTR_ALIGN
+#define ARCH_KERNEL_STACK_RESERVED Z_ARM64_STACK_GUARD_SIZE
+#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_ARM64_K_STACK_BASE_ALIGN
#ifndef _ASMLANGUAGE
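Net effect of the last hunk, with assumed numbers (4 KiB MEM_DOMAIN_ALIGN_AND_SIZE, 1 KiB CONFIG_PRIVILEGED_STACK_SIZE): user-thread stack objects now reserve the privileged stack plus the guard, and kernel stack objects, which previously reserved nothing, reserve the guard. A compile-time check of that arithmetic; the values are stand-ins, not fixed by the commit:

    /* Stand-in configuration values (assumptions for illustration). */
    #define MEM_DOMAIN_ALIGN_AND_SIZE    4096
    #define CONFIG_PRIVILEGED_STACK_SIZE 1024

    #define Z_ARM64_STACK_GUARD_SIZE   MEM_DOMAIN_ALIGN_AND_SIZE
    #define ARCH_THREAD_STACK_RESERVED (CONFIG_PRIVILEGED_STACK_SIZE + \
                                        Z_ARM64_STACK_GUARD_SIZE)
    #define ARCH_KERNEL_STACK_RESERVED Z_ARM64_STACK_GUARD_SIZE

    /* User-thread stacks: 1 KiB privileged + 4 KiB guard reserved. */
    _Static_assert(ARCH_THREAD_STACK_RESERVED == 5120, "priv + guard");
    /* Kernel stacks: only the 4 KiB guard (previously 0). */
    _Static_assert(ARCH_KERNEL_STACK_RESERVED == 4096, "guard");

Accessors such as K_KERNEL_STACK_SIZEOF() subtract the reserved area again, so callers keep seeing the usable size they asked for.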