xtensa: mpu: enable userspace support

This extends the Xtensa MPU to support userspace.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
commit 57d591700b
Daniel Leung 2024-01-30 14:33:42 -08:00, committed by Anas Nashif
12 changed files with 535 additions and 8 deletions


@@ -221,6 +221,8 @@ menuconfig XTENSA_MPU
select MPU
select SRAM_REGION_PERMISSIONS
select XTENSA_SMALL_VECTOR_TABLE_ENTRY
select ARCH_MEM_DOMAIN_SYNCHRONOUS_API if USERSPACE
select CURRENT_THREAD_USE_NO_TLS if USERSPACE
select EXPERIMENTAL
# TODO: the target the MPU code was developed on (basically sample_controller
# plus MPU minus s32c1i) does not have cache or SMP capability.
@@ -238,6 +240,9 @@ config XTENSA_MPU_DEFAULT_MEM_TYPE
Default memory type for memory regions: non-cacheable memory,
non-shareable, non-bufferable and interruptible.
If userspace is enabled, it will be used to restore the memory type of
the region being removed from a memory domain.
endif # XTENSA_MPU
endif # CPU_HAS_MPU
@@ -245,7 +250,7 @@ endif # CPU_HAS_MPU
config XTENSA_SYSCALL_USE_HELPER
bool "Use userspace syscall helper"
default y if "$(ZEPHYR_TOOLCHAIN_VARIANT)" = "xt-clang"
-depends on XTENSA_MMU && USERSPACE
+depends on (XTENSA_MMU || XTENSA_MPU) && USERSPACE
help
Use syscall helpers for passing more than 3 arguments.
This is a workaround for toolchains where they have
@@ -254,6 +259,6 @@ config XTENSA_SYSCALL_USE_HELPER
config XTENSA_INSECURE_USERSPACE
bool
default y
-depends on XTENSA_MMU && USERSPACE
+depends on (XTENSA_MMU || XTENSA_MPU) && USERSPACE
endmenu


@@ -6,6 +6,7 @@
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
@@ -21,6 +22,11 @@
#include <xtensa/config/core-isa.h>
#include <xtensa_mpu_priv.h>
#ifdef CONFIG_USERSPACE
BUILD_ASSERT((CONFIG_PRIVILEGED_STACK_SIZE > 0) &&
(CONFIG_PRIVILEGED_STACK_SIZE % XCHAL_MPU_ALIGN) == 0);
#endif
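As an illustration (editor's note, not part of this diff): on a core whose XCHAL_MPU_ALIGN is 4096, the assertion above holds for a configuration such as:

CONFIG_PRIVILEGED_STACK_SIZE=4096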
extern char _heap_end[];
extern char _heap_start[];
@@ -602,10 +608,25 @@ out:
*
* @param map Pointer to foreground MPU map.
*/
#ifdef CONFIG_USERSPACE
/* With userspace enabled, the pointer to the per memory domain MPU map is
 * stashed inside the thread struct. If we kept taking struct xtensa_mpu_map
 * as the argument, a wrapper function would be needed. To avoid the cost of
 * calling that wrapper, take the thread pointer directly as the argument
 * when userspace is enabled. Not to mention that writing the map to hardware
 * is already a costly operation per context switch, so every little bit
 * helps.
 */
void xtensa_mpu_map_write(struct k_thread *thread)
#else
void xtensa_mpu_map_write(struct xtensa_mpu_map *map)
#endif
{
int entry;
#ifdef CONFIG_USERSPACE
struct xtensa_mpu_map *map = thread->arch.mpu_map;
#endif
/*
* Clear MPU entries first, then write MPU entries in reverse order.
*
@@ -698,5 +719,342 @@ void xtensa_mpu_init(void)
consolidate_entries(xtensa_mpu_map_fg_kernel.entries, first_enabled_idx);
/* Write the map into hardware. There is no turning back now. */
#ifdef CONFIG_USERSPACE
struct k_thread dummy_map_thread;
dummy_map_thread.arch.mpu_map = &xtensa_mpu_map_fg_kernel;
xtensa_mpu_map_write(&dummy_map_thread);
#else
xtensa_mpu_map_write(&xtensa_mpu_map_fg_kernel);
#endif
}
#ifdef CONFIG_USERSPACE
int arch_mem_domain_init(struct k_mem_domain *domain)
{
domain->arch.mpu_map = xtensa_mpu_map_fg_kernel;
return 0;
}
int arch_mem_domain_max_partitions_get(void)
{
/*
* Since each memory region requires 2 MPU entries to describe,
* it is hard to figure out how many partitions are available.
* For example, if all the partitions are contiguous, they only
* need 2 entries (1 if the end of the region already has an entry).
* If they are all disjoint, (2 * n) entries are needed to
* describe all of them. So just use CONFIG_MAX_DOMAIN_PARTITIONS
* here and let the application adjust it instead.
*/
return CONFIG_MAX_DOMAIN_PARTITIONS;
}
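To illustrate the entry-count reasoning above (editor's sketch, not part of this diff; the addresses are made up and 4 KiB MPU granularity is assumed):

/* Partition A: [0x20001000, 0x20002000)
 * Partition B: [0x20002000, 0x20003000)  -- contiguous with A
 *
 * Both partitions can be described with 2 MPU entries: one starting
 * the combined region at 0x20001000 and one at 0x20003000 restoring
 * the background attributes. If B instead started at 0x20004000
 * (disjoint), 4 entries would be needed: 2 per region.
 */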
int arch_mem_domain_partition_remove(struct k_mem_domain *domain,
uint32_t partition_id)
{
int ret;
uint32_t perm;
struct xtensa_mpu_map *map = &domain->arch.mpu_map;
struct k_mem_partition *partition = &domain->partitions[partition_id];
uintptr_t end_addr = partition->start + partition->size;
if (end_addr <= partition->start) {
ret = -EINVAL;
goto out;
}
/*
* This is simply to get rid of the user permissions while retaining
* whatever the kernel permissions are, so that we won't set the
* memory region permissions incorrectly, for example, marking a
* read-only region writable.
*
* Note that Zephyr does not do RWX partitions, so we can treat
* those as invalid.
*/
switch (partition->attr) {
case XTENSA_MPU_ACCESS_P_RO_U_NA:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RX_U_NA:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RO_U_RO:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RX_U_RX:
perm = XTENSA_MPU_ACCESS_P_RO_U_NA;
break;
case XTENSA_MPU_ACCESS_P_RW_U_NA:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RWX_U_NA:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RW_U_RWX:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RW_U_RO:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RWX_U_RX:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RW_U_RW:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
perm = XTENSA_MPU_ACCESS_P_RW_U_NA;
break;
default:
/* _P_X_U_NA is not a valid permission for userspace, so ignore.
 * _P_NA_U_X becomes _P_NA_U_NA when removing user permissions.
 * _P_WO_U_WO has no kernel-only counterpart, so just force no access.
 * If we get here with _P_NA_U_NA, there is something seriously
 * wrong with the userspace and/or application code.
 */
perm = XTENSA_MPU_ACCESS_P_NA_U_NA;
break;
}
/*
* Reset the memory region attributes by simply "adding"
* a region with default attributes. If entries already
* exist for the region, the corresponding entries will
* be updated with the default attributes. Or new entries
* will be added to carve a hole in existing regions.
*/
ret = mpu_map_region_add(map, partition->start, end_addr,
perm,
CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
NULL);
out:
return ret;
}
int arch_mem_domain_partition_add(struct k_mem_domain *domain,
uint32_t partition_id)
{
int ret;
struct xtensa_mpu_map *map = &domain->arch.mpu_map;
struct k_mem_partition *partition = &domain->partitions[partition_id];
uintptr_t end_addr = partition->start + partition->size;
if (end_addr <= partition->start) {
ret = -EINVAL;
goto out;
}
ret = mpu_map_region_add(map, partition->start, end_addr,
(uint8_t)partition->attr,
CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
NULL);
out:
return ret;
}
int arch_mem_domain_thread_add(struct k_thread *thread)
{
int ret = 0;
/* New memory domain we are being added to */
struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;
/*
* This is only set for threads that are migrating from some other
* memory domain; for new threads this is NULL.
*/
struct xtensa_mpu_map *old_map = thread->arch.mpu_map;
bool is_user = (thread->base.user_options & K_USER) != 0;
bool is_migration = (old_map != NULL) && is_user;
uintptr_t stack_end_addr = thread->stack_info.start + thread->stack_info.size;
if (stack_end_addr < thread->stack_info.start) {
/* Account for wrapping around back to 0. */
stack_end_addr = 0xFFFFFFFFU;
}
/*
* Allow USER access to the thread's stack in its new domain if
* we are migrating. If we are not migrating this is done in
* xtensa_user_stack_perms().
*/
if (is_migration) {
/* Add stack to new domain's MPU map. */
ret = mpu_map_region_add(&domain->arch.mpu_map,
thread->stack_info.start, stack_end_addr,
XTENSA_MPU_ACCESS_P_RW_U_RW,
CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
NULL);
/* If this fails, it is most likely due to no more available slots in the MPU map. */
__ASSERT_NO_MSG(ret == 0);
}
thread->arch.mpu_map = &domain->arch.mpu_map;
/*
 * Remove the thread stack from the old memory domain if we are
 * migrating away from it. This is done by simply removing
 * USER access from the region.
 */
if (is_migration) {
/*
 * Remove the stack from the old MPU map by "adding" a new
 * memory region to the map, as this carves a hole in the
 * existing map.
 */
ret = mpu_map_region_add(old_map,
thread->stack_info.start, stack_end_addr,
XTENSA_MPU_ACCESS_P_RW_U_NA,
CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
NULL);
}
/*
* Need to switch to new MPU map if this is the current
* running thread.
*/
if (thread == _current_cpu->current) {
xtensa_mpu_map_write(thread);
}
return ret;
}
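For context, a hedged sketch of how the migration path above is typically reached (editor's example using the standard Zephyr memory-domain API, not part of this diff; the domain and thread names are hypothetical):

/* Move an already-running user thread from domain_a to domain_b.
 * k_mem_domain_add_thread() first triggers
 * arch_mem_domain_thread_remove() on domain_a (the thread is not
 * dead, so the stack reset is deferred), then
 * arch_mem_domain_thread_add() on domain_b with old_map != NULL,
 * i.e. the is_migration case above.
 */
k_mem_domain_add_thread(&domain_b, thread_id);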
int arch_mem_domain_thread_remove(struct k_thread *thread)
{
uintptr_t stack_end_addr;
int ret;
struct k_mem_domain *domain = thread->mem_domain_info.mem_domain;
if ((thread->base.user_options & K_USER) == 0) {
ret = 0;
goto out;
}
if ((thread->base.thread_state & _THREAD_DEAD) == 0) {
/* Thread is migrating to another memory domain and not
* exiting for good; we weren't called from
* z_thread_abort(). Resetting the stack region will
* take place in the forthcoming thread_add() call.
*/
ret = 0;
goto out;
}
stack_end_addr = thread->stack_info.start + thread->stack_info.size;
if (stack_end_addr < thread->stack_info.start) {
/* Account for wrapping around back to 0. */
stack_end_addr = 0xFFFFFFFFU;
}
/*
* Restore permissions on the thread's stack area since it is no
* longer a member of the domain.
*/
ret = mpu_map_region_add(&domain->arch.mpu_map,
thread->stack_info.start, stack_end_addr,
XTENSA_MPU_ACCESS_P_RW_U_NA,
CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
NULL);
xtensa_mpu_map_write(thread);
out:
return ret;
}
int arch_buffer_validate(void *addr, size_t size, int write)
{
uintptr_t aligned_addr;
size_t aligned_size, addr_offset;
int ret = 0;
/* addr/size arbitrary, fix this up into an aligned region */
aligned_addr = ROUND_DOWN((uintptr_t)addr, XCHAL_MPU_ALIGN);
addr_offset = (uintptr_t)addr - aligned_addr;
aligned_size = ROUND_UP(size + addr_offset, XCHAL_MPU_ALIGN);
for (size_t offset = 0; offset < aligned_size;
offset += XCHAL_MPU_ALIGN) {
uint32_t probed = xtensa_pptlb_probe(aligned_addr + offset);
uint8_t access_rights = (probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK)
>> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT;
if (write) {
/* Need to check write permission. */
switch (access_rights) {
case XTENSA_MPU_ACCESS_P_WO_U_WO:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RW_U_RWX:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RW_U_RW:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
/* These permissions are okay. */
break;
default:
ret = -EPERM;
goto out;
}
} else {
/* Only check read permission. */
switch (access_rights) {
case XTENSA_MPU_ACCESS_P_RW_U_RWX:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RW_U_RO:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RWX_U_RX:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RO_U_RO:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RX_U_RX:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RW_U_RW:
__fallthrough;
case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
/* These permissions are okay. */
break;
default:
ret = -EPERM;
goto out;
}
}
}
out:
return ret;
}
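A minimal standalone sketch of the alignment fix-up above (editor's example, not part of this diff; the address is made up and XCHAL_MPU_ALIGN is assumed to be 4096):

#include <stdint.h>
#include <stdio.h>

#define MPU_ALIGN 4096U /* assumed XCHAL_MPU_ALIGN */

int main(void)
{
	uintptr_t addr = 0x20001234U; /* hypothetical buffer address */
	size_t size = 0x20U;

	/* ROUND_DOWN(addr, MPU_ALIGN) == 0x20001000 */
	uintptr_t aligned_addr = addr & ~(uintptr_t)(MPU_ALIGN - 1U);
	/* addr_offset == 0x234 */
	size_t addr_offset = addr - aligned_addr;
	/* ROUND_UP(size + addr_offset, MPU_ALIGN) == 0x1000 */
	size_t aligned_size =
		(size + addr_offset + MPU_ALIGN - 1U) & ~(size_t)(MPU_ALIGN - 1U);

	/* arch_buffer_validate() would thus probe one 4 KiB chunk. */
	printf("probe %zu chunk(s) starting at %#lx\n",
	       aligned_size / MPU_ALIGN, (unsigned long)aligned_addr);
	return 0;
}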
void xtensa_user_stack_perms(struct k_thread *thread)
{
int ret;
uintptr_t stack_end_addr = thread->stack_info.start + thread->stack_info.size;
if (stack_end_addr < thread->stack_info.start) {
/* Account for wrapping around back to 0. */
stack_end_addr = 0xFFFFFFFFU;
}
(void)memset((void *)thread->stack_info.start,
(IS_ENABLED(CONFIG_INIT_STACKS)) ? 0xAA : 0x00,
thread->stack_info.size - thread->stack_info.delta);
/* Add stack to new domain's MPU map. */
ret = mpu_map_region_add(thread->arch.mpu_map,
thread->stack_info.start, stack_end_addr,
XTENSA_MPU_ACCESS_P_RW_U_RW,
CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
NULL);
xtensa_mpu_map_write(thread);
/* If this fails, it is most likely due to no more available slots in the MPU map. */
ARG_UNUSED(ret);
__ASSERT_NO_MSG(ret == 0);
}
#endif /* CONFIG_USERSPACE */
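For reference (editor's note, not part of this diff): xtensa_user_stack_perms() runs on the way into user mode. A hedged sketch of the triggering call, using the standard Zephyr API with a hypothetical entry function:

/* A supervisor thread dropping itself to user mode ends up in
 * arch_user_mode_enter(), whose assembly path (xtensa_userspace_enter)
 * calls xtensa_user_stack_perms() before lowering the ring level.
 */
k_thread_user_mode_enter(user_entry, NULL, NULL, NULL);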


@@ -67,8 +67,13 @@ GEN_OFFSET_SYM(_xtensa_irq_bsa_t, hifi);
#ifdef CONFIG_USERSPACE
GEN_OFFSET_SYM(_thread_arch_t, psp);
#ifdef CONFIG_XTENSA_MMU
GEN_OFFSET_SYM(_thread_arch_t, ptables);
#endif
#ifdef CONFIG_XTENSA_MPU
GEN_OFFSET_SYM(_thread_arch_t, mpu_map);
#endif
#endif
GEN_ABS_SYM_END


@@ -300,7 +300,12 @@ xtensa_userspace_enter:
call4 xtensa_user_stack_perms
l32i a6, a1, 24
#ifdef CONFIG_XTENSA_MMU
call4 xtensa_swap_update_page_tables
#endif
#ifdef CONFIG_XTENSA_MPU
call4 xtensa_mpu_map_write
#endif
#if XCHAL_HAVE_THREADPTR
#ifdef CONFIG_THREAD_LOCAL_STORAGE
@@ -337,7 +342,14 @@ xtensa_userspace_enter:
* We have to set callinc as well, since the called
* function will do "entry"
*/
#ifdef CONFIG_XTENSA_MMU
movi a0, PS_WOE|PS_CALLINC(1)|PS_UM|PS_RING(2)
#endif
#ifdef CONFIG_XTENSA_MPU
/* MPU only has RING 0 and 1. */
movi a0, PS_WOE|PS_CALLINC(1)|PS_UM|PS_RING(1)
#endif
wsr a0, EPS2
movi a0, 0


@@ -364,14 +364,14 @@ void *xtensa_excint1_c(int *interrupted_stack)
_current_cpu->nested = 1;
}
-#ifdef CONFIG_XTENSA_MMU
+#if defined(CONFIG_XTENSA_MMU) || defined(CONFIG_XTENSA_MPU)
#ifdef CONFIG_USERSPACE
fixup_out:
#endif
if (is_dblexc) {
__asm__ volatile("wsr.depc %0" : : "r"(0));
}
-#endif /* CONFIG_XTENSA_MMU */
+#endif /* CONFIG_XTENSA_MMU || CONFIG_XTENSA_MPU */
return return_to(interrupted_stack);


@@ -286,7 +286,12 @@ noflush:
/* Switch page tables */
rsr a6, ZSR_CPU
l32i a6, a6, ___cpu_t_current_OFFSET
#ifdef CONFIG_XTENSA_MMU
call4 xtensa_swap_update_page_tables
#endif
#ifdef CONFIG_XTENSA_MPU
call4 xtensa_mpu_map_write
#endif
l32i a2, a3, 0
l32i a2, a2, 0
@@ -394,9 +399,11 @@ _Level1RealVector:
rsr.exccause a0
#ifdef CONFIG_XTENSA_MMU
beqi a0, EXCCAUSE_ITLB_MISS, _handle_tlb_miss_user
+#endif /* CONFIG_XTENSA_MMU */
#ifdef CONFIG_USERSPACE
beqi a0, EXCCAUSE_SYSCALL, _syscall
#endif /* CONFIG_USERSPACE */
+#ifdef CONFIG_XTENSA_MMU
addi a0, a0, -EXCCAUSE_DTLB_MISS
beqz a0, _handle_tlb_miss_user
rsr.exccause a0
@@ -426,12 +433,12 @@ _handle_tlb_miss_user:
l32i a0, a0, 0
rsr a0, ZSR_A0SAVE
rfe
+#endif /* CONFIG_XTENSA_MMU */
#ifdef CONFIG_USERSPACE
_syscall:
rsr a0, ZSR_A0SAVE
j xtensa_do_syscall
#endif /* CONFIG_USERSPACE */
-#endif /* CONFIG_XTENSA_MMU */
.popsection
/* In theory you can have levels up to 15, but known hardware only uses 7. */


@@ -549,7 +549,12 @@ _do_call_\@:
rsr a6, ZSR_CPU
l32i a6, a6, ___cpu_t_current_OFFSET
#ifdef CONFIG_XTENSA_MMU
call4 xtensa_swap_update_page_tables
#endif
#ifdef CONFIG_XTENSA_MPU
call4 xtensa_mpu_map_write
#endif
l32i a1, a1, 0
l32i a0, a1, ___xtensa_irq_bsa_t_a0_OFFSET
addi a1, a1, ___xtensa_irq_bsa_t_SIZEOF


@@ -73,6 +73,23 @@
* @}
*/
/**
* @name Bit shifts and masks for MPU PPTLB return value.
*
* @{
*/
/** Bit shift for the access rights field in the PPTLB return value. */
#define XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT 8U
/** Mask for the access rights field in the PPTLB return value. */
#define XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK 0x00000F00U
/**
* @}
*/
/**
* Define one MPU entry of type struct xtensa_mpu_entry.
*
@@ -140,6 +157,21 @@ static ALWAYS_INLINE void xtensa_mpu_mpuenb_write(uint32_t mpuenb)
__asm__ __volatile__("wsr.mpuenb %0" : : "a"(mpuenb));
}
/**
* @brief Probe for protection TLB entry from an address.
*
* @param addr Probe address.
*
* @return Result of the PPTLB instruction.
*/
static ALWAYS_INLINE uint32_t xtensa_pptlb_probe(uintptr_t addr)
{
uint32_t ret;
__asm__ __volatile__("pptlb %0, %1\n\t" : "=a"(ret) : "a"(addr));
return ret;
}
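As a usage sketch (editor's example, not part of this diff; the helper name is made up), the access rights field can be pulled out of the probe result with the shift and mask defined above, mirroring what arch_buffer_validate() does:

static ALWAYS_INLINE uint8_t xtensa_mpu_access_rights(uintptr_t addr)
{
	uint32_t probed = xtensa_pptlb_probe(addr);

	/* Bits 8..11 of the PPTLB result hold the access rights. */
	return (uint8_t)((probed & XTENSA_MPU_PPTLB_ACCESS_RIGHTS_MASK)
			 >> XTENSA_MPU_PPTLB_ACCESS_RIGHTS_SHIFT);
}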
/**
* @name MPU entry internal helper functions.
*


@@ -65,6 +65,9 @@ struct arch_mem_domain {
uint32_t *ptables __aligned(CONFIG_MMU_PAGE_SIZE);
uint8_t asid;
bool dirty;
#endif
#ifdef CONFIG_XTENSA_MPU
struct xtensa_mpu_map mpu_map;
#endif
sys_snode_t node;
};


@@ -190,6 +190,85 @@ struct xtensa_mpu_map {
struct xtensa_mpu_entry entries[XTENSA_MPU_NUM_ENTRIES];
};
/**
* @name Memory domain and partitions
* @{
*/
typedef uint32_t k_mem_partition_attr_t;
static inline bool xtensa_mem_partition_is_executable(k_mem_partition_attr_t access_rights)
{
bool is_exec;
switch (access_rights) {
case XTENSA_MPU_ACCESS_P_X_U_NA:
case XTENSA_MPU_ACCESS_P_NA_U_X:
case XTENSA_MPU_ACCESS_P_RX_U_NA:
case XTENSA_MPU_ACCESS_P_RWX_U_NA:
case XTENSA_MPU_ACCESS_P_RW_U_RWX:
case XTENSA_MPU_ACCESS_P_RWX_U_RX:
case XTENSA_MPU_ACCESS_P_RX_U_RX:
case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
is_exec = true;
break;
default:
is_exec = false;
break;
}
return is_exec;
}
static inline bool xtensa_mem_partition_is_writable(k_mem_partition_attr_t access_rights)
{
bool is_writable;
switch (access_rights) {
case XTENSA_MPU_ACCESS_P_RW_U_NA:
case XTENSA_MPU_ACCESS_P_RWX_U_NA:
case XTENSA_MPU_ACCESS_P_WO_U_WO:
case XTENSA_MPU_ACCESS_P_RW_U_RWX:
case XTENSA_MPU_ACCESS_P_RW_U_RO:
case XTENSA_MPU_ACCESS_P_RWX_U_RX:
case XTENSA_MPU_ACCESS_P_RW_U_RW:
case XTENSA_MPU_ACCESS_P_RWX_U_RWX:
is_writable = true;
break;
default:
is_writable = false;
break;
}
return is_writable;
}
#define K_MEM_PARTITION_IS_EXECUTABLE(access_rights) \
(xtensa_mem_partition_is_executable(access_rights))
#define K_MEM_PARTITION_IS_WRITABLE(access_rights) \
(xtensa_mem_partition_is_writable(access_rights))
/* Read-Write access permission attributes */
#define K_MEM_PARTITION_P_RW_U_RW \
((k_mem_partition_attr_t) {XTENSA_MPU_ACCESS_P_RW_U_RW})
#define K_MEM_PARTITION_P_RW_U_NA \
((k_mem_partition_attr_t) {XTENSA_MPU_ACCESS_P_RW_U_NA})
#define K_MEM_PARTITION_P_RO_U_RO \
((k_mem_partition_attr_t) {XTENSA_MPU_ACCESS_P_RO_U_RO})
#define K_MEM_PARTITION_P_RO_U_NA \
((k_mem_partition_attr_t) {XTENSA_MPU_ACCESS_P_RO_U_NA})
#define K_MEM_PARTITION_P_NA_U_NA \
((k_mem_partition_attr_t) {XTENSA_MPU_ACCESS_P_NA_U_NA})
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RX_U_RX \
((k_mem_partition_attr_t) {XTENSA_MPU_ACCESS_P_RX_U_RX})
/**
* @}
*/
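These attributes plug into the generic memory-domain API. A hedged sketch of defining and initializing a domain with one read-write partition (editor's example, not part of this diff; the buffer and names are made up):

#include <zephyr/kernel.h>

static uint8_t app_buf[4096] __aligned(4096); /* hypothetical user buffer */

static struct k_mem_partition app_part = {
	.start = (uintptr_t)app_buf,
	.size = sizeof(app_buf),
	.attr = K_MEM_PARTITION_P_RW_U_RW,
};

static struct k_mem_domain app_domain;

void app_domain_setup(void)
{
	struct k_mem_partition *parts[] = { &app_part };

	k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);
}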
/**
* Struct to describe a memory region [start, end).
*/


@@ -10,6 +10,10 @@
#include <stdint.h>
#ifndef _ASMLANGUAGE
#ifdef CONFIG_XTENSA_MPU
#include <zephyr/arch/xtensa/mpu.h>
#endif
/* Xtensa doesn't use these structs, but Zephyr core requires they be
* defined so they can be included in struct _thread_base. Dummy
* field exists for sizeof compatibility with C++.
@@ -24,7 +28,15 @@ typedef struct _callee_saved _callee_saved_t;
struct _thread_arch {
uint32_t last_cpu;
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_XTENSA_MMU
uint32_t *ptables;
#endif
#ifdef CONFIG_XTENSA_MPU
/* Pointer to the memory domain's MPU map. */
struct xtensa_mpu_map *mpu_map;
#endif
/* Initial privilege mode stack pointer when doing a system call.
* Unset for supervisor threads.


@@ -9,6 +9,7 @@
#include <xtensa/config/core-isa.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/util.h>
#ifdef CONFIG_KERNEL_COHERENCE
#define ARCH_STACK_PTR_ALIGN XCHAL_DCACHE_LINESIZE
@@ -17,9 +18,15 @@
#endif
-#if CONFIG_USERSPACE
+#ifdef CONFIG_USERSPACE
#ifdef CONFIG_XTENSA_MMU
#define XTENSA_STACK_BASE_ALIGN CONFIG_MMU_PAGE_SIZE
#define XTENSA_STACK_SIZE_ALIGN CONFIG_MMU_PAGE_SIZE
#endif
#ifdef CONFIG_XTENSA_MPU
#define XTENSA_STACK_BASE_ALIGN XCHAL_MPU_ALIGN
#define XTENSA_STACK_SIZE_ALIGN XCHAL_MPU_ALIGN
#endif
#else
#define XTENSA_STACK_BASE_ALIGN ARCH_STACK_PTR_ALIGN
#define XTENSA_STACK_SIZE_ALIGN ARCH_STACK_PTR_ALIGN
@@ -45,14 +52,16 @@
#ifndef _ASMLANGUAGE
/* thread stack */
-#ifdef CONFIG_XTENSA_MMU
struct xtensa_thread_stack_header {
+#if defined(CONFIG_XTENSA_MMU) || defined(CONFIG_XTENSA_MPU)
char privilege_stack[CONFIG_PRIVILEGED_STACK_SIZE];
+#endif /* CONFIG_XTENSA_MMU || CONFIG_XTENSA_MPU */
} __packed __aligned(XTENSA_STACK_BASE_ALIGN);
+#if defined(CONFIG_XTENSA_MMU) || defined(CONFIG_XTENSA_MPU)
#define ARCH_THREAD_STACK_RESERVED \
sizeof(struct xtensa_thread_stack_header)
-#endif /* CONFIG_XTENSA_MMU */
+#endif /* CONFIG_XTENSA_MMU || CONFIG_XTENSA_MPU */
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) XTENSA_STACK_BASE_ALIGN
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \