kernel: introduce supervisor-only stacks

These stacks are appropriate for threads that run purely in
supervisor mode, and also as stacks for interrupt and exception
handling.

Two new arch defines are introduced:

- ARCH_KERNEL_STACK_RESERVED
- ARCH_KERNEL_STACK_OBJ_ALIGN

New public declaration macros:

- K_KERNEL_STACK_RESERVED
- K_KERNEL_STACK_EXTERN
- K_KERNEL_STACK_DEFINE
- K_KERNEL_STACK_ARRAY_DEFINE
- K_KERNEL_STACK_MEMBER
- K_KERNEL_STACK_SIZEOF

If user mode is not enabled, K_KERNEL_STACK_* and K_THREAD_STACK_*
are equivalent.

Separately generated privilege elevation stacks are now declared
like kernel stacks, removing the need for Z_PRIVILEGE_STACK_ALIGN.
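
As a minimal usage sketch (illustrative only; my_stack, my_thread, and
supervisor_work are made-up names, and <kernel.h> is assumed as the
top-level header):

    #include <kernel.h>

    #define MY_STACK_SIZE 1024

    /* Supervisor-only stack: uses less memory than K_THREAD_STACK_DEFINE()
     * when CONFIG_USERSPACE=y, and is equivalent to it otherwise.
     */
    K_KERNEL_STACK_DEFINE(my_stack, MY_STACK_SIZE);
    static struct k_thread my_thread;

    static void supervisor_work(void *p1, void *p2, void *p3)
    {
        ARG_UNUSED(p1);
        ARG_UNUSED(p2);
        ARG_UNUSED(p3);
        /* Runs purely in supervisor mode; never created with K_USER. */
    }

    void start_supervisor_work(void)
    {
        /* Pass K_KERNEL_STACK_SIZEOF(), never sizeof(), as the stack size. */
        k_thread_create(&my_thread, my_stack,
                        K_KERNEL_STACK_SIZEOF(my_stack),
                        supervisor_work, NULL, NULL, NULL,
                        K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
    }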

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
commit 8ce260d8df
Authored by Andrew Boie on 2020-04-24 16:24:46 -07:00; committed by Anas Nashif
21 changed files with 414 additions and 143 deletions


@ -32,10 +32,10 @@
*/
#if defined(CONFIG_ARC_FIRQ_STACK)
#if defined(CONFIG_SMP)
K_THREAD_STACK_ARRAY_DEFINE(_firq_interrupt_stack, CONFIG_MP_NUM_CPUS,
K_KERNEL_STACK_ARRAY_DEFINE(_firq_interrupt_stack, CONFIG_MP_NUM_CPUS,
CONFIG_ARC_FIRQ_STACK_SIZE);
#else
K_THREAD_STACK_DEFINE(_firq_interrupt_stack, CONFIG_ARC_FIRQ_STACK_SIZE);
K_KERNEL_STACK_DEFINE(_firq_interrupt_stack, CONFIG_ARC_FIRQ_STACK_SIZE);
#endif
/*
@ -46,11 +46,11 @@ K_THREAD_STACK_DEFINE(_firq_interrupt_stack, CONFIG_ARC_FIRQ_STACK_SIZE);
void z_arc_firq_stack_set(void)
{
#ifdef CONFIG_SMP
char *firq_sp = Z_THREAD_STACK_BUFFER(
char *firq_sp = Z_KERNEL_STACK_BUFFER(
_firq_interrupt_stack[z_arc_v2_core_id()]) +
CONFIG_ARC_FIRQ_STACK_SIZE;
#else
char *firq_sp = Z_THREAD_STACK_BUFFER(_firq_interrupt_stack) +
char *firq_sp = Z_KERNEL_STACK_BUFFER(_firq_interrupt_stack) +
CONFIG_ARC_FIRQ_STACK_SIZE;
#endif


@ -257,8 +257,7 @@ void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread)
* protect with a stack guard.
*/
guard_start = thread->stack_info.start - guard_size;
__ASSERT((uint32_t)thread->stack_obj == guard_start,
__ASSERT((uint32_t)thread->stack_obj == guard_start,
"Guard start (0x%x) not beginning at stack object (0x%x)\n",
guard_start, (uint32_t)thread->stack_obj);
}


@ -42,13 +42,15 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
struct __basic_sf *iframe;
#ifdef CONFIG_MPU_STACK_GUARD
#if CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
/* Guard area is carved-out of the buffer, instead of reserved,
* in this configuration, due to buffer alignment constraints
*/
thread->stack_info.start += MPU_GUARD_ALIGN_AND_SIZE;
thread->stack_info.size -= MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
#if defined(CONFIG_USERSPACE)
if (z_stack_is_user_capable(stack)) {
/* Guard area is carved-out of the buffer instead of reserved
* for stacks that can host user threads
*/
thread->stack_info.start += MPU_GUARD_ALIGN_AND_SIZE;
thread->stack_info.size -= MPU_GUARD_ALIGN_AND_SIZE;
}
#endif /* CONFIG_USERSPACE */
#if FP_GUARD_EXTRA_SIZE > 0
if ((thread->base.user_options & K_FP_REGS) != 0) {
/* Larger guard needed due to lazy stacking of FP regs may
@ -128,10 +130,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
}
#endif /* FP_GUARD_EXTRA_SIZE */
#ifdef CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
#endif /* CONFIG_THREAD_STACK_INFO */
/* Stack guard area reserved at the bottom of the thread's


@ -68,13 +68,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
struct _x86_initial_frame *initial_frame;
#if CONFIG_X86_STACK_PROTECTION
struct z_x86_thread_stack_header *header =
(struct z_x86_thread_stack_header *)stack;
/* Set guard area to read-only to catch stack overflows */
z_x86_mmu_set_flags(&z_x86_kernel_ptables, &header->guard_page,
MMU_PAGE_SIZE, MMU_ENTRY_READ, Z_X86_MMU_RW,
true);
z_x86_set_stack_guard(stack);
#endif
#ifdef CONFIG_USERSPACE
@ -121,7 +115,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
k_thread_entry_t _main)
{
struct k_thread *dummy_thread = (struct k_thread *)
ROUND_UP(Z_THREAD_STACK_BUFFER(z_interrupt_stacks[0]),
ROUND_UP(Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]),
FP_REG_SET_ALIGN);
__ASSERT(((uintptr_t)(&dummy_thread->arch.preempFloatReg) %


@ -24,13 +24,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
struct x86_initial_frame *iframe;
#if CONFIG_X86_STACK_PROTECTION
struct z_x86_thread_stack_header *header =
(struct z_x86_thread_stack_header *)stack;
/* Set guard area to read-only to catch stack overflows */
z_x86_mmu_set_flags(&z_x86_kernel_ptables, &header->guard_page,
MMU_PAGE_SIZE, MMU_ENTRY_READ, Z_X86_MMU_RW,
true);
z_x86_set_stack_guard(stack);
#endif
#ifdef CONFIG_USERSPACE
switch_entry = z_x86_userspace_prepare_thread(thread);


@ -40,9 +40,9 @@ FUNC_NORETURN void z_x86_prep_c(void *arg)
#endif
#if CONFIG_X86_STACK_PROTECTION
z_x86_mmu_set_flags(&z_x86_kernel_ptables, z_interrupt_stacks[0],
MMU_PAGE_SIZE, MMU_ENTRY_READ, Z_X86_MMU_RW,
true);
for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
z_x86_set_stack_guard(z_interrupt_stacks[i]);
}
#endif
#if defined(CONFIG_SMP)


@ -892,6 +892,30 @@ void z_x86_paging_init(void)
#endif
}
#ifdef CONFIG_X86_STACK_PROTECTION
void z_x86_set_stack_guard(k_thread_stack_t *stack)
{
#ifdef CONFIG_USERSPACE
if (z_stack_is_user_capable(stack)) {
struct z_x86_thread_stack_header *header =
(struct z_x86_thread_stack_header *)stack;
/* Set guard area to read-only to catch stack overflows */
z_x86_mmu_set_flags(&z_x86_kernel_ptables, &header->guard_page,
MMU_PAGE_SIZE, MMU_ENTRY_READ, Z_X86_MMU_RW,
true);
} else
#endif /* CONFIG_USERSPACE */
{
/* Kernel-only stacks have the guard be the first page */
z_x86_mmu_set_flags(&z_x86_kernel_ptables, stack,
MMU_PAGE_SIZE, MMU_ENTRY_READ, Z_X86_MMU_RW,
true);
}
}
#endif /* CONFIG_X86_STACK_PROTECTION */
#ifdef CONFIG_X86_USERSPACE
int arch_buffer_validate(void *addr, size_t size, int write)
{


@ -111,6 +111,10 @@ void z_x86_apply_mem_domain(struct x86_page_tables *ptables,
void z_x86_do_kernel_oops(const z_arch_esf_t *esf);
#ifdef CONFIG_X86_STACK_PROTECTION
void z_x86_set_stack_guard(k_thread_stack_t *stack);
#endif
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_ */


@ -70,6 +70,23 @@ extern "C" {
#define Z_ARC_STACK_GUARD_SIZE 0
#endif
/* Kernel-only stacks have the following layout if a stack guard is enabled:
*
* +------------+ <- thread.stack_obj
* | Guard | } Z_ARC_STACK_GUARD_SIZE
* +------------+ <- thread.stack_info.start
* | Kernel |
* | stack |
* | |
* +............|
* | TLS | } thread.stack_info.delta
* +------------+ <- thread.stack_info.start + thread.stack_info.size
*/
#ifdef CONFIG_MPU_STACK_GUARD
#define ARCH_KERNEL_STACK_RESERVED Z_ARC_STACK_GUARD_SIZE
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_ARC_MPU_ALIGN
#endif
#ifdef CONFIG_USERSPACE
/* Any thread running in user mode will have full access to the region denoted
* by thread.stack_info.
@ -112,7 +129,7 @@ BUILD_ASSERT(CONFIG_PRIVILEGED_STACK_SIZE % Z_ARC_MPU_ALIGN == 0,
* in another area of memory generated at build time by gen_kobject_list.py
*
* +------------+ <- thread.arch.priv_stack_start
* | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
* | Priv Stack | } Z_KERNEL_STACK_LEN(CONFIG_PRIVILEGED_STACK_SIZE)
* +------------+
*
* +------------+ <- thread.stack_obj = thread.stack_info.start
@ -123,7 +140,6 @@ BUILD_ASSERT(CONFIG_PRIVILEGED_STACK_SIZE % Z_ARC_MPU_ALIGN == 0,
* | TLS | } thread.stack_info.delta
* +------------+ <- thread.stack_info.start + thread.stack_info.size
*/
#define Z_PRIVILEGE_STACK_ALIGN ARCH_STACK_PTR_ALIGN
#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
Z_POW2_CEIL(ROUND_UP((size), Z_ARC_MPU_ALIGN))
#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \


@ -168,44 +168,16 @@ extern "C" {
#endif
#endif
/**
* @brief Define alignment of a privilege stack buffer
*
* This is used to determine the required alignment of threads'
* privilege stacks when building with support for user mode.
*
* @note
* The privilege stacks do not need to respect the minimum MPU
* region alignment requirement (unless this is enforced via
* the MPU Stack Guard feature).
#ifdef CONFIG_MPU_STACK_GUARD
/* Kernel-only stacks need an MPU guard region programmed at the beginning of
* the stack object, so align the object appropriately.
*/
#if defined(CONFIG_USERSPACE)
#define Z_PRIVILEGE_STACK_ALIGN MAX(ARCH_STACK_PTR_ALIGN, Z_MPU_GUARD_ALIGN)
#define ARCH_KERNEL_STACK_RESERVED MPU_GUARD_ALIGN_AND_SIZE
#define ARCH_KERNEL_STACK_OBJ_ALIGN Z_MPU_GUARD_ALIGN
#endif
/* On arm, the MPU guard can take a few different forms: completely
* reserved, completely borrowed, or a combination of the two.
*
* On devices without power-of-two MPU region requirements, the MPU
* guard is reserved as extra memory in the beginning of the stack
* object. If we need a larger floating point guard, this is carved
* out of the thread stack buffer.
*
* On devices with power-of-two MPU requirements, the guard is
* completely carved out of the thread stack buffer.
*
* thread->stack_info is updated any time the guard configuration
* changes. For instance, if a thread drops down to user mode, then
* the guard is no longer necessary and it gets moved to guard the
* privilege mode stack instead.
*/
#if !defined(CONFIG_USERSPACE) || \
!defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
/* TODO: Convert all non power-of-two ARM MPUs to not use separate privilege
* stack generation, right now this is done unconditionally
*/
#define ARCH_THREAD_STACK_RESERVED MPU_GUARD_ALIGN_AND_SIZE
#endif
/* On arm, all MPU guards are carve-outs. */
#define ARCH_THREAD_STACK_RESERVED 0
/* Legacy case: retain containing extern "C" with C++ */
#ifdef CONFIG_ARM_MPU


@ -203,5 +203,13 @@ struct z_x86_thread_stack_header {
#define ARCH_THREAD_STACK_RESERVED \
sizeof(struct z_x86_thread_stack_header)
#ifdef CONFIG_HW_STACK_PROTECTION
#define ARCH_KERNEL_STACK_RESERVED MMU_PAGE_SIZE
#define ARCH_KERNEL_STACK_OBJ_ALIGN MMU_PAGE_SIZE
#else
#define ARCH_KERNEL_STACK_RESERVED 0
#define ARCH_KERNEL_STACK_OBJ_ALIGN ARCH_STACK_PTR_ALIGN
#endif
#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_THREAD_STACK_H */
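
As a rough worked example of what these defines mean for object sizes
(assuming the usual x86 MMU_PAGE_SIZE of 4096 and CONFIG_HW_STACK_PROTECTION=y;
kstack is an illustrative name):

    K_KERNEL_STACK_DEFINE(kstack, 1000);
    /* sizeof(kstack) is ROUND_UP(1000, ARCH_STACK_PTR_ALIGN) + 4096: the
     * usable buffer plus one guard page reserved at the start of the object.
     * K_KERNEL_STACK_SIZEOF(kstack) subtracts the reservation back out and
     * returns only the usable buffer size. The object is aligned to
     * MMU_PAGE_SIZE so that z_x86_set_stack_guard() can map the guard page
     * read-only.
     */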


@ -781,9 +781,25 @@ extern void k_thread_foreach_unlocked(
* K_FP_REGS, and K_SSE_REGS. Multiple options may be specified by separating
* them using "|" (the logical OR operator).
*
* The stack_size parameter must be the same size value used when the stack
* object was defined, or the return value of K_THREAD_STACK_SIZEOF() on the
* stack object.
* Stack objects passed to this function must be originally defined with
* either of these macros in order to be portable:
*
* - K_THREAD_STACK_DEFINE() - For stacks that may support either user or
* supervisor threads.
* - K_KERNEL_STACK_DEFINE() - For stacks that may support supervisor
* threads only. These stacks use less memory if CONFIG_USERSPACE is
* enabled.
*
* The stack_size parameter has constraints. It must either be:
*
* - The original size value passed to K_THREAD_STACK_DEFINE() or
* K_KERNEL_STACK_DEFINE()
* - The return value of K_THREAD_STACK_SIZEOF(stack) if the stack was
* defined with K_THREAD_STACK_DEFINE()
* - The return value of K_KERNEL_STACK_SIZEOF(stack) if the stack was
* defined with K_KERNEL_STACK_DEFINE().
*
* Using other values, or sizeof(stack), may produce undefined behavior.
*
* @param new_thread Pointer to uninitialized struct k_thread
* @param stack Pointer to the stack space.
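
A compact sketch of the two portable patterns described above (thread,
stack, and entry-point names are illustrative; priority 5 is arbitrary):

    #include <kernel.h>

    static struct k_thread app_thread, sys_thread;
    K_THREAD_STACK_DEFINE(app_stack, 1024); /* may host user or supervisor threads */
    K_KERNEL_STACK_DEFINE(sys_stack, 1024); /* supervisor-only */

    static void app_entry(void *p1, void *p2, void *p3) { }
    static void sys_entry(void *p1, void *p2, void *p3) { }

    void spawn_both(void)
    {
        /* Pass the matching *_STACK_SIZEOF(); plain sizeof() is not portable. */
        k_thread_create(&app_thread, app_stack,
                        K_THREAD_STACK_SIZEOF(app_stack), app_entry,
                        NULL, NULL, NULL, 5, K_USER | K_INHERIT_PERMS,
                        K_NO_WAIT);
        k_thread_create(&sys_thread, sys_stack,
                        K_KERNEL_STACK_SIZEOF(sys_stack), sys_entry,
                        NULL, NULL, NULL, 5, 0, K_NO_WAIT);
    }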


@ -12,6 +12,11 @@ SECTION_PROLOGUE(_NOINIT_SECTION_NAME,(NOLOAD),)
*/
*(.noinit)
*(".noinit.*")
#ifdef CONFIG_USERSPACE
z_user_stacks_start = .;
*(.user_stacks)
z_user_stacks_end = .;
#endif /* CONFIG_USERSPACE */
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.


@ -245,6 +245,8 @@ extern char _ramfunc_rom_start[];
#ifdef CONFIG_USERSPACE
extern char z_priv_stacks_ram_start[];
extern char z_priv_stacks_ram_end[];
extern char z_user_stacks_start[];
extern char z_user_stacks_end[];
#endif /* CONFIG_USERSPACE */
#endif /* ! _ASMLANGUAGE */


@ -113,6 +113,26 @@ static inline uint32_t arch_k_cycle_get_32(void);
*
* @see Z_THREAD_STACK_SIZE_ADJUST
*/
/**
* @def ARCH_KERNEL_STACK_RESERVED
* @brief MPU guard size for kernel-only stacks
*
* If MPU stack guards are used to catch stack overflows, specify the
* amount of space reserved in kernel stack objects. If guard sizes are
* context dependent, this should be the minimum guard size, with
* remaining space carved out if needed.
*
* Optional definition, defaults to 0.
*
* @see K_KERNEL_STACK_RESERVED
*/
/**
* @def ARCH_KERNEL_STACK_OBJ_ALIGN
* @brief Required alignment of the lowest address of a kernel-only stack.
*/
/** @} */
/**
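
For reference, a hypothetical MPU-based port (not part of this commit) could
satisfy these optional defines with something like the following, where
MY_GUARD_SIZE and MY_GUARD_ALIGN stand in for whatever the MPU hardware
requires:

    #ifdef CONFIG_MPU_STACK_GUARD
    /* Reserve room for the guard at the front of every kernel-only stack
     * object, and align the object so the guard region can be programmed.
     */
    #define ARCH_KERNEL_STACK_RESERVED  MY_GUARD_SIZE
    #define ARCH_KERNEL_STACK_OBJ_ALIGN MY_GUARD_ALIGN
    #endif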


@ -76,6 +76,108 @@ static inline char *z_stack_ptr_align(char *ptr)
#define Z_STACK_PTR_TO_FRAME(type, ptr) \
(type *)((ptr) - sizeof(type))
#ifdef ARCH_KERNEL_STACK_RESERVED
#define K_KERNEL_STACK_RESERVED ((size_t)ARCH_KERNEL_STACK_RESERVED)
#else
#define K_KERNEL_STACK_RESERVED ((size_t)0)
#endif
#define Z_KERNEL_STACK_SIZE_ADJUST(size) (ROUND_UP(size, \
ARCH_STACK_PTR_ALIGN) + \
K_KERNEL_STACK_RESERVED)
#ifdef ARCH_KERNEL_STACK_OBJ_ALIGN
#define Z_KERNEL_STACK_OBJ_ALIGN ARCH_KERNEL_STACK_OBJ_ALIGN
#else
#define Z_KERNEL_STACK_OBJ_ALIGN ARCH_STACK_PTR_ALIGN
#endif
/**
* @brief Obtain an extern reference to a stack
*
* This macro properly brings the symbol of a thread stack declared
* elsewhere into scope.
*
* @param sym Thread stack symbol name
*/
#define K_KERNEL_STACK_EXTERN(sym) extern k_thread_stack_t sym[]
/**
* @def K_KERNEL_STACK_DEFINE
* @brief Define a toplevel kernel stack memory region
*
* This declares a region of memory for use as a thread stack, for threads
* that exclusively run in supervisor mode. This is also suitable for
* declaring special stacks for interrupt or exception handling.
*
* Stacks declared with this macro may not host user mode threads.
*
* It is legal to precede this definition with the 'static' keyword.
*
* It is NOT legal to take sizeof(sym) and pass that to the stack_size
* parameter of k_thread_create(), as it may not be the same as the
* 'size' parameter. Use K_KERNEL_STACK_SIZEOF() instead.
*
* The total amount of memory allocated may be increased to accommodate
* fixed-size stack overflow guards.
*
* @param sym Thread stack symbol name
* @param size Size of the stack memory region
*/
#define K_KERNEL_STACK_DEFINE(sym, size) \
struct z_thread_stack_element __noinit \
__aligned(Z_KERNEL_STACK_OBJ_ALIGN) \
sym[Z_KERNEL_STACK_SIZE_ADJUST(size)]
#define Z_KERNEL_STACK_LEN(size) \
ROUND_UP(Z_KERNEL_STACK_SIZE_ADJUST(size), Z_KERNEL_STACK_OBJ_ALIGN)
/**
* @def K_KERNEL_STACK_ARRAY_DEFINE
* @brief Define a toplevel array of kernel stack memory regions
*
* Stacks declared with this macro may not host user mode threads.
*
* @param sym Kernel stack array symbol name
* @param nmemb Number of stacks to declare
* @param size Size of the stack memory region
*/
#define K_KERNEL_STACK_ARRAY_DEFINE(sym, nmemb, size) \
struct z_thread_stack_element __noinit \
__aligned(Z_KERNEL_STACK_OBJ_ALIGN) \
sym[nmemb][Z_KERNEL_STACK_LEN(size)]
/**
* @def K_KERNEL_STACK_MEMBER
* @brief Declare an embedded stack memory region
*
* Used for kernel stacks embedded within other data structures.
*
* Stacks declared with this macro may not host user mode threads.
* @param sym Thread stack symbol name
* @param size Size of the stack memory region
*/
#define K_KERNEL_STACK_MEMBER(sym, size) \
struct z_thread_stack_element \
__aligned(Z_KERNEL_STACK_OBJ_ALIGN) \
sym[Z_KERNEL_STACK_SIZE_ADJUST(size)]
#define K_KERNEL_STACK_SIZEOF(sym) (sizeof(sym) - K_KERNEL_STACK_RESERVED)
static inline char *Z_KERNEL_STACK_BUFFER(k_thread_stack_t *sym)
{
return (char *)sym + K_KERNEL_STACK_RESERVED;
}
#ifndef CONFIG_USERSPACE
#define K_THREAD_STACK_RESERVED K_KERNEL_STACK_RESERVED
#define K_THREAD_STACK_SIZEOF K_KERNEL_STACK_SIZEOF
#define K_THREAD_STACK_LEN Z_KERNEL_STACK_LEN
#define K_THREAD_STACK_DEFINE K_KERNEL_STACK_DEFINE
#define K_THREAD_STACK_ARRAY_DEFINE K_KERNEL_STACK_ARRAY_DEFINE
#define K_THREAD_STACK_MEMBER K_KERNEL_STACK_MEMBER
#define Z_THREAD_STACK_BUFFER Z_KERNEL_STACK_BUFFER
#define K_THREAD_STACK_EXTERN K_KERNEL_STACK_EXTERN
#else
/**
* @def K_THREAD_STACK_RESERVED
* @brief Indicate how much additional memory is reserved for stack objects
@ -217,7 +319,7 @@ static inline char *z_stack_ptr_align(char *ptr)
* @param size Size of the stack memory region
*/
#define K_THREAD_STACK_DEFINE(sym, size) \
struct z_thread_stack_element __noinit \
struct z_thread_stack_element Z_GENERIC_SECTION(.user_stacks) \
__aligned(Z_THREAD_STACK_OBJ_ALIGN(size)) \
sym[Z_THREAD_STACK_SIZE_ADJUST(size)]
@ -252,7 +354,7 @@ static inline char *z_stack_ptr_align(char *ptr)
* @param size Size of the stack memory region
*/
#define K_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
struct z_thread_stack_element __noinit \
struct z_thread_stack_element Z_GENERIC_SECTION(.user_stacks) \
__aligned(Z_THREAD_STACK_OBJ_ALIGN(size)) \
sym[nmemb][K_THREAD_STACK_LEN(size)]
@ -265,6 +367,12 @@ static inline char *z_stack_ptr_align(char *ptr)
* by threads else a stack overflow will lead to silent corruption. In other
* words, the containing data structure should live in RAM owned by the kernel.
*
* A user thread can only be started with a stack defined in this way if
* the thread starting it is in supervisor mode.
*
* This is now deprecated, as stacks defined in this way are not usable from
* user mode. Use K_KERNEL_STACK_MEMBER.
*
* @param sym Thread stack symbol name
* @param size Size of the stack memory region
*/
@ -291,5 +399,6 @@ static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
{
return (char *)sym + K_THREAD_STACK_RESERVED;
}
#endif /* CONFIG_USERSPACE */
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_SYS_THREAD_STACK_H */
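
A hedged sketch of the embedded (K_KERNEL_STACK_MEMBER) case, using made-up
names; the test changes further down exercise the same pattern:

    #include <kernel.h>

    /* A driver-style object that carries its own supervisor-only work stack. */
    struct my_worker {
        struct k_thread thread;
        struct k_sem wake;
        K_KERNEL_STACK_MEMBER(stack, 512);
    };

    static void worker_entry(void *me, void *unused1, void *unused2)
    {
        struct my_worker *w = me;

        ARG_UNUSED(unused1);
        ARG_UNUSED(unused2);
        for (;;) {
            k_sem_take(&w->wake, K_FOREVER);
            /* ... kernel-mode work ... */
        }
    }

    static void worker_start(struct my_worker *w)
    {
        k_sem_init(&w->wake, 0, 1);
        k_thread_create(&w->thread, w->stack,
                        K_KERNEL_STACK_SIZEOF(w->stack), worker_entry,
                        w, NULL, NULL, K_PRIO_PREEMPT(1), 0, K_NO_WAIT);
    }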


@ -124,6 +124,10 @@ extern K_THREAD_STACK_ARRAY_DEFINE(z_interrupt_stacks, CONFIG_MP_NUM_CPUS,
extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
#endif
#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack);
#endif /* CONFIG_USERSPACE */
#ifdef __cplusplus
}
#endif


@ -28,6 +28,10 @@
#include <sys/check.h>
#include <random/rand32.h>
#define LOG_LEVEL CONFIG_KERNEL_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_DECLARE(os);
#ifdef CONFIG_THREAD_MONITOR
/* This lock protects the linked list of active threads; i.e. the
* initial _kernel.threads pointer and the linked list made up of
@ -444,25 +448,34 @@ static size_t random_offset(size_t stack_size)
static char *setup_thread_stack(struct k_thread *new_thread,
k_thread_stack_t *stack, size_t stack_size)
{
size_t stack_obj_size;
size_t stack_obj_size, stack_buf_size;
char *stack_ptr, *stack_buf_start;
size_t delta = 0;
char *stack_ptr;
stack_obj_size = Z_THREAD_STACK_SIZE_ADJUST(stack_size);
#ifdef CONFIG_USERSPACE
if (z_stack_is_user_capable(stack)) {
stack_obj_size = Z_THREAD_STACK_SIZE_ADJUST(stack_size);
stack_buf_start = Z_THREAD_STACK_BUFFER(stack);
stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
} else
#endif
{
/* Object cannot host a user mode thread */
stack_obj_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size);
stack_buf_start = Z_KERNEL_STACK_BUFFER(stack);
stack_buf_size = stack_obj_size - K_KERNEL_STACK_RESERVED;
}
/* Initial stack pointer at the high end of the stack object, may
* be reduced later in this function by TLS or random offset
*/
stack_ptr = (char *)stack + stack_obj_size;
#if defined(CONFIG_INIT_STACKS) || defined(CONFIG_THREAD_STACK_INFO) || \
defined(CONFIG_STACK_SENTINEL)
char *stack_buf_start;
LOG_DBG("stack %p for thread %p: obj_size=%zu buf_start=%p "
" buf_size %zu stack_ptr=%p",
stack, new_thread, stack_obj_size, stack_buf_start,
stack_buf_size, stack_ptr);
stack_buf_start = Z_THREAD_STACK_BUFFER(stack);
#endif
#if defined(CONFIG_INIT_STACKS) || defined(CONFIG_THREAD_STACK_INFO) || \
CONFIG_STACK_POINTER_RANDOM
size_t stack_buf_size;
stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
#endif
#ifdef CONFIG_INIT_STACKS
memset(stack_buf_start, 0xaa, stack_buf_size);
#endif
@ -518,6 +531,9 @@ char *z_setup_new_thread(struct k_thread *new_thread,
Z_ASSERT_VALID_PRIO(prio, entry);
#ifdef CONFIG_USERSPACE
__ASSERT((options & K_USER) == 0 || z_stack_is_user_capable(stack),
"user thread %p with kernel-only stack %p",
new_thread, stack);
z_object_init(new_thread);
z_object_init(stack);
new_thread->stack_obj = stack;
@ -631,6 +647,11 @@ k_tid_t z_impl_k_thread_create(struct k_thread *new_thread,
#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack)
{
return z_object_find(stack) != NULL;
}
k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
@ -642,6 +663,10 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
/* The thread and stack objects *must* be in an uninitialized state */
Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(new_thread, K_OBJ_THREAD));
/* No need to check z_stack_is_user_capable(), it won't be in the
* object table if it isn't
*/
stack_object = z_object_find(stack);
Z_OOPS(Z_SYSCALL_VERIFY_MSG(z_obj_validation_check(stack_object, stack,
K_OBJ_THREAD_STACK_ELEMENT,
@ -785,6 +810,8 @@ FUNC_NORETURN void k_thread_user_mode_enter(k_thread_entry_t entry,
_current->entry.parameter3 = p3;
#endif
#ifdef CONFIG_USERSPACE
__ASSERT(z_stack_is_user_capable(_current->stack_obj),
"dropping to user mode with kernel-only stack object");
memset(_current->userspace_local_data, 0,
sizeof(struct _thread_userspace_local_data));
arch_user_mode_enter(entry, p1, p2, p3);


@ -98,8 +98,6 @@ struct perm_ctx {
* mode stacks are allocated as an array. The base of the array is
* aligned to Z_PRIVILEGE_STACK_ALIGN, and all members must be as well.
*/
BUILD_ASSERT(CONFIG_PRIVILEGED_STACK_SIZE % Z_PRIVILEGE_STACK_ALIGN == 0);
uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
{
struct z_object *obj = z_object_find(stack);


@ -175,33 +175,13 @@ extern_env = {}
class KobjectInstance:
def __init__(self, type_obj, addr):
global thread_counter
global sys_mutex_counter
global futex_counter
global stack_counter
self.addr = addr
self.type_obj = type_obj
# Type name determined later since drivers needs to look at the
# API struct address
self.type_name = None
if self.type_obj.name == "k_thread":
# Assign an ID for this thread object, used to track its
# permissions to other kernel objects
self.data = thread_counter
thread_counter = thread_counter + 1
elif self.type_obj.name == "sys_mutex":
self.data = "&kernel_mutexes[%d]" % sys_mutex_counter
sys_mutex_counter += 1
elif self.type_obj.name == "k_futex":
self.data = "&futex_data[%d]" % futex_counter
futex_counter += 1
elif self.type_obj.name == STACK_TYPE:
stack_counter += 1
else:
self.data = 0
self.data = 0
class KobjectType:
@ -512,11 +492,18 @@ def device_get_api_addr(elf, addr):
def find_kobjects(elf, syms):
global thread_counter
global sys_mutex_counter
global futex_counter
global stack_counter
if not elf.has_dwarf_info():
sys.exit("ELF file has no DWARF information")
app_smem_start = syms["_app_smem_start"]
app_smem_end = syms["_app_smem_end"]
user_stack_start = syms["z_user_stacks_start"]
user_stack_end = syms["z_user_stacks_end"]
di = elf.get_dwarf_info()
@ -630,6 +617,26 @@ def find_kobjects(elf, syms):
% (ko.type_obj.name, hex(addr)))
continue
if (ko.type_obj.name == STACK_TYPE and
(addr < user_stack_start or addr >= user_stack_end)):
debug("skip kernel-only stack at %s" % hex(addr))
continue
# At this point we know the object will be included in the gperf table
if ko.type_obj.name == "k_thread":
# Assign an ID for this thread object, used to track its
# permissions to other kernel objects
ko.data = thread_counter
thread_counter = thread_counter + 1
elif ko.type_obj.name == "sys_mutex":
ko.data = "&kernel_mutexes[%d]" % sys_mutex_counter
sys_mutex_counter += 1
elif ko.type_obj.name == "k_futex":
ko.data = "&futex_data[%d]" % futex_counter
futex_counter += 1
elif ko.type_obj.name == STACK_TYPE:
stack_counter += 1
if ko.type_obj.name != "device":
# Not a device struct so we immediately know its type
ko.type_name = kobject_to_enum(ko.type_obj.name)
@ -748,9 +755,11 @@ def write_gperf_table(fp, syms, objs, little_endian, static_begin, static_end):
if "CONFIG_GEN_PRIV_STACKS" in syms:
metadata_names["K_OBJ_THREAD_STACK_ELEMENT"] = "stack_data"
if stack_counter != 0:
# Same as K_KERNEL_STACK_ARRAY_DEFINE, but routed to a different
# memory section.
fp.write("static uint8_t Z_GENERIC_SECTION(.priv_stacks.noinit) "
" __aligned(Z_PRIVILEGE_STACK_ALIGN)"
" priv_stacks[%d][CONFIG_PRIVILEGED_STACK_SIZE];\n"
" __aligned(Z_KERNEL_STACK_OBJ_ALIGN)"
" priv_stacks[%d][Z_KERNEL_STACK_LEN(CONFIG_PRIVILEGED_STACK_SIZE)];\n"
% stack_counter)
fp.write("static struct z_stack_data stack_data[%d] = {\n"


@ -19,11 +19,13 @@ struct k_thread test_thread;
#define STEST_STACKSIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)
K_THREAD_STACK_DEFINE(user_stack, STEST_STACKSIZE);
K_THREAD_STACK_ARRAY_DEFINE(user_stack_array, NUM_STACKS, STEST_STACKSIZE);
K_KERNEL_STACK_DEFINE(kern_stack, STEST_STACKSIZE);
K_KERNEL_STACK_ARRAY_DEFINE(kern_stack_array, NUM_STACKS, STEST_STACKSIZE);
struct foo {
int bar;
K_THREAD_STACK_MEMBER(stack, STEST_STACKSIZE);
K_KERNEL_STACK_MEMBER(stack, STEST_STACKSIZE);
int baz;
};
@ -58,11 +60,13 @@ static inline int z_vrfy_check_perms(void *addr, size_t size, int write)
#include <syscalls/check_perms_mrsh.c>
#endif /* CONFIG_USERSPACE */
void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size)
void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size,
bool is_user_stack)
{
size_t stack_size, unused, carveout;
size_t stack_size, unused, carveout, reserved, alignment;
uint8_t val;
char *stack_start, *stack_ptr, *stack_end, *obj_start, *obj_end;
char *stack_buf;
volatile char *pos;
int ret, expected;
uintptr_t base = (uintptr_t)stack_obj;
@ -73,12 +77,24 @@ void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size)
#else
is_usermode = false;
#endif
/* Dump interesting information */
stack_info_get(&stack_start, &stack_size);
printk(" - Thread reports buffer %p size %zu\n", stack_start,
stack_size);
#ifdef CONFIG_USERSPACE
if (is_user_stack) {
reserved = K_THREAD_STACK_RESERVED;
stack_buf = Z_THREAD_STACK_BUFFER(stack_obj);
alignment = Z_THREAD_STACK_OBJ_ALIGN(stack_size);
} else
#endif
{
reserved = K_KERNEL_STACK_RESERVED;
stack_buf = Z_KERNEL_STACK_BUFFER(stack_obj);
alignment = Z_KERNEL_STACK_OBJ_ALIGN;
}
stack_end = stack_start + stack_size;
obj_end = (char *)stack_obj + obj_size;
obj_start = (char *)stack_obj;
@ -86,7 +102,7 @@ void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size)
/* Assert that the created stack object, with the reserved data
* removed, can hold a thread buffer of STEST_STACKSIZE
*/
zassert_true(STEST_STACKSIZE <= (obj_size - K_THREAD_STACK_RESERVED),
zassert_true(STEST_STACKSIZE <= (obj_size - reserved),
"bad stack size in object");
/* Check that the stack info in the thread marks a region
@ -98,9 +114,9 @@ void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size)
"stack size in thread struct out of bounds (underflow)");
/* Check that the base of the stack is aligned properly. */
zassert_true(base % Z_THREAD_STACK_OBJ_ALIGN(stack_size) == 0,
zassert_true(base % alignment == 0,
"stack base address %p not aligned to %zu",
stack_obj, Z_THREAD_STACK_OBJ_ALIGN(stack_size));
stack_obj, alignment);
/* Check that the entire stack buffer is read/writable */
printk(" - check read/write to stack buffer\n");
@ -143,12 +159,12 @@ void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size)
zassert_true(check_perms(stack_end, 1, 0),
"user mode access to memory %p past end of stack object",
obj_end);
zassert_true(stack_size <= obj_size - K_THREAD_STACK_RESERVED,
zassert_true(stack_size <= obj_size - reserved,
"bad stack size %zu in thread struct",
stack_size);
}
#endif
carveout = stack_start - Z_THREAD_STACK_BUFFER(stack_obj);
carveout = stack_start - stack_buf;
printk(" - Carved-out space in buffer: %zu\n", carveout);
zassert_true(carveout < stack_size,
"Suspicious carve-out space reported");
@ -168,27 +184,35 @@ void stack_buffer_scenarios(k_thread_stack_t *stack_obj, size_t obj_size)
}
}
ZTEST_BMEM struct scenario_data {
k_thread_stack_t *stack;
bool is_user;
size_t metadata_size;
size_t object_size;
} scenario_data;
void stest_thread_entry(void *p1, void *p2, void *p3)
{
bool drop = (bool)p3;
bool drop = (bool)p1;
if (drop) {
k_thread_user_mode_enter(stest_thread_entry, p1, p2,
(void *)false);
k_thread_user_mode_enter(stest_thread_entry, (void *)false,
p2, p3);
} else {
stack_buffer_scenarios((k_thread_stack_t *)p1, (size_t)p2);
stack_buffer_scenarios(scenario_data.stack,
scenario_data.object_size,
scenario_data.is_user);
}
}
void stest_thread_launch(void *stack_obj, size_t obj_size, uint32_t flags,
bool drop)
void stest_thread_launch(uint32_t flags, bool drop)
{
int ret;
size_t unused;
k_thread_create(&test_thread, stack_obj, STEST_STACKSIZE,
stest_thread_entry, stack_obj, (void *)obj_size,
(void *)drop,
k_thread_create(&test_thread, scenario_data.stack, STEST_STACKSIZE,
stest_thread_entry,
(void *)drop, NULL, NULL,
-1, flags, K_NO_WAIT);
k_thread_join(&test_thread, K_FOREVER);
@ -199,15 +223,46 @@ void stest_thread_launch(void *stack_obj, size_t obj_size, uint32_t flags,
void scenario_entry(void *stack_obj, size_t obj_size)
{
bool is_user;
size_t metadata_size;
#ifdef CONFIG_USERSPACE
struct z_object *zo;
zo = z_object_find(stack_obj);
if (zo != NULL) {
is_user = true;
#ifdef CONFIG_GEN_PRIV_STACKS
metadata_size = zo->data.stack_data->size;
#else
metadata_size = zo->data.stack_size;
#endif /* CONFIG_GEN_PRIV_STACKS */
printk("stack may host user thread, size in metadata is %zu\n",
metadata_size);
} else
#endif /* CONFIG_USERSPACE */
{
metadata_size = 0;
is_user = false;
}
scenario_data.stack = stack_obj;
scenario_data.object_size = obj_size;
scenario_data.is_user = is_user;
scenario_data.metadata_size = metadata_size;
printk("Stack object %p[%zu]\n", stack_obj, obj_size);
printk(" - Testing supervisor mode\n");
stest_thread_launch(stack_obj, obj_size, 0, false);
printk(" - Testing user mode (direct launch)\n");
stest_thread_launch(stack_obj, obj_size, K_USER | K_INHERIT_PERMS,
false);
printk(" - Testing user mode (drop)\n");
stest_thread_launch(stack_obj, obj_size, K_INHERIT_PERMS,
true);
stest_thread_launch(0, false);
#ifdef CONFIG_USERSPACE
if (is_user) {
printk(" - Testing user mode (direct launch)\n");
stest_thread_launch(K_USER | K_INHERIT_PERMS, false);
printk(" - Testing user mode (drop)\n");
stest_thread_launch(K_INHERIT_PERMS, true);
}
#endif /* CONFIG_USERSPACE */
}
/**
@ -223,6 +278,9 @@ void test_stack_buffer(void)
{
printk("Reserved space (thread stacks): %zu\n",
K_THREAD_STACK_RESERVED);
printk("Reserved space (kernel stacks): %zu\n",
K_KERNEL_STACK_RESERVED);
printk("CONFIG_ISR_STACK_SIZE %zu\n", (size_t)CONFIG_ISR_STACK_SIZE);
for (int i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
printk("irq stack %d: %p size %zu\n",
@ -231,16 +289,28 @@ void test_stack_buffer(void)
}
printk("Provided stack size: %u\n", STEST_STACKSIZE);
scenario_entry(stest_stack, sizeof(stest_stack));
printk("\ntesting user_stack\n");
scenario_entry(user_stack, sizeof(user_stack));
for (int i = 0; i < NUM_STACKS; i++) {
scenario_entry(stest_stack_array[i],
sizeof(stest_stack_array[i]));
printk("\ntesting user_stack_array[%d]\n", i);
scenario_entry(user_stack_array[i],
sizeof(user_stack_array[i]));
}
printk("\ntesting kern_stack\n");
scenario_entry(kern_stack, sizeof(kern_stack));
for (int i = 0; i < NUM_STACKS; i++) {
printk("\ntesting kern_stack_array[%d]\n", i);
scenario_entry(kern_stack_array[i],
sizeof(kern_stack_array[i]));
}
printk("\ntesting stest_member_stack\n");
scenario_entry(&stest_member_stack.stack,
sizeof(stest_member_stack.stack));
}
void test_main(void)