kernel: overhaul stack specification
The core kernel now computes the initial stack pointer for a thread, properly
aligning it and subtracting out any random offsets or thread-local storage
areas. arch_new_thread() no longer needs to make any calculations; an initial
stack frame may be placed at the bounds of the new 'stack_ptr' parameter
passed in. This parameter replaces 'stack_size'.

thread->stack_info is now set before arch_new_thread() is invoked, and
z_new_thread_init() has been removed. The values populated may need to be
adjusted on arches which carve out MPU guard space from the actual stack
buffer.

thread->stack_info has a new member, 'delta', which indicates any offset
applied for TLS or a random stack offset. It is recorded so that the
calculations do not need to be repeated if the thread later drops to user
mode.

CONFIG_INIT_STACKS logic is now performed inside z_setup_new_thread(), before
arch_new_thread() is called.

thread->stack_info is now defined as the canonical user-accessible area
within the stack object, including random offsets and TLS. It will never
include any carved-out memory for MPU guards, and must be updated at runtime
if guards are removed.

Available stack space is no longer wasted: some arches need to round the
buffer size up significantly for page-level granularity or power-of-two MPU
requirements, and that extra space is now accounted for and used, by virtue
of the Z_THREAD_STACK_SIZE_ADJUST() call in z_setup_new_thread().

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent d4b6226aa9
commit b0c155f3ca
27 changed files with 570 additions and 764 deletions
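The heart of the change is visible in the arch_new_thread() hunks below: the
kernel hands the arch layer a ready-to-use initial stack pointer instead of a
raw size. As a standalone sketch (not part of this commit; the frame members
are illustrative placeholders), the carving arithmetic works like this:

/* Standalone sketch of the frame-carving arithmetic, not Zephyr code: the
 * kernel computes an aligned initial stack pointer, and the arch code carves
 * its initial frame immediately below it, as Z_STACK_PTR_TO_FRAME() does.
 */
#include <stdint.h>
#include <stdio.h>

#define STACK_PTR_TO_FRAME(type, ptr) ((type *)((ptr) - sizeof(type)))

struct init_stack_frame {       /* illustrative, not the real layout */
        uintptr_t pc;
        uintptr_t r0;
};

int main(void)
{
        static uint8_t stack_obj[1024] __attribute__((aligned(8)));
        /* The kernel would compute this: top of the writable buffer, minus
         * any TLS/random offset (stack_info.delta), aligned down.
         */
        uintptr_t stack_ptr = (uintptr_t)stack_obj + sizeof(stack_obj);
        struct init_stack_frame *iframe =
                STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr);

        iframe->pc = 0xdeadbeef;  /* would be the entry wrapper address */
        iframe->r0 = 0x1;         /* would be the entry argument */
        printf("frame at %p, stack top %p\n", (void *)iframe,
               (void *)stack_ptr);
        return 0;
}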
@@ -31,95 +31,73 @@ static const struct z_exc_handle exceptions[] = {
 #endif

 #if defined(CONFIG_MPU_STACK_GUARD)
-#define IS_MPU_GUARD_VIOLATION(guard_start, fault_addr, stack_ptr) \
-	((fault_addr >= guard_start) && \
-	(fault_addr < (guard_start + STACK_GUARD_SIZE)) && \
-	(stack_ptr <= (guard_start + STACK_GUARD_SIZE)))
-
 /**
  * @brief Assess occurrence of current thread's stack corruption
  *
- * This function performs an assessment whether a memory fault (on a
- * given memory address) is the result of stack memory corruption of
- * the current thread.
+ * This function performs an assessment whether a memory fault (on a given
+ * memory address) is the result of a stack overflow of the current thread.
  *
- * Thread stack corruption for supervisor threads or user threads in
- * privilege mode (when User Space is supported) is reported upon an
- * attempt to access the stack guard area (if MPU Stack Guard feature
- * is supported). Additionally the current thread stack pointer
- * must be pointing inside or below the guard area.
- *
- * Thread stack corruption for user threads in user mode is reported,
- * if the current stack pointer is pointing below the start of the current
- * thread's stack.
- *
- * Notes:
- * - we assume a fully descending stack,
- * - we assume a stacking error has occurred,
- * - the function shall be called when handling MPU privilege violation
- *
- * If stack corruption is detected, the function returns the lowest
- * allowed address where the Stack Pointer can safely point to, to
- * prevent from errors when un-stacking the corrupted stack frame
- * upon exception return.
+ * When called, we know at this point that we received an ARC
+ * protection violation, with any cause code, with the protection access
+ * error either "MPU" or "Secure MPU". In other words, an MPU fault of
+ * some kind. Need to determine whether this is a general MPU access
+ * exception or the specific case of a stack overflow.
  *
  * @param fault_addr memory address on which memory access violation
  *                   has been reported.
  * @param sp stack pointer when exception comes out
- *
- * @return The lowest allowed stack frame pointer, if error is a
- *         thread stack corruption, otherwise return 0.
+ * @retval True if this appears to be a stack overflow
+ * @retval False if this does not appear to be a stack overflow
  */
-static uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, uint32_t sp)
+static bool z_check_thread_stack_fail(const uint32_t fault_addr, uint32_t sp)
 {
 	const struct k_thread *thread = _current;
+	uint32_t guard_end, guard_start;

 	if (!thread) {
-		return 0;
+		/* TODO: Under what circumstances could we get here ? */
+		return false;
 	}
-#if defined(CONFIG_USERSPACE)
-	if (thread->arch.priv_stack_start) {
-		/* User thread */
-		if (z_arc_v2_aux_reg_read(_ARC_V2_ERSTATUS)
-			& _ARC_V2_STATUS32_U) {
-			/* Thread's user stack corruption */
-#ifdef CONFIG_ARC_HAS_SECURE
-			sp = z_arc_v2_aux_reg_read(_ARC_V2_SEC_U_SP);
-#else
-			sp = z_arc_v2_aux_reg_read(_ARC_V2_USER_SP);
-#endif
-			if (sp <= (uint32_t)thread->stack_obj) {
-				return (uint32_t)thread->stack_obj;
-			}
+
+#ifdef CONFIG_USERSPACE
+	if ((thread->base.user_options & K_USER) != 0) {
+		if ((z_arc_v2_aux_reg_read(_ARC_V2_ERSTATUS) &
+		     _ARC_V2_STATUS32_U) != 0) {
+			/* Normal user mode context. There is no specific
+			 * "guard" installed in this case, instead what's
+			 * happening is that the stack pointer is crashing
+			 * into the privilege mode stack buffer which
+			 * immediately precedes it.
+			 */
+			guard_end = thread->stack_info.start;
+			guard_start = (uint32_t)thread->stack_obj;
 		} else {
-			/* User thread in privilege mode */
-			if (IS_MPU_GUARD_VIOLATION(
-				thread->arch.priv_stack_start - STACK_GUARD_SIZE,
-				fault_addr, sp)) {
-				/* Thread's privilege stack corruption */
-				return thread->arch.priv_stack_start;
-			}
+			/* Special case: handling a syscall on privilege stack.
+			 * There is guard memory reserved immediately before
+			 * it.
+			 */
+			guard_end = thread->arch.priv_stack_start;
+			guard_start = guard_end - Z_ARC_STACK_GUARD_SIZE;
 		}
-	} else {
-		/* Supervisor thread */
-		if (IS_MPU_GUARD_VIOLATION((uint32_t)thread->stack_obj,
-			fault_addr, sp)) {
-			/* Supervisor thread stack corruption */
-			return (uint32_t)thread->stack_obj + STACK_GUARD_SIZE;
-		}
-	}
-#else /* CONFIG_USERSPACE */
-	if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start,
-			fault_addr, sp)) {
-		/* Thread stack corruption */
-		return thread->stack_info.start + STACK_GUARD_SIZE;
-	}
+	} else
 #endif /* CONFIG_USERSPACE */
+	{
+		/* Supervisor thread */
+		guard_end = thread->stack_info.start;
+		guard_start = guard_end - Z_ARC_STACK_GUARD_SIZE;
+	}

-	return 0;
+	/* treat any MPU exceptions within the guard region as a stack
+	 * overflow if the stack pointer is at or below the end of the guard
+	 * region.
+	 */
+	if (sp <= guard_end && fault_addr < guard_end &&
+	    fault_addr >= guard_start) {
+		return true;
+	}
+
+	return false;
 }

 #endif

 #ifdef CONFIG_ARC_EXCEPTION_DEBUG
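The rewritten check above boils down to one predicate over a computed guard
window. A standalone restatement (not Zephyr code), with a couple of sanity
checks on made-up addresses:

/* A fault counts as a stack overflow only when it lands inside
 * [guard_start, guard_end) and the stack pointer has descended to or past
 * guard_end; any other MPU fault is a general access violation.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool is_stack_overflow(uint32_t guard_start, uint32_t guard_end,
                              uint32_t fault_addr, uint32_t sp)
{
        return sp <= guard_end && fault_addr < guard_end &&
               fault_addr >= guard_start;
}

int main(void)
{
        /* guard occupies [0x1000, 0x1020); the stack grows down toward it */
        assert(is_stack_overflow(0x1000, 0x1020, 0x1018, 0x1010));  /* hit */
        assert(!is_stack_overflow(0x1000, 0x1020, 0x2000, 0x2100)); /* unrelated */
        return 0;
}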
@@ -200,7 +200,8 @@ void arc_core_mpu_configure_thread(struct k_thread *thread)
 	if (thread->base.user_options & K_USER) {
 		LOG_DBG("configure user thread %p's stack", thread);
 		if (_mpu_configure(THREAD_STACK_USER_REGION,
-				   (uint32_t)thread->stack_obj, thread->stack_info.size) < 0) {
+				   (uint32_t)thread->stack_info.start,
+				   thread->stack_info.size) < 0) {
 			LOG_ERR("user thread %p's stack failed", thread);
 			return;
 		}
@@ -545,50 +545,38 @@ void arc_core_mpu_configure_thread(struct k_thread *thread)
 	_mpu_reset_dynamic_regions();
 #endif
 #if defined(CONFIG_MPU_STACK_GUARD)
+	uint32_t guard_start;
+
+	/* Set location of guard area when the thread is running in
+	 * supervisor mode. For a supervisor thread, this is just low
+	 * memory in the stack buffer. For a user thread, it only runs
+	 * in supervisor mode when handling a system call on the privilege
+	 * elevation stack.
+	 */
 #if defined(CONFIG_USERSPACE)
 	if ((thread->base.user_options & K_USER) != 0U) {
-		/* the areas before and after the user stack of thread is
-		 * kernel only. These area can be used as stack guard.
-		 * -----------------------
-		 * | kernel only area    |
-		 * |---------------------|
-		 * | user stack          |
-		 * |---------------------|
-		 * |privilege stack guard|
-		 * |---------------------|
-		 * | privilege stack     |
-		 * -----------------------
-		 */
-		if (_mpu_configure(THREAD_STACK_GUARD_REGION,
-			thread->arch.priv_stack_start - STACK_GUARD_SIZE,
-			STACK_GUARD_SIZE) < 0) {
-			LOG_ERR("thread %p's stack guard failed", thread);
-			return;
-		}
-	} else {
-		if (_mpu_configure(THREAD_STACK_GUARD_REGION,
-			thread->stack_info.start - STACK_GUARD_SIZE,
-			STACK_GUARD_SIZE) < 0) {
-			LOG_ERR("thread %p's stack guard failed", thread);
-			return;
-		}
-	}
-#else
-	if (_mpu_configure(THREAD_STACK_GUARD_REGION,
-		thread->stack_info.start - STACK_GUARD_SIZE,
-		STACK_GUARD_SIZE) < 0) {
+		guard_start = thread->arch.priv_stack_start;
+	} else
+#endif
+	{
+		guard_start = thread->stack_info.start;
+	}
+	guard_start -= Z_ARC_STACK_GUARD_SIZE;
+
+	if (_mpu_configure(THREAD_STACK_GUARD_REGION, guard_start,
+			   Z_ARC_STACK_GUARD_SIZE) < 0) {
 		LOG_ERR("thread %p's stack guard failed", thread);
 		return;
 	}
-#endif
 #endif /* CONFIG_MPU_STACK_GUARD */

 #if defined(CONFIG_USERSPACE)
 	/* configure stack region of user thread */
 	if (thread->base.user_options & K_USER) {
 		LOG_DBG("configure user thread %p's stack", thread);
 		if (_mpu_configure(THREAD_STACK_USER_REGION,
-				   (uint32_t)thread->stack_obj, thread->stack_info.size) < 0) {
+				   (uint32_t)thread->stack_info.start,
+				   thread->stack_info.size) < 0) {
 			LOG_ERR("thread %p's stack failed", thread);
 			return;
 		}
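The refactor above collapses three nearly identical _mpu_configure() calls
into one by first choosing the supervisor-mode stack base. A standalone
sketch (not Zephyr code, made-up addresses and guard size) of that selection:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GUARD_SIZE 32U /* stands in for Z_ARC_STACK_GUARD_SIZE */

static uint32_t guard_base(bool user_thread, uint32_t priv_stack_start,
                           uint32_t stack_info_start)
{
        /* User threads take syscalls on the privilege stack; supervisor
         * threads just run on the thread stack buffer.
         */
        uint32_t base = user_thread ? priv_stack_start : stack_info_start;

        return base - GUARD_SIZE; /* guard sits immediately below */
}

int main(void)
{
        printf("supervisor guard at %#x\n",
               (unsigned int)guard_base(false, 0, 0x20001000));
        printf("user (syscall) guard at %#x\n",
               (unsigned int)guard_base(true, 0x20000800, 0));
        return 0;
}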
@@ -33,164 +33,137 @@ struct init_stack_frame {
 	uint32_t r0;
 };

+#ifdef CONFIG_USERSPACE
+struct user_init_stack_frame {
+	struct init_stack_frame iframe;
+	uint32_t user_sp;
+};
+
+static bool is_user(struct k_thread *thread)
+{
+	return (thread->base.user_options & K_USER) != 0;
+}
+#endif
+
+/* Set all stack-related architecture variables for the provided thread */
+static void setup_stack_vars(struct k_thread *thread)
+{
+#ifdef CONFIG_USERSPACE
+	if (is_user(thread)) {
+#ifdef CONFIG_GEN_PRIV_STACKS
+		thread->arch.priv_stack_start =
+			(uint32_t)z_priv_stack_find(thread->stack_obj);
+#else
+		thread->arch.priv_stack_start = (uint32_t)(thread->stack_obj);
+#endif /* CONFIG_GEN_PRIV_STACKS */
+		thread->arch.priv_stack_start += Z_ARC_STACK_GUARD_SIZE;
+	} else {
+		thread->arch.priv_stack_start = 0;
+	}
+#endif /* CONFIG_USERSPACE */
+
+#ifdef CONFIG_ARC_STACK_CHECKING
+#ifdef CONFIG_USERSPACE
+	if (is_user(thread)) {
+		thread->arch.k_stack_top = thread->arch.priv_stack_start;
+		thread->arch.k_stack_base = (thread->arch.priv_stack_start +
+					     CONFIG_PRIVILEGED_STACK_SIZE);
+		thread->arch.u_stack_top = thread->stack_info.start;
+		thread->arch.u_stack_base = (thread->stack_info.start +
+					     thread->stack_info.size);
+	} else
+#endif /* CONFIG_USERSPACE */
+	{
+		thread->arch.k_stack_top = (uint32_t)thread->stack_info.start;
+		thread->arch.k_stack_base = (uint32_t)(thread->stack_info.start +
+						       thread->stack_info.size);
+#ifdef CONFIG_USERSPACE
+		thread->arch.u_stack_top = 0;
+		thread->arch.u_stack_base = 0;
+#endif /* CONFIG_USERSPACE */
+	}
+#endif /* CONFIG_ARC_STACK_CHECKING */
+}
+
+/* Get the initial stack frame pointer from the thread's stack buffer. */
+static struct init_stack_frame *get_iframe(struct k_thread *thread,
+					   char *stack_ptr)
+{
+#ifdef CONFIG_USERSPACE
+	if (is_user(thread)) {
+		/* Initial stack frame for a user thread is slightly larger;
+		 * we land in z_user_thread_entry_wrapper on the privilege
+		 * stack, and pop off an additional value for the user
+		 * stack pointer.
+		 */
+		struct user_init_stack_frame *uframe;
+
+		uframe = Z_STACK_PTR_TO_FRAME(struct user_init_stack_frame,
+					      thread->arch.priv_stack_start +
+					      CONFIG_PRIVILEGED_STACK_SIZE);
+		uframe->user_sp = (uint32_t)stack_ptr;
+		return &uframe->iframe;
+	}
+#endif
+	return Z_STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr);
+}
+
 /*
  * The initial context is a basic stack frame that contains arguments for
  * z_thread_entry() return address, that points at z_thread_entry()
  * and status register.
  */
 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-		     size_t stack_size, k_thread_entry_t entry,
+		     char *stack_ptr, k_thread_entry_t entry,
 		     void *p1, void *p2, void *p3)
 {
-	char *pStackMem = Z_THREAD_STACK_BUFFER(stack);
-	char *stackEnd;
-	char *priv_stack_end;
-	struct init_stack_frame *pInitCtx;
+	struct init_stack_frame *iframe;

-#ifdef CONFIG_USERSPACE
-	size_t stackAdjSize;
-	size_t offset = 0;
-	bool is_user = (thread->base.user_options & K_USER) != 0;
-
-	stackAdjSize = Z_ARC_MPU_SIZE_ALIGN(stack_size);
-	stackEnd = pStackMem + stackAdjSize;
-
-#ifdef CONFIG_STACK_POINTER_RANDOM
-	offset = stackAdjSize - stack_size;
-#endif
-
-	if (is_user) {
-#ifdef CONFIG_GEN_PRIV_STACKS
-		thread->arch.priv_stack_start =
-			(uint32_t)z_priv_stack_find(thread->stack_obj);
-#else
-		thread->arch.priv_stack_start =
-			(uint32_t)(stackEnd + STACK_GUARD_SIZE);
-#endif
-
-		priv_stack_end = (char *)Z_STACK_PTR_ALIGN(
-			thread->arch.priv_stack_start +
-			CONFIG_PRIVILEGED_STACK_SIZE);
-
-		/* reserve 4 bytes for the start of user sp */
-		priv_stack_end -= 4;
-		(*(uint32_t *)priv_stack_end) = Z_STACK_PTR_ALIGN(
-			(uint32_t)stackEnd - offset);
-
-#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
-		/* reserve stack space for the userspace local data struct */
-		thread->userspace_local_data =
-			(struct _thread_userspace_local_data *)
-			Z_STACK_PTR_ALIGN(stackEnd -
-				sizeof(*thread->userspace_local_data) - offset);
-		/* update the start of user sp */
-		(*(uint32_t *)priv_stack_end) =
-			(uint32_t) thread->userspace_local_data;
-#endif
-	} else {
-		pStackMem += STACK_GUARD_SIZE;
-		stackEnd += STACK_GUARD_SIZE;
-
-		thread->arch.priv_stack_start = 0;
-
-#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
-		/* reserve stack space for the userspace local data struct */
-		priv_stack_end = (char *)Z_STACK_PTR_ALIGN(stackEnd
-			- sizeof(*thread->userspace_local_data) - offset);
-		thread->userspace_local_data =
-			(struct _thread_userspace_local_data *)priv_stack_end;
-#else
-		priv_stack_end = (char *)Z_STACK_PTR_ALIGN(stackEnd - offset);
-#endif
-	}
-
-	z_new_thread_init(thread, pStackMem, stackAdjSize);
-
-	/* carve the thread entry struct from the "base" of
-	   the privileged stack */
-	pInitCtx = (struct init_stack_frame *)(
-		priv_stack_end - sizeof(struct init_stack_frame));
-
-	/* fill init context */
-	pInitCtx->status32 = 0U;
-	if (is_user) {
-		pInitCtx->pc = ((uint32_t)z_user_thread_entry_wrapper);
-	} else {
-		pInitCtx->pc = ((uint32_t)z_thread_entry_wrapper);
-	}
-
-	/*
-	 * enable US bit, US is read as zero in user mode. This will allow use
+	setup_stack_vars(thread);
+
+	/* Set up initial stack frame */
+	iframe = get_iframe(thread, stack_ptr);
+
+#ifdef CONFIG_USERSPACE
+	/* enable US bit, US is read as zero in user mode. This will allow user
 	 * mode sleep instructions, and it enables a form of denial-of-service
 	 * attack by putting the processor in sleep mode, but since interrupt
 	 * level/mask can't be set from user space that's not worse than
 	 * executing a loop without yielding.
 	 */
-	pInitCtx->status32 |= _ARC_V2_STATUS32_US;
-#else /* For no USERSPACE feature */
-	pStackMem += STACK_GUARD_SIZE;
-	stackEnd = pStackMem + stack_size;
-
-	z_new_thread_init(thread, pStackMem, stack_size);
-
-	priv_stack_end = stackEnd;
-
-	pInitCtx = (struct init_stack_frame *)(
-		Z_STACK_PTR_ALIGN(priv_stack_end) -
-		sizeof(struct init_stack_frame));
-
-	pInitCtx->status32 = 0U;
-	pInitCtx->pc = ((uint32_t)z_thread_entry_wrapper);
-#endif
-
-#ifdef CONFIG_ARC_SECURE_FIRMWARE
-	pInitCtx->sec_stat = z_arc_v2_aux_reg_read(_ARC_V2_SEC_STAT);
-#endif
-
-	pInitCtx->r0 = (uint32_t)entry;
-	pInitCtx->r1 = (uint32_t)p1;
-	pInitCtx->r2 = (uint32_t)p2;
-	pInitCtx->r3 = (uint32_t)p3;
-
-	/* stack check configuration */
-#ifdef CONFIG_ARC_STACK_CHECKING
-#ifdef CONFIG_ARC_SECURE_FIRMWARE
-	pInitCtx->sec_stat |= _ARC_V2_SEC_STAT_SSC;
-#else
-	pInitCtx->status32 |= _ARC_V2_STATUS32_SC;
-#endif
-#ifdef CONFIG_USERSPACE
-	if (is_user) {
-		thread->arch.u_stack_top = (uint32_t)pStackMem;
-		thread->arch.u_stack_base = (uint32_t)stackEnd;
-		thread->arch.k_stack_top =
-			(uint32_t)(thread->arch.priv_stack_start);
-		thread->arch.k_stack_base = (uint32_t)
-			(thread->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE);
-	} else {
-		thread->arch.k_stack_top = (uint32_t)pStackMem;
-		thread->arch.k_stack_base = (uint32_t)stackEnd;
-		thread->arch.u_stack_top = 0;
-		thread->arch.u_stack_base = 0;
-	}
-#else
-	thread->arch.k_stack_top = (uint32_t) pStackMem;
-	thread->arch.k_stack_base = (uint32_t) stackEnd;
-#endif
+	iframe->status32 = _ARC_V2_STATUS32_US;
+	if (is_user(thread)) {
+		iframe->pc = (uint32_t)z_user_thread_entry_wrapper;
+	} else {
+		iframe->pc = (uint32_t)z_thread_entry_wrapper;
+	}
+#else
+	iframe->status32 = 0;
+	iframe->pc = ((uint32_t)z_thread_entry_wrapper);
+#endif /* CONFIG_USERSPACE */
+#ifdef CONFIG_ARC_SECURE_FIRMWARE
+	iframe->sec_stat = z_arc_v2_aux_reg_read(_ARC_V2_SEC_STAT);
+#endif
+	iframe->r0 = (uint32_t)entry;
+	iframe->r1 = (uint32_t)p1;
+	iframe->r2 = (uint32_t)p2;
+	iframe->r3 = (uint32_t)p3;
+
+#ifdef CONFIG_ARC_STACK_CHECKING
+#ifdef CONFIG_ARC_SECURE_FIRMWARE
+	iframe->sec_stat |= _ARC_V2_SEC_STAT_SSC;
+#else
+	iframe->status32 |= _ARC_V2_STATUS32_SC;
+#endif /* CONFIG_ARC_SECURE_FIRMWARE */
+#endif /* CONFIG_ARC_STACK_CHECKING */
 #ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
-	pInitCtx->status32 |= _ARC_V2_STATUS32_AD;
+	iframe->status32 |= _ARC_V2_STATUS32_AD;
 #endif
-
 	/* Set required thread members */
 	thread->switch_handle = thread;
 	thread->arch.relinquish_cause = _CAUSE_COOP;
 	thread->callee_saved.sp =
-		(uint32_t)pInitCtx - ___callee_saved_stack_t_SIZEOF;
+		(uint32_t)iframe - ___callee_saved_stack_t_SIZEOF;

 	/* initial values in all other regs/k_thread entries are irrelevant */
 }
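For user threads, get_iframe() above places the frame at the top of the
privilege stack and stashes the user-mode stack pointer one word below it. A
standalone sketch (not Zephyr code; members and addresses are illustrative):

#include <stdint.h>
#include <stdio.h>

#define STACK_PTR_TO_FRAME(type, ptr) ((type *)((ptr) - sizeof(type)))

struct init_stack_frame {       /* illustrative members only */
        uint32_t pc;
        uint32_t r0;
};

struct user_init_stack_frame {
        struct init_stack_frame iframe;
        uint32_t user_sp;       /* popped by the user entry wrapper */
};

int main(void)
{
        static uint8_t priv_stack[256] __attribute__((aligned(8)));
        uintptr_t priv_top = (uintptr_t)priv_stack + sizeof(priv_stack);
        uintptr_t user_stack_ptr = 0x20002000; /* made-up user stack top */

        struct user_init_stack_frame *uframe =
                STACK_PTR_TO_FRAME(struct user_init_stack_frame, priv_top);

        uframe->user_sp = (uint32_t)user_stack_ptr;
        printf("iframe at %p inside the privilege stack\n",
               (void *)&uframe->iframe);
        return 0;
}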
@@ -202,42 +175,21 @@ void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
 }

 #ifdef CONFIG_USERSPACE
-
 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 					void *p1, void *p2, void *p3)
 {
-
-	_current->stack_info.start = (uint32_t)_current->stack_obj;
-#ifdef CONFIG_GEN_PRIV_STACKS
-	_current->arch.priv_stack_start =
-		(uint32_t)z_priv_stack_find(_current->stack_obj);
-#else
-	_current->arch.priv_stack_start =
-		(uint32_t)(_current->stack_info.start +
-			_current->stack_info.size + STACK_GUARD_SIZE);
-#endif
-
-#ifdef CONFIG_ARC_STACK_CHECKING
-	_current->arch.k_stack_top = _current->arch.priv_stack_start;
-	_current->arch.k_stack_base = _current->arch.priv_stack_start +
-				CONFIG_PRIVILEGED_STACK_SIZE;
-	_current->arch.u_stack_top = _current->stack_info.start;
-	_current->arch.u_stack_base = _current->stack_info.start +
-				_current->stack_info.size;
-#endif
+	setup_stack_vars(_current);

 	/* possible optimization: no need to load mem domain anymore */
 	/* need to lock cpu here ? */
 	configure_mpu_thread(_current);

 	z_arc_userspace_enter(user_entry, p1, p2, p3,
-			      (uint32_t)_current->stack_obj,
-			      _current->stack_info.size, _current);
+			      (uint32_t)_current->stack_info.start,
+			      (_current->stack_info.size -
+			       _current->stack_info.delta), _current);
 	CODE_UNREACHABLE;
 }

 #endif

 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
@@ -16,6 +16,13 @@
 #include <ksched.h>
 #include <wait_q.h>

+#if (MPU_GUARD_ALIGN_AND_SIZE_FLOAT > MPU_GUARD_ALIGN_AND_SIZE)
+#define FP_GUARD_EXTRA_SIZE	(MPU_GUARD_ALIGN_AND_SIZE_FLOAT - \
+				 MPU_GUARD_ALIGN_AND_SIZE)
+#else
+#define FP_GUARD_EXTRA_SIZE	0
+#endif
+
 /* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
  * end of the stack, and thus reusable by the stack when not needed anymore.
  *
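FP_GUARD_EXTRA_SIZE above is simply the difference between the float-capable
guard and the regular guard, clamped at zero. A standalone demo (not Zephyr
code, made-up sizes):

#include <stdio.h>

#define GUARD_SIZE        32U  /* stands in for MPU_GUARD_ALIGN_AND_SIZE */
#define GUARD_SIZE_FLOAT 128U  /* stands in for MPU_GUARD_ALIGN_AND_SIZE_FLOAT */

#if (GUARD_SIZE_FLOAT > GUARD_SIZE)
#define FP_GUARD_EXTRA (GUARD_SIZE_FLOAT - GUARD_SIZE)
#else
#define FP_GUARD_EXTRA 0
#endif

int main(void)
{
        /* this much extra is carved from an FP-using thread's stack buffer */
        printf("extra guard for FP threads: %u bytes\n",
               (unsigned int)FP_GUARD_EXTRA);
        return 0;
}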
@@ -29,111 +36,63 @@
  * of the ESF.
  */
 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-		     size_t stack_size, k_thread_entry_t entry,
+		     char *stack_ptr, k_thread_entry_t entry,
 		     void *p1, void *p2, void *p3)
 {
-	char *pStackMem = Z_THREAD_STACK_BUFFER(stack);
-	char *stackEnd;
-	/* Offset between the top of stack and the high end of stack area. */
-	uint32_t top_of_stack_offset = 0U;
+	struct __basic_sf *iframe;

-#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) \
-	&& defined(CONFIG_USERSPACE)
-	/* This is required to work-around the case where the thread
-	 * is created without using K_THREAD_STACK_SIZEOF() macro in
-	 * k_thread_create(). If K_THREAD_STACK_SIZEOF() is used, the
-	 * Guard size has already been taken out of stackSize.
-	 */
-	stackSize -= MPU_GUARD_ALIGN_AND_SIZE;
-#endif
-
-#if defined(CONFIG_USERSPACE)
-	/* Truncate the stack size to align with the MPU region granularity.
-	 * This is done proactively to account for the case when the thread
-	 * switches to user mode (thus, its stack area will need to be MPU-
-	 * programmed to be assigned unprivileged RW access permission).
-	 */
-	stack_size &= ~(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE - 1);
-
-#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
-	/* Reserve space on top of stack for local data. */
-	uint32_t p_local_data = Z_STACK_PTR_ALIGN(pStackMem + stack_size
-		- sizeof(*thread->userspace_local_data));
-
-	thread->userspace_local_data =
-		(struct _thread_userspace_local_data *)(p_local_data);
-
-	/* Top of actual stack must be moved below the user local data. */
-	top_of_stack_offset = (uint32_t)
-		(pStackMem + stack_size - ((char *)p_local_data));
-
-#endif /* CONFIG_THREAD_USERSPACE_LOCAL_DATA */
-#endif /* CONFIG_USERSPACE */
-
-#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) \
-	&& defined(CONFIG_MPU_STACK_GUARD)
-	/* For a thread which intends to use the FP services, it is required to
-	 * allocate a wider MPU guard region, to always successfully detect an
-	 * overflow of the stack.
-	 *
-	 * Note that the wider MPU regions requires re-adjusting the stack_info
-	 * .start and .size.
-	 */
+#ifdef CONFIG_MPU_STACK_GUARD
+#if CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
+	/* Guard area is carved-out of the buffer, instead of reserved,
+	 * in this configuration, due to buffer alignment constraints
+	 */
+	thread->stack_info.start += MPU_GUARD_ALIGN_AND_SIZE;
+	thread->stack_info.size -= MPU_GUARD_ALIGN_AND_SIZE;
+#endif /* CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
+#if FP_GUARD_EXTRA_SIZE > 0
 	if ((thread->base.user_options & K_FP_REGS) != 0) {
-		pStackMem += MPU_GUARD_ALIGN_AND_SIZE_FLOAT
-			- MPU_GUARD_ALIGN_AND_SIZE;
-		stack_size -= MPU_GUARD_ALIGN_AND_SIZE_FLOAT
-			- MPU_GUARD_ALIGN_AND_SIZE;
+		/* Larger guard needed due to lazy stacking of FP regs may
+		 * overshoot the guard area without writing anything. We
+		 * carve it out of the stack buffer as-needed instead of
+		 * unconditionally reserving it.
+		 */
+		thread->stack_info.start += FP_GUARD_EXTRA_SIZE;
+		thread->stack_info.size -= FP_GUARD_EXTRA_SIZE;
 	}
-#endif
-	stackEnd = pStackMem + stack_size;
-
-	struct __esf *pInitCtx;
-
-	z_new_thread_init(thread, pStackMem, stack_size);
-
-	/* Carve the thread entry struct from the "base" of the stack
-	 *
-	 * The initial carved stack frame only needs to contain the basic
-	 * stack frame (state context), because no FP operations have been
-	 * performed yet for this thread.
-	 */
-	pInitCtx = (struct __esf *)(Z_STACK_PTR_ALIGN(stackEnd -
-		(char *)top_of_stack_offset - sizeof(struct __basic_sf)));
+#endif /* FP_GUARD_EXTRA_SIZE */
+#endif /* CONFIG_MPU_STACK_GUARD */

+	iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr);
 #if defined(CONFIG_USERSPACE)
 	if ((thread->base.user_options & K_USER) != 0) {
-		pInitCtx->basic.pc = (uint32_t)arch_user_mode_enter;
+		iframe->pc = (uint32_t)arch_user_mode_enter;
 	} else {
-		pInitCtx->basic.pc = (uint32_t)z_thread_entry;
+		iframe->pc = (uint32_t)z_thread_entry;
 	}
 #else
-	pInitCtx->basic.pc = (uint32_t)z_thread_entry;
+	iframe->pc = (uint32_t)z_thread_entry;
 #endif

 #if defined(CONFIG_CPU_CORTEX_M)
 	/* force ARM mode by clearing LSB of address */
-	pInitCtx->basic.pc &= 0xfffffffe;
+	iframe->pc &= 0xfffffffe;
 #endif

-	pInitCtx->basic.a1 = (uint32_t)entry;
-	pInitCtx->basic.a2 = (uint32_t)p1;
-	pInitCtx->basic.a3 = (uint32_t)p2;
-	pInitCtx->basic.a4 = (uint32_t)p3;
+	iframe->a1 = (uint32_t)entry;
+	iframe->a2 = (uint32_t)p1;
+	iframe->a3 = (uint32_t)p2;
+	iframe->a4 = (uint32_t)p3;

 #if defined(CONFIG_CPU_CORTEX_M)
-	pInitCtx->basic.xpsr =
+	iframe->xpsr =
 		0x01000000UL; /* clear all, thumb bit is 1, even if RO */
 #else
-	pInitCtx->basic.xpsr = A_BIT | MODE_SYS;
+	iframe->xpsr = A_BIT | MODE_SYS;
 #if defined(CONFIG_COMPILER_ISA_THUMB2)
-	pInitCtx->basic.xpsr |= T_BIT;
+	iframe->xpsr |= T_BIT;
 #endif /* CONFIG_COMPILER_ISA_THUMB2 */
 #endif /* CONFIG_CPU_CORTEX_M */

-	thread->callee_saved.psp = (uint32_t)pInitCtx;
-
+	thread->callee_saved.psp = (uint32_t)iframe;
 	thread->arch.basepri = 0;

 #if defined(CONFIG_USERSPACE) || defined(CONFIG_FPU_SHARING)
@@ -142,9 +101,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	thread->arch.priv_stack_start = 0;
 #endif
 #endif
-
-	/* swap_return_value can contain garbage */
-
 	/*
 	 * initial values in all other registers/thread entries are
 	 * irrelevant.
@@ -152,7 +108,6 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 }

 #ifdef CONFIG_USERSPACE
-
 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 					void *p1, void *p2, void *p3)
 {
@@ -161,6 +116,24 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 	_current->arch.priv_stack_start =
 		(uint32_t)z_priv_stack_find(_current->stack_obj);
 #if defined(CONFIG_MPU_STACK_GUARD)
+#if defined(CONFIG_THREAD_STACK_INFO)
+	/* We're dropping to user mode which means the guard area is no
+	 * longer used here, it instead is moved to the privilege stack
+	 * to catch stack overflows there. Un-do the calculations done
+	 * which accounted for memory borrowed from the thread stack.
+	 */
+#if FP_GUARD_EXTRA_SIZE > 0
+	if ((_current->base.user_options & K_FP_REGS) != 0) {
+		_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
+		_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
+	}
+#endif /* FP_GUARD_EXTRA_SIZE */
+#ifdef CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
+	_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
+	_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
+#endif /* CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
+#endif /* CONFIG_THREAD_STACK_INFO */
+
 	/* Stack guard area reserved at the bottom of the thread's
 	 * privileged stack. Adjust the available (writable) stack
 	 * buffer area accordingly.
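The un-borrowing step above widens stack_info again once the guard moves to
the privilege stack. A standalone sketch (not Zephyr code, made-up values):

#include <stddef.h>
#include <stdio.h>

struct stack_info {
        size_t start;
        size_t size;
};

#define GUARD 32U /* size of the guard carved out of the buffer */

static void undo_guard_carve_out(struct stack_info *si)
{
        si->start -= GUARD; /* buffer grows back down over the old guard */
        si->size += GUARD;
}

int main(void)
{
        struct stack_info si = { 0x20000020, 992 };

        undo_guard_carve_out(&si);
        printf("start %#zx, size %zu\n", si.start, si.size);
        return 0;
}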
@@ -176,7 +149,8 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,

 	z_arm_userspace_enter(user_entry, p1, p2, p3,
 			      (uint32_t)_current->stack_info.start,
-			      _current->stack_info.size);
+			      _current->stack_info.size -
+			      _current->stack_info.delta);
 	CODE_UNREACHABLE;
 }
@@ -44,19 +44,12 @@ struct init_stack_frame {
  * anymore.
  */
 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-		     size_t stack_size, k_thread_entry_t entry,
+		     char *stack_ptr, k_thread_entry_t entry,
 		     void *p1, void *p2, void *p3)
 {
-	char *pStackMem = Z_THREAD_STACK_BUFFER(stack);
-	char *stackEnd;
 	struct init_stack_frame *pInitCtx;

-	stackEnd = pStackMem + stack_size;
-
-	z_new_thread_init(thread, pStackMem, stack_size);
-
-	pInitCtx = (struct init_stack_frame *)(Z_STACK_PTR_ALIGN(stackEnd -
-		sizeof(struct init_stack_frame)));
+	pInitCtx = Z_STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr);

 	pInitCtx->entry_point = (uint64_t)entry;
 	pInitCtx->arg1 = (uint64_t)p1;
@@ -29,17 +29,13 @@ struct init_stack_frame {

 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-		     size_t stack_size, k_thread_entry_t entry,
+		     char *stack_ptr, k_thread_entry_t entry,
 		     void *arg1, void *arg2, void *arg3)
 {
-	char *stack_memory = Z_THREAD_STACK_BUFFER(stack);
 	struct init_stack_frame *iframe;

-	z_new_thread_init(thread, stack_memory, stack_size);
-
 	/* Initial stack frame data, stored at the base of the stack */
-	iframe = (struct init_stack_frame *)
-		Z_STACK_PTR_ALIGN(stack_memory + stack_size - sizeof(*iframe));
+	iframe = Z_STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr);

 	/* Setup the initial stack frame */
 	iframe->entry_point = entry;
@@ -25,22 +25,16 @@
  * pthreads stack and therefore we ignore the stack size
  */
 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-		     size_t stack_size, k_thread_entry_t entry,
+		     char *stack_ptr, k_thread_entry_t entry,
 		     void *p1, void *p2, void *p3)
 {
-
-	char *stack_memory = Z_THREAD_STACK_BUFFER(stack);
-
 	posix_thread_status_t *thread_status;

-	z_new_thread_init(thread, stack_memory, stack_size);
-
 	/* We store it in the same place where normal archs store the
 	 * "initial stack frame"
 	 */
-	thread_status = (posix_thread_status_t *)
-		Z_STACK_PTR_ALIGN(stack_memory + stack_size
-				  - sizeof(*thread_status));
+	thread_status = Z_STACK_PTR_TO_FRAME(posix_thread_status_t, stack_ptr);

 	/* z_thread_entry() arguments */
 	thread_status->entry_point = entry;
@@ -13,23 +13,17 @@ void z_thread_entry_wrapper(k_thread_entry_t thread,
 			    void *arg3);

 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-		     size_t stack_size, k_thread_entry_t entry,
+		     char *stack_ptr, k_thread_entry_t entry,
 		     void *p1, void *p2, void *p3)
 {
-	char *stack_memory = Z_THREAD_STACK_BUFFER(stack);
-
 	struct __esf *stack_init;

 #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
 	const struct soc_esf soc_esf_init = {SOC_ESF_INIT};
 #endif

-	z_new_thread_init(thread, stack_memory, stack_size);
-
 	/* Initial stack frame for thread */
-	stack_init = (struct __esf *)
-		Z_STACK_PTR_ALIGN(stack_memory +
-				  stack_size - sizeof(struct __esf));
+	stack_init = Z_STACK_PTR_TO_FRAME(struct __esf, stack_ptr);

 	/* Setup the initial stack frame */
 	stack_init->a0 = (ulong_t)entry;
@@ -181,7 +181,7 @@ static FUNC_NORETURN __used void df_handler_top(void)
 	_df_esf.eflags = _main_tss.eflags;

 	/* Restore the main IA task to a runnable state */
-	_main_tss.esp = (uint32_t)(ARCH_THREAD_STACK_BUFFER(
+	_main_tss.esp = (uint32_t)(Z_THREAD_STACK_BUFFER(
 		z_interrupt_stacks[0]) + CONFIG_ISR_STACK_SIZE);
 	_main_tss.cs = CODE_SEG;
 	_main_tss.ds = DATA_SEG;
@@ -61,17 +61,12 @@ int arch_float_disable(struct k_thread *thread)
 #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-		     size_t stack_size, k_thread_entry_t entry,
+		     char *stack_ptr, k_thread_entry_t entry,
 		     void *p1, void *p2, void *p3)
 {
-	char *stack_buf;
-	char *stack_high;
 	void *swap_entry;
 	struct _x86_initial_frame *initial_frame;

-	stack_buf = Z_THREAD_STACK_BUFFER(stack);
-	z_new_thread_init(thread, stack_buf, stack_size);
-
 #if CONFIG_X86_STACK_PROTECTION
 	struct z_x86_thread_stack_header *header =
 		(struct z_x86_thread_stack_header *)stack;

@@ -88,11 +83,10 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	swap_entry = z_thread_entry;
 #endif

-	stack_high = (char *)Z_STACK_PTR_ALIGN(stack_buf + stack_size);
-
 	/* Create an initial context on the stack expected by z_swap() */
-	initial_frame = (struct _x86_initial_frame *)
-		(stack_high - sizeof(struct _x86_initial_frame));
+	initial_frame = Z_STACK_PTR_TO_FRAME(struct _x86_initial_frame,
+					     stack_ptr);

 	/* z_thread_entry() arguments */
 	initial_frame->entry = entry;
 	initial_frame->p1 = p1;
@@ -11,13 +11,17 @@

 extern void x86_sse_init(struct k_thread *); /* in locore.S */

+struct x86_initial_frame {
+	/* zeroed return address for ABI */
+	uint64_t rip;
+};
+
 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-		     size_t stack_size, k_thread_entry_t entry,
+		     char *stack_ptr, k_thread_entry_t entry,
 		     void *p1, void *p2, void *p3)
 {
 	void *switch_entry;
-
-	z_new_thread_init(thread, Z_THREAD_STACK_BUFFER(stack), stack_size);
+	struct x86_initial_frame *iframe;

 #if CONFIG_X86_STACK_PROTECTION
 	struct z_x86_thread_stack_header *header =

@@ -35,8 +39,9 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 #else
 	switch_entry = z_thread_entry;
 #endif
-	thread->callee_saved.rsp = (long) Z_THREAD_STACK_BUFFER(stack);
-	thread->callee_saved.rsp += (stack_size - 8); /* fake RIP for ABI */
+	iframe = Z_STACK_PTR_TO_FRAME(struct x86_initial_frame, stack_ptr);
+	iframe->rip = 0;
+	thread->callee_saved.rsp = (long) iframe;
 	thread->callee_saved.rip = (long) switch_entry;
 	thread->callee_saved.rflags = EFLAGS_INITIAL;
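The new x86-64 frame above is nothing more than a zeroed return address: the
ad-hoc "rsp += stack_size - 8" arithmetic becomes a typed struct carved with
the common helper. A standalone sketch (not Zephyr code) of the idea:

#include <stdint.h>
#include <stdio.h>

#define STACK_PTR_TO_FRAME(type, ptr) ((type *)((ptr) - sizeof(type)))

struct x86_initial_frame {
        uint64_t rip; /* zeroed fake return address for the ABI */
};

int main(void)
{
        static uint8_t stack[512] __attribute__((aligned(16)));
        uintptr_t stack_ptr = (uintptr_t)stack + sizeof(stack);
        struct x86_initial_frame *iframe =
                STACK_PTR_TO_FRAME(struct x86_initial_frame, stack_ptr);

        /* the first "return" the new thread takes sees a NULL caller */
        iframe->rip = 0;
        printf("initial rsp: %p\n", (void *)iframe);
        return 0;
}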
@@ -74,7 +74,8 @@ FUNC_NORETURN static void drop_to_user(k_thread_entry_t user_entry,
 	 * any old context since this is a one-way operation
 	 */
 	stack_end = Z_STACK_PTR_ALIGN(_current->stack_info.start +
-				      _current->stack_info.size);
+				      _current->stack_info.size -
+				      _current->stack_info.delta);

 	z_x86_userspace_enter(user_entry, p1, p2, p3, stack_end,
 			      _current->stack_info.start);
@@ -57,18 +57,10 @@ void *xtensa_init_stack(int *stack_top,
 }

 void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
-		     size_t stack_size, k_thread_entry_t entry,
+		     char *stack_ptr, k_thread_entry_t entry,
 		     void *p1, void *p2, void *p3)
 {
-	char *base = Z_THREAD_STACK_BUFFER(stack);
-	char *top = base + stack_size;
-
-	/* Align downward. The API as specified requires a runtime check. */
-	top = (char *)(((unsigned int)top) & ~3);
-
-	z_new_thread_init(thread, base, stack_size);
-
-	thread->switch_handle = xtensa_init_stack((void *)top, entry,
+	thread->switch_handle = xtensa_init_stack((int *)stack_ptr, entry,
 						  p1, p2, p3);
 }
@@ -38,137 +38,146 @@
 #endif
 #endif

 #ifndef _ASMLANGUAGE

 #ifdef __cplusplus
 extern "C" {
 #endif

 #define ARCH_STACK_PTR_ALIGN	4

-#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
-#if defined(CONFIG_ARC_CORE_MPU)
-#if CONFIG_ARC_MPU_VER == 2
-/*
- * The minimum MPU region of MPU v2 is 2048 bytes. The
- * start address of MPU region should be aligned to the
- * region size
- */
-#define STACK_ALIGN 2048
-#elif CONFIG_ARC_MPU_VER == 3
-#define STACK_ALIGN 32
-#else
-#error "Unsupported MPU version"
-#endif /* CONFIG_ARC_MPU_VER */
-
-#else /* CONFIG_ARC_CORE_MPU */
-#error "Requires to enable MPU"
-#endif
-
-#else /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */
-#define STACK_ALIGN 4
+/* Indicate, for a minimally sized MPU region, how large it must be and what
+ * its base address must be aligned to.
+ *
+ * For regions that are NOT the minimum size, this define has no semantics
+ * on ARC MPUv2 as its regions must be power of two size and aligned to their
+ * own size. On ARC MPUv3, region sizes are arbitrary and this just indicates
+ * the required size granularity.
+ */
+#ifdef CONFIG_ARC_CORE_MPU
+#if CONFIG_ARC_MPU_VER == 2
+#define Z_ARC_MPU_ALIGN	2048
+#elif CONFIG_ARC_MPU_VER == 3
+#define Z_ARC_MPU_ALIGN	32
+#else
+#error "Unsupported MPU version"
+#endif
 #endif

-#if defined(CONFIG_MPU_STACK_GUARD)
-#if CONFIG_ARC_MPU_VER == 3
-#define STACK_GUARD_SIZE 32
-#endif
-#else /* CONFIG_MPU_STACK_GUARD */
-#define STACK_GUARD_SIZE 0
+#ifdef CONFIG_MPU_STACK_GUARD
+#define Z_ARC_STACK_GUARD_SIZE	Z_ARC_MPU_ALIGN
+#else
+#define Z_ARC_STACK_GUARD_SIZE	0
 #endif

+#ifdef CONFIG_USERSPACE
+/* Any thread running in user mode will have full access to the region denoted
+ * by thread.stack_info.
+ *
+ * Thread-local storage is at the very highest memory locations of this area.
+ * Memory for TLS and any initial random stack pointer offset is captured
+ * in thread.stack_info.delta.
+ */
+#ifdef CONFIG_MPU_STACK_GUARD
+/* MPU guards are only supported with V3 MPU and later. In this configuration
+ * the stack object will contain the MPU guard, the privilege stack, and then
+ * the stack buffer in that order:
+ *
+ * +------------+ <- thread.stack_obj
+ * | Guard      | } Z_ARC_STACK_GUARD_SIZE
+ * +------------+ <- thread.arch.priv_stack_start
+ * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
+ * +------------+ <- thread.stack_info.start
+ * | Thread     |
+ * | stack      |
+ * |            |
+ * +............|
+ * | TLS        | } thread.stack_info.delta
+ * +------------+ <- thread.stack_info.start + thread.stack_info.size
+ */
+#define ARCH_THREAD_STACK_RESERVED	(Z_ARC_STACK_GUARD_SIZE + \
+					 CONFIG_PRIVILEGED_STACK_SIZE)
+#define ARCH_THREAD_STACK_OBJ_ALIGN(size)	Z_ARC_MPU_ALIGN
+/* We need to be able to exactly cover the stack buffer with an MPU region,
+ * so round its size up to the required granularity of the MPU
+ */
+#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
+	(ROUND_UP((size), Z_ARC_MPU_ALIGN))
+BUILD_ASSERT(CONFIG_PRIVILEGED_STACK_SIZE % Z_ARC_MPU_ALIGN == 0,
+	     "improper privilege stack size");
+#else /* !CONFIG_MPU_STACK_GUARD */
+/* Userspace enabled, but supervisor stack guards are not in use */
+#ifdef CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT
+/* Use defaults for everything. The privilege elevation stack is located
+ * in another area of memory generated at build time by gen_kobject_list.py
+ *
+ * +------------+ <- thread.arch.priv_stack_start
+ * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
+ * +------------+
+ *
+ * +------------+ <- thread.stack_obj = thread.stack_info.start
+ * | Thread     |
+ * | stack      |
+ * |            |
+ * +............|
+ * | TLS        | } thread.stack_info.delta
+ * +------------+ <- thread.stack_info.start + thread.stack_info.size
+ */
+#define Z_PRIVILEGE_STACK_ALIGN	ARCH_STACK_PTR_ALIGN
+#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
+	Z_POW2_CEIL(ROUND_UP((size), Z_ARC_MPU_ALIGN))
+#define ARCH_THREAD_STACK_OBJ_ALIGN(size) \
+	ARCH_THREAD_STACK_SIZE_ADJUST(size)
+#define ARCH_THREAD_STACK_RESERVED	0
+#else /* !CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
+/* Reserved area of the thread object just contains the privilege stack:
+ *
+ * +------------+ <- thread.stack_obj = thread.arch.priv_stack_start
+ * | Priv Stack | } CONFIG_PRIVILEGED_STACK_SIZE
+ * +------------+ <- thread.stack_info.start
+ * | Thread     |
+ * | stack      |
+ * |            |
+ * +............|
+ * | TLS        | } thread.stack_info.delta
+ * +------------+ <- thread.stack_info.start + thread.stack_info.size
+ */
+#define ARCH_THREAD_STACK_RESERVED	CONFIG_PRIVILEGED_STACK_SIZE
+#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
+	(ROUND_UP((size), Z_ARC_MPU_ALIGN))
+#define ARCH_THREAD_STACK_OBJ_ALIGN(size)	Z_ARC_MPU_ALIGN
+
+BUILD_ASSERT(CONFIG_PRIVILEGED_STACK_SIZE % Z_ARC_MPU_ALIGN == 0,
+	     "improper privilege stack size");
+#endif /* CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT */
+#endif /* CONFIG_MPU_STACK_GUARD */
+
+#else /* !CONFIG_USERSPACE */
+
-#define STACK_SIZE_ALIGN(x)	ROUND_UP(x, STACK_ALIGN)
-
-/**
- * @brief Calculate power of two ceiling for a buffer size input
- */
-#define POW2_CEIL(x) ((1 << (31 - __builtin_clz(x))) < x ? \
-		       1 << (31 - __builtin_clz(x) + 1) : \
-		       1 << (31 - __builtin_clz(x)))
-
-#if CONFIG_ARC_MPU_VER == 2
-/* MPUv2 requires
- * - region size must be power of 2 and >= 2048
- * - region start must be aligned to its size
- */
-#define Z_ARC_MPU_SIZE_ALIGN(size) POW2_CEIL(STACK_SIZE_ALIGN(size))
-#elif CONFIG_ARC_MPU_VER == 3
-/* MPUv3 requires
- * - region size must be 32 bytes aligned
- * - region start must be 32 bytes aligned
- */
-#define Z_ARC_MPU_SIZE_ALIGN(size) STACK_SIZE_ALIGN(size)
-#endif
-
-#if defined(CONFIG_USERSPACE)
-
-#if CONFIG_ARC_MPU_VER == 2
-/* MPU stack guard does not work for MPUv2 as it uses GEN_PRIV_STACK */
-#define ARCH_THREAD_STACK_RESERVED 0
-#define Z_PRIVILEGE_STACK_ALIGN CONFIG_PRIVILEGED_STACK_SIZE
-/*
- * user stacks are protected using MPU regions, so need to adhere to
- * MPU start, size alignment
- */
-#define Z_ARC_THREAD_STACK_ALIGN(size)	Z_ARC_MPU_SIZE_ALIGN(size)
-#define ARCH_THREAD_STACK_LEN(size)	Z_ARC_MPU_SIZE_ALIGN(size)
-
-#elif CONFIG_ARC_MPU_VER == 3
-#define ARCH_THREAD_STACK_RESERVED \
-	(STACK_GUARD_SIZE + CONFIG_PRIVILEGED_STACK_SIZE)
-#define Z_PRIVILEGE_STACK_ALIGN (STACK_ALIGN)
-
-#define Z_ARC_THREAD_STACK_ALIGN(size)	(STACK_ALIGN)
-#define ARCH_THREAD_STACK_LEN(size) \
-	(Z_ARC_MPU_SIZE_ALIGN(size) + ARCH_THREAD_STACK_RESERVED)
-
-#endif /* CONFIG_ARC_MPU_VER == 2 */
-
-#else /* CONFIG_USERSPACE */
-/*
- * For MPU STACK_GUARD kernel stacks do not need a MPU region to protect,
- * only guard needs to be protected and aligned. For MPUv3, MPU_STACK_GUARD
- * requires start 32 bytes aligned, also for size which is decided by stack
- * array and USERSPACE.
- *
- * When no-mpu and no USERSPACE/MPU_STACK_GUARD, everything is 4 bytes
- * aligned
- */
-#define ARCH_THREAD_STACK_RESERVED (STACK_GUARD_SIZE)
-
-#define Z_ARC_THREAD_STACK_ALIGN(size)	(STACK_ALIGN)
-#define ARCH_THREAD_STACK_LEN(size) \
-	(STACK_SIZE_ALIGN(size) + ARCH_THREAD_STACK_RESERVED)
+#ifdef CONFIG_MPU_STACK_GUARD
+/* Only supported on ARC MPU V3 and higher. Reserve some memory for the stack
+ * guard. This is just a minimally-sized region at the beginning of the stack
+ * object, which is programmed to produce an exception if written to.
+ *
+ * +------------+ <- thread.stack_obj
+ * | Guard      | } Z_ARC_STACK_GUARD_SIZE
+ * +------------+ <- thread.stack_info.start
+ * | Thread     |
+ * | stack      |
+ * |            |
+ * +............|
+ * | TLS        | } thread.stack_info.delta
+ * +------------+ <- thread.stack_info.start + thread.stack_info.size
+ */
+#define ARCH_THREAD_STACK_RESERVED	Z_ARC_STACK_GUARD_SIZE
+#define ARCH_THREAD_STACK_OBJ_ALIGN(size)	Z_ARC_MPU_ALIGN
+/* Default for ARCH_THREAD_STACK_SIZE_ADJUST */
+#else /* !CONFIG_MPU_STACK_GUARD */
+/* No stack guard, no userspace, Use defaults for everything. */
+#endif /* CONFIG_MPU_STACK_GUARD */
 #endif /* CONFIG_USERSPACE */

-#define Z_ARC_THREAD_STACK_ARRAY_LEN(size) ARCH_THREAD_STACK_LEN(size)
-
-#define ARCH_THREAD_STACK_DEFINE(sym, size) \
-	struct z_thread_stack_element __noinit \
-		__aligned(Z_ARC_THREAD_STACK_ALIGN(size)) \
-		sym[ARCH_THREAD_STACK_LEN(size)]
-
-#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
-	struct z_thread_stack_element __noinit \
-		__aligned(Z_ARC_THREAD_STACK_ALIGN(size)) \
-		sym[nmemb][Z_ARC_THREAD_STACK_ARRAY_LEN(size)]
-
-#define ARCH_THREAD_STACK_MEMBER(sym, size) \
-	struct z_thread_stack_element \
-		__aligned(Z_ARC_THREAD_STACK_ALIGN(size)) \
-		sym[ARCH_THREAD_STACK_LEN(size)]
-
-#define ARCH_THREAD_STACK_SIZEOF(sym) \
-	(sizeof(sym) - ARCH_THREAD_STACK_RESERVED)
-
-#define ARCH_THREAD_STACK_BUFFER(sym) \
-	((char *)(sym))
-
 #ifdef CONFIG_ARC_MPU
-#ifndef _ASMLANGUAGE
+/* Legacy case: retain containing extern "C" with C++ */
 #include <arch/arc/v2/mpu/arc_mpu.h>

@@ -211,26 +220,24 @@ extern "C" {
 #define K_MEM_PARTITION_IS_EXECUTABLE(attr) \
 	((attr) & (AUX_MPU_ATTR_KE | AUX_MPU_ATTR_UE))

 #endif /* _ASMLANGUAGE */

 #if CONFIG_ARC_MPU_VER == 2
 #define _ARCH_MEM_PARTITION_ALIGN_CHECK(start, size) \
-	BUILD_ASSERT(!(((size) & ((size) - 1))) && (size) >= STACK_ALIGN \
+	BUILD_ASSERT(!(((size) & ((size) - 1))) && (size) >= Z_ARC_MPU_ALIGN \
 		     && !((uint32_t)(start) & ((size) - 1)), \
 		     "the size of the partition must be power of 2" \
 		     " and greater than or equal to the mpu address alignment." \
 		     "start address of the partition must align with size.")
 #elif CONFIG_ARC_MPU_VER == 3
 #define _ARCH_MEM_PARTITION_ALIGN_CHECK(start, size) \
-	BUILD_ASSERT((size) % STACK_ALIGN == 0 && (size) >= STACK_ALIGN \
-		     && (uint32_t)(start) % STACK_ALIGN == 0, \
+	BUILD_ASSERT((size) % Z_ARC_MPU_ALIGN == 0 && \
+		     (size) >= Z_ARC_MPU_ALIGN && \
+		     (uint32_t)(start) % Z_ARC_MPU_ALIGN == 0, \
 		     "the size of the partition must align with 32" \
 		     " and greater than or equal to 32." \
 		     "start address of the partition must align with 32.")
 #endif
 #endif /* CONFIG_ARC_MPU*/

 #ifndef _ASMLANGUAGE
 /* Typedef for the k_mem_partition attribute*/
 typedef uint32_t k_mem_partition_attr_t;
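The ARC sizing macros above differ mainly in granularity: MPUv3 wants
32-byte multiples, while MPUv2 wants power-of-two regions aligned to their
own size. A standalone demo (not Zephyr code; ROUND_UP and the power-of-two
ceiling are re-implemented locally just for the demo):

#include <stdint.h>
#include <stdio.h>

#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

static uint32_t pow2_ceil(uint32_t x)
{
        uint32_t p = 1;

        while (p < x) {
                p <<= 1;
        }
        return p;
}

int main(void)
{
        uint32_t req = 1000; /* requested stack size in bytes */

        /* MPUv3-style adjust: round to 32-byte granularity */
        printf("MPUv3 adjust: %u -> %u\n", (unsigned int)req,
               (unsigned int)ROUND_UP(req, 32));
        /* MPUv2-style adjust: round up, then take the power-of-two ceiling */
        printf("MPUv2 adjust: %u -> %u\n", (unsigned int)req,
               (unsigned int)pow2_ceil(ROUND_UP(req, 32)));
        return 0;
}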
@@ -150,17 +150,23 @@ extern "C" {
 #define Z_MPU_GUARD_ALIGN	MPU_GUARD_ALIGN_AND_SIZE
 #endif

-/**
- * @brief Define alignment of a stack buffer
- *
- * This is used for two different things:
- *
- * -# Used in checks for stack size to be a multiple of the stack buffer
- *    alignment
- * -# Used to determine the alignment of a stack buffer
- *
- */
-#define STACK_ALIGN MAX(Z_THREAD_MIN_STACK_ALIGN, Z_MPU_GUARD_ALIGN)
+#if defined(CONFIG_USERSPACE) && \
+	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
+/* This MPU requires regions to be sized to a power of two, and aligned to
+ * their own size. Since an MPU region must be able to cover the entire
+ * user-accessible stack buffer, we size/align to match. The privilege
+ * mode stack is generated elsewhere in memory.
+ */
+#define ARCH_THREAD_STACK_OBJ_ALIGN(size)	Z_POW2_CEIL(size)
+#define ARCH_THREAD_STACK_SIZE_ADJUST(size)	Z_POW2_CEIL(size)
+#else
+#define ARCH_THREAD_STACK_OBJ_ALIGN(size)	MAX(Z_THREAD_MIN_STACK_ALIGN, \
+						    Z_MPU_GUARD_ALIGN)
+#ifdef CONFIG_USERSPACE
+#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
+	ROUND_UP(size, CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
+#endif
+#endif

 /**
  * @brief Define alignment of a privilege stack buffer

@@ -177,71 +183,30 @@ extern "C" {
 #define Z_PRIVILEGE_STACK_ALIGN MAX(ARCH_STACK_PTR_ALIGN, Z_MPU_GUARD_ALIGN)
 #endif

-/**
- * @brief Calculate power of two ceiling for a buffer size input
- */
-#define POW2_CEIL(x) ((1 << (31 - __builtin_clz(x))) < x ? \
-		       1 << (31 - __builtin_clz(x) + 1) : \
-		       1 << (31 - __builtin_clz(x)))
+/* On arm, the MPU guard can take a few different forms: completely
+ * reserved, completely borrowed, or a combination of the two.
+ *
+ * On devices without power-of-two MPU region requirements, the MPU
+ * guard is reserved as extra memory in the beginning of the stack
+ * object. If we need a larger floating point guard, this is carved
+ * out of the thread stack buffer.
+ *
+ * On devices with power-of-two MPU requirements, the guard is
+ * completely carved out of the thread stack buffer.
+ *
+ * thread->stack_info is updated any time the guard configuration
+ * changes. For instance, if a thread drops down to user mode, then
+ * the guard is no longer necessary and it gets moved to guard the
+ * privilege mode stack instead.
+ */

-#if defined(CONFIG_USERSPACE) && \
-	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
-/* Guard is 'carved-out' of the thread stack region, and the supervisor
- * mode stack is allocated elsewhere by gen_priv_stack.py
- */
+#if !defined(CONFIG_USERSPACE) || \
+	!defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
+/* TODO: Convert all non power-of-two ARM MPUs to not use separate privilege
+ * stack generation, right now this is done unconditionally
+ */
 #define ARCH_THREAD_STACK_RESERVED	0
 #else
 #define ARCH_THREAD_STACK_RESERVED	MPU_GUARD_ALIGN_AND_SIZE
 #endif

-#if defined(CONFIG_USERSPACE) && \
-	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
-#define ARCH_THREAD_STACK_DEFINE(sym, size) \
-	struct z_thread_stack_element __noinit \
-		__aligned(POW2_CEIL(size)) sym[POW2_CEIL(size)]
-#else
-#define ARCH_THREAD_STACK_DEFINE(sym, size) \
-	struct z_thread_stack_element __noinit __aligned(STACK_ALIGN) \
-		sym[size+MPU_GUARD_ALIGN_AND_SIZE]
-#endif
-
-#if defined(CONFIG_USERSPACE) && \
-	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
-#define ARCH_THREAD_STACK_LEN(size) (POW2_CEIL(size))
-#else
-#define ARCH_THREAD_STACK_LEN(size) ((size)+MPU_GUARD_ALIGN_AND_SIZE)
-#endif
-
-#if defined(CONFIG_USERSPACE) && \
-	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
-#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
-	struct z_thread_stack_element __noinit \
-		__aligned(POW2_CEIL(size)) \
-		sym[nmemb][ARCH_THREAD_STACK_LEN(size)]
-#else
-#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
-	struct z_thread_stack_element __noinit \
-		__aligned(STACK_ALIGN) \
-		sym[nmemb][ARCH_THREAD_STACK_LEN(size)]
-#endif
-
-#if defined(CONFIG_USERSPACE) && \
-	defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
-#define ARCH_THREAD_STACK_MEMBER(sym, size) \
-	struct z_thread_stack_element __aligned(POW2_CEIL(size)) \
-		sym[POW2_CEIL(size)]
-#else
-#define ARCH_THREAD_STACK_MEMBER(sym, size) \
-	struct z_thread_stack_element __aligned(STACK_ALIGN) \
-		sym[size+MPU_GUARD_ALIGN_AND_SIZE]
-#endif
-
-#define ARCH_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - MPU_GUARD_ALIGN_AND_SIZE)
-
-#define ARCH_THREAD_STACK_BUFFER(sym) \
-	((char *)(sym) + MPU_GUARD_ALIGN_AND_SIZE)
-
+/* Legacy case: retain containing extern "C" with C++ */
 #ifdef CONFIG_ARM_MPU
 #ifdef CONFIG_CPU_HAS_ARM_MPU
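The comment above distinguishes a reserved guard (extra memory ahead of the
buffer) from a carved-out guard (taken from the buffer itself). A standalone
demo (not Zephyr code, made-up sizes) of the resulting accounting:

#include <stddef.h>
#include <stdio.h>

#define GUARD 32U /* illustrative guard size */

int main(void)
{
        size_t req = 1000; /* requested stack size */

        /* Non-power-of-two MPU: the guard is reserved ahead of the buffer,
         * so the object grows and the full request stays writable.
         */
        printf("reserved: object %zu bytes, writable %zu\n",
               req + GUARD, req);

        /* Power-of-two MPU: the object is rounded up to a power of two and
         * the guard is carved out of the buffer itself.
         */
        size_t obj = 1024; /* power-of-two ceiling of the request */
        printf("carved:   object %zu bytes, writable %zu\n",
               obj, obj - GUARD);
        return 0;
}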
@@ -195,36 +195,13 @@ struct z_x86_thread_stack_header {
#endif /* CONFIG_USERSPACE */
} __packed __aligned(Z_X86_STACK_BASE_ALIGN);

#define ARCH_THREAD_STACK_OBJ_ALIGN(size) Z_X86_STACK_BASE_ALIGN

#define ARCH_THREAD_STACK_SIZE_ADJUST(size) \
	ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN)

#define ARCH_THREAD_STACK_RESERVED \
	sizeof(struct z_x86_thread_stack_header)

#define ARCH_THREAD_STACK_DEFINE(sym, size) \
	struct z_thread_stack_element __noinit \
		__aligned(Z_X86_STACK_BASE_ALIGN) \
		sym[ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN) + \
			ARCH_THREAD_STACK_RESERVED]

#define ARCH_THREAD_STACK_LEN(size) \
	(ROUND_UP((size), \
		  MAX(Z_X86_STACK_BASE_ALIGN, \
		      Z_X86_STACK_SIZE_ALIGN)) + \
	 ARCH_THREAD_STACK_RESERVED)

#define ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
	struct z_thread_stack_element __noinit \
		__aligned(Z_X86_STACK_BASE_ALIGN) \
		sym[nmemb][ARCH_THREAD_STACK_LEN(size)]

#define ARCH_THREAD_STACK_MEMBER(sym, size) \
	struct z_thread_stack_element __aligned(Z_X86_STACK_BASE_ALIGN) \
		sym[ROUND_UP((size), Z_X86_STACK_SIZE_ALIGN) + \
			ARCH_THREAD_STACK_RESERVED]

#define ARCH_THREAD_STACK_SIZEOF(sym) \
	(sizeof(sym) - ARCH_THREAD_STACK_RESERVED)

#define ARCH_THREAD_STACK_BUFFER(sym) \
	((char *)((sym) + ARCH_THREAD_STACK_RESERVED))

#endif /* !_ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_THREAD_STACK_H */
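
(Illustration, not part of the diff.) Assuming, purely for this sketch, a Z_X86_STACK_SIZE_ALIGN of 4096 and a 64-byte z_x86_thread_stack_header — both values actually depend on Kconfig — the macros above size a stack object like this:

/* ARCH_THREAD_STACK_DEFINE(sym, 1000) would then allocate
 *	ROUND_UP(1000, 4096) + 64 = 4096 + 64 = 4160 bytes,
 * ARCH_THREAD_STACK_SIZEOF(sym) reports 4160 - 64 = 4096, and
 * ARCH_THREAD_STACK_BUFFER(sym) starts 64 bytes into the object,
 * just past the reserved header.
 */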
@@ -527,11 +527,20 @@ struct _thread_stack_info {
	 */
	uintptr_t start;

	/* Stack Size - Thread writable stack buffer size. Represents
	 * the size of the actual area, starting from the start member,
	 * that should be writable by the thread
	/* Thread writable stack buffer size. Represents the size of the actual
	 * buffer, starting from the 'start' member, that should be writable by
	 * the thread. This comprises the thread stack area, any area reserved
	 * for local thread data storage, as well as any area left out due to
	 * random adjustments applied to the initial thread stack pointer during
	 * thread initialization.
	 */
	size_t size;

	/* Adjustment value to the size member, removing any storage
	 * used for TLS or random stack base offsets. (start + size - delta)
	 * is the initial stack pointer for a thread. May be 0.
	 */
	size_t delta;
};

typedef struct _thread_stack_info _thread_stack_info_t;
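
(Illustration, not part of the diff.) The point of 'delta' is that the initial stack pointer can be recovered from stack_info alone, for instance when a thread later drops to user mode; a sketch with a hypothetical helper name:

/* Hypothetical helper: reproduce the thread's initial stack pointer
 * without repeating the TLS/random-offset calculations.
 */
static inline uintptr_t initial_stack_ptr(const struct _thread_stack_info *info)
{
	return info->start + info->size - info->delta;
}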

@@ -66,48 +66,12 @@ static inline uint32_t arch_k_cycle_get_32(void);
 * @{
 */

/**
 * @def ARCH_THREAD_STACK_DEFINE(sym, size)
 *
 * @see K_THREAD_STACK_DEFINE()
 */

/**
 * @def ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size)
 *
 * @see K_THREAD_STACK_ARRAY_DEFINE()
 */

/**
 * @def ARCH_THREAD_STACK_LEN(size)
 *
 * @see K_THREAD_STACK_LEN()
 */

/**
 * @def ARCH_THREAD_STACK_MEMBER(sym, size)
 *
 * @see K_THREAD_STACK_MEMBER()
 */

/**
 * @def ARCH_THREAD_STACK_SIZEOF(sym)
 *
 * @see K_THREAD_STACK_SIZEOF()
 */

/**
 * @def ARCH_THREAD_STACK_RESERVED
 *
 * @see K_THREAD_STACK_RESERVED
 */

/**
 * @def ARCH_THREAD_STACK_BUFFER(sym)
 *
 * @see K_THREAD_STACK_BUFFER()
 */

/**
 * @def ARCH_STACK_PTR_ALIGN
 *
@@ -60,6 +60,22 @@ static inline char *z_stack_ptr_align(char *ptr)
}
#define Z_STACK_PTR_ALIGN(ptr) ((uintptr_t)z_stack_ptr_align((char *)(ptr)))

/**
 * @brief Helper macro for getting a stack frame struct
 *
 * It is very common for architectures to define a struct which contains
 * all the data members that are pre-populated in arch_new_thread().
 *
 * Given a type and an initial stack pointer, return a properly cast
 * pointer to the frame struct.
 *
 * @param type Type of the initial stack frame struct
 * @param ptr Initial aligned stack pointer value
 * @return Pointer to stack frame struct within the stack buffer
 */
#define Z_STACK_PTR_TO_FRAME(type, ptr) \
	(type *)((ptr) - sizeof(type))

/**
 * @def K_THREAD_STACK_RESERVED
 * @brief Indicate how much additional memory is reserved for stack objects
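
(Illustration, not part of the diff.) Typical use in an arch port, with a hypothetical frame struct; the macro simply reserves sizeof(type) bytes below the aligned stack pointer and casts:

/* Hypothetical initial-frame struct for some architecture */
struct init_stack_frame {
	void *pc;
	void *arg;
};

void example(char *stack_ptr)
{
	struct init_stack_frame *iframe =
		Z_STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr);

	iframe->pc = 0;		/* populate members before first context switch */
	iframe->arg = 0;
}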
@@ -158,22 +174,22 @@ static inline char *z_stack_ptr_align(char *ptr)
 */
#define K_THREAD_STACK_EXTERN(sym) extern k_thread_stack_t sym[]

/* arch/cpu.h may declare an architecture or platform-specific macro
 * for properly declaring stacks, compatible with MMU/MPU constraints if
 * enabled
/**
 * @brief Return the size in bytes of a stack memory region
 *
 * Convenience macro for passing the desired stack size to k_thread_create()
 * since the underlying implementation may actually create something larger
 * (for instance a guard area).
 *
 * The value returned here is not guaranteed to match the 'size' parameter
 * passed to K_THREAD_STACK_DEFINE and may be larger, but is always safe to
 * pass to k_thread_create() for the associated stack object.
 *
 * @param sym Stack memory symbol
 * @return Size of the stack buffer
 */
#ifdef ARCH_THREAD_STACK_DEFINE
#define K_THREAD_STACK_DEFINE(sym, size) ARCH_THREAD_STACK_DEFINE(sym, size)
#define K_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
	ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size)
#define K_THREAD_STACK_LEN(size) ARCH_THREAD_STACK_LEN(size)
#define K_THREAD_STACK_MEMBER(sym, size) ARCH_THREAD_STACK_MEMBER(sym, size)
#define K_THREAD_STACK_SIZEOF(sym) ARCH_THREAD_STACK_SIZEOF(sym)
static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
{
	return ARCH_THREAD_STACK_BUFFER(sym);
}
#else
#define K_THREAD_STACK_SIZEOF(sym) (sizeof(sym) - K_THREAD_STACK_RESERVED)

/**
 * @brief Declare a toplevel thread stack memory region
 *
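
(Illustration, not part of the diff.) For applications the calling pattern is unchanged; K_THREAD_STACK_SIZEOF() is the value meant to be handed to k_thread_create(), as in this minimal sketch:

K_THREAD_STACK_DEFINE(worker_stack, 1024);
static struct k_thread worker_thread;

static void worker_entry(void *p1, void *p2, void *p3)
{
	/* thread body */
}

void start_worker(void)
{
	k_thread_create(&worker_thread, worker_stack,
			K_THREAD_STACK_SIZEOF(worker_stack),
			worker_entry, NULL, NULL, NULL,
			K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
}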
@@ -203,7 +219,7 @@ static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
#define K_THREAD_STACK_DEFINE(sym, size) \
	struct z_thread_stack_element __noinit \
		__aligned(Z_THREAD_STACK_OBJ_ALIGN(size)) \
		sym[size]
		sym[Z_THREAD_STACK_SIZE_ADJUST(size)]

/**
 * @brief Calculate size of stacks to be allocated in a stack array
@@ -212,10 +228,15 @@ static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
 * inside a stack array. It accepts the indicated "size" as a parameter
 * and if required, pads some extra bytes (e.g. for MPU scenarios). Refer
 * to the K_THREAD_STACK_ARRAY_DEFINE definition to see how this is used.
 * The returned size ensures each array member will be aligned to the
 * required stack base alignment.
 *
 * @param size Size of the stack memory region
 * @return Appropriate size for an array member
 */
#define K_THREAD_STACK_LEN(size) (size)
#define K_THREAD_STACK_LEN(size) \
	ROUND_UP(Z_THREAD_STACK_SIZE_ADJUST(size), \
		 Z_THREAD_STACK_OBJ_ALIGN(Z_THREAD_STACK_SIZE_ADJUST(size)))

/**
 * @brief Declare a toplevel array of thread stack memory regions
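
(Illustration, not part of the diff.) A worked example with assumed values — a 32-byte reserved area added by Z_THREAD_STACK_SIZE_ADJUST() and a 64-byte object alignment:

/* Stand-ins for the real macros, using the assumed values above */
#define EX_SIZE_ADJUST(size)	((size) + 32)
#define EX_OBJ_ALIGN		64
#define EX_ROUND_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))
#define EX_STACK_LEN(size)	EX_ROUND_UP(EX_SIZE_ADJUST(size), EX_OBJ_ALIGN)

/* 1000 + 32 = 1032, rounded up to the next multiple of 64 */
_Static_assert(EX_STACK_LEN(1000) == 1088, "array stride keeps members aligned");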
@@ -250,22 +271,7 @@ static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
#define K_THREAD_STACK_MEMBER(sym, size) \
	struct z_thread_stack_element \
		__aligned(Z_THREAD_STACK_OBJ_ALIGN(size)) \
		sym[size]

/**
 * @brief Return the size in bytes of a stack memory region
 *
 * Convenience macro for passing the desired stack size to k_thread_create()
 * since the underlying implementation may actually create something larger
 * (for instance a guard area).
 *
 * The value returned here is not guaranteed to match the 'size' parameter
 * passed to K_THREAD_STACK_DEFINE and may be larger.
 *
 * @param sym Stack memory symbol
 * @return Size of the stack
 */
#define K_THREAD_STACK_SIZEOF(sym) sizeof(sym)
		sym[Z_THREAD_STACK_SIZE_ADJUST(size)]

/**
 * @brief Get a pointer to the physical stack buffer
@@ -278,8 +284,7 @@ static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
 */
static inline char *Z_THREAD_STACK_BUFFER(k_thread_stack_t *sym)
{
	return (char *)sym;
	return (char *)sym + K_THREAD_STACK_RESERVED;
}
#endif /* _ARCH_DECLARE_STACK */
#endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_INCLUDE_SYS_THREAD_STACK_H */
@@ -188,11 +188,6 @@ config THREAD_USERSPACE_LOCAL_DATA
	bool
	depends on USERSPACE

config THREAD_USERSPACE_LOCAL_DATA_ARCH_DEFER_SETUP
	bool
	depends on THREAD_USERSPACE_LOCAL_DATA
	default y if ARC || ARM

config ERRNO
	bool "Enable errno support"
	default y
@@ -58,18 +58,23 @@ void arch_busy_wait(uint32_t usec_to_wait);
 * be called with the true bounds of the available stack buffer within the
 * thread's stack object.
 *
 * The provided stack pointer is guaranteed to be properly aligned with respect
 * to the CPU and ABI requirements. There may be space reserved between the
 * stack pointer and the bounds of the stack buffer for initial stack pointer
 * randomization and thread-local storage.
 *
 * Fields in thread->base will be initialized when this is called.
 *
 * @param thread Pointer to uninitialized struct k_thread
 * @param stack Pointer to the stack object
 * @param stack_size Stack object buffer size in bytes
 * @param stack_ptr Aligned initial stack pointer
 * @param entry Thread entry function
 * @param p1 1st entry point parameter
 * @param p2 2nd entry point parameter
 * @param p3 3rd entry point parameter
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     size_t stack_size, k_thread_entry_t entry,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3);

#ifdef CONFIG_USE_SWITCH
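
(Illustration, not part of the diff.) Under the new contract an arch layer no longer does any size or alignment math; a hypothetical port reduces to roughly the following — struct init_stack_frame and the callee_saved member are invented for this sketch:

struct init_stack_frame {
	k_thread_entry_t entry;
	void *p1, *p2, *p3;
};

void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	struct init_stack_frame *iframe;

	/* stack_ptr is already aligned and already sits below any TLS or
	 * random-offset area reserved by the kernel.
	 */
	iframe = Z_STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr);
	iframe->entry = entry;
	iframe->p1 = p1;
	iframe->p2 = p2;
	iframe->p3 = p3;

	thread->callee_saved.sp = (uintptr_t)iframe;	/* invented member */
}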
@@ -46,23 +46,6 @@ extern void z_setup_new_thread(struct k_thread *new_thread,
			       void *p1, void *p2, void *p3,
			       int prio, uint32_t options, const char *name);

static inline void z_new_thread_init(struct k_thread *thread,
				     char *stack, size_t stack_size)
{
#if !defined(CONFIG_INIT_STACKS) && !defined(CONFIG_THREAD_STACK_INFO)
	ARG_UNUSED(stack);
	ARG_UNUSED(stack_size);
	ARG_UNUSED(thread);
#endif
#ifdef CONFIG_INIT_STACKS
	memset(stack, 0xaa, stack_size);
#endif
#if defined(CONFIG_THREAD_STACK_INFO)
	thread->stack_info.start = (uintptr_t)stack;
	thread->stack_info.size = stack_size;
#endif /* CONFIG_THREAD_STACK_INFO */
}

/**
 * @brief Allocate some memory from the current thread's resource pool
 *
113 kernel/thread.c
@@ -408,15 +408,10 @@ static void schedule_new_thread(struct k_thread *thread, k_timeout_t delay)
}
#endif

#if !CONFIG_STACK_POINTER_RANDOM
static inline size_t adjust_stack_size(size_t stack_size)
{
	return stack_size;
}
#else
#if CONFIG_STACK_POINTER_RANDOM
int z_stack_adjust_initialized;

static inline size_t adjust_stack_size(size_t stack_size)
static size_t random_offset(size_t stack_size)
{
	size_t random_val;

@@ -435,22 +430,82 @@ static inline size_t adjust_stack_size(size_t stack_size)
	const size_t fuzz = random_val % CONFIG_STACK_POINTER_RANDOM;

	if (unlikely(fuzz * 2 > stack_size)) {
		return stack_size;
		return 0;
	}

	return stack_size - fuzz;
	return fuzz;
}
#if defined(CONFIG_STACK_GROWS_UP)
/* This is so rare not bothering for now */
#error "Stack pointer randomization not implemented for upward growing stacks"
#endif /* CONFIG_STACK_GROWS_UP */

#endif /* CONFIG_STACK_POINTER_RANDOM */

static char *setup_thread_stack(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size)
{
	size_t stack_obj_size;
	size_t delta = 0;
	char *stack_ptr;

	stack_obj_size = Z_THREAD_STACK_SIZE_ADJUST(stack_size);
	stack_ptr = (char *)stack + stack_obj_size;

#if defined(CONFIG_INIT_STACKS) || defined(CONFIG_THREAD_STACK_INFO) || \
	defined(CONFIG_STACK_SENTINEL)
	char *stack_buf_start;

	stack_buf_start = Z_THREAD_STACK_BUFFER(stack);
#endif
#if defined(CONFIG_INIT_STACKS) || defined(CONFIG_THREAD_STACK_INFO) || \
	CONFIG_STACK_POINTER_RANDOM
	size_t stack_buf_size;

	stack_buf_size = stack_obj_size - K_THREAD_STACK_RESERVED;
#endif
#ifdef CONFIG_INIT_STACKS
	memset(stack_buf_start, 0xaa, stack_buf_size);
#endif
#ifdef CONFIG_STACK_SENTINEL
	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
	 * We periodically check that it's still present and kill the thread
	 * if it isn't.
	 */
	*((uint32_t *)stack_buf_start) = STACK_SENTINEL;
#endif /* CONFIG_STACK_SENTINEL */
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
	size_t tls_size = sizeof(struct _thread_userspace_local_data);

	/* reserve space on highest memory of stack buffer for local data */
	delta += tls_size;
	new_thread->userspace_local_data =
		(struct _thread_userspace_local_data *)(stack_ptr - delta);
#endif
#if CONFIG_STACK_POINTER_RANDOM
	delta += random_offset(stack_buf_size);
#endif
	delta = ROUND_UP(delta, ARCH_STACK_PTR_ALIGN);
#ifdef CONFIG_THREAD_STACK_INFO
	/* Initial values. Arches which implement MPU guards that "borrow"
	 * memory from the stack buffer (not tracked in K_THREAD_STACK_RESERVED)
	 * will need to appropriately update this.
	 *
	 * The bounds tracked here correspond to the area of the stack object
	 * that the thread can access, which includes TLS.
	 */
	new_thread->stack_info.start = (uintptr_t)stack_buf_start;
	new_thread->stack_info.size = stack_buf_size;
	new_thread->stack_info.delta = delta;
#endif
	stack_ptr -= delta;

	return stack_ptr;
}

/*
 * Note:
 * The caller must guarantee that the stack_size passed here corresponds
 * to the amount of stack memory available for the thread.
 * The provided stack_size value is presumed to be either the result of
 * K_THREAD_STACK_SIZEOF(stack), or the size value passed to the instance
 * of K_THREAD_STACK_DEFINE() which defined 'stack'.
 */
void z_setup_new_thread(struct k_thread *new_thread,
			k_thread_stack_t *stack, size_t stack_size,
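
(Illustration, not part of the diff.) To make the pointer arithmetic in setup_thread_stack() concrete, a walk-through with invented numbers — a 4096-byte object with no reserved area, 24 bytes of userspace local data, a 40-byte random offset, and 16-byte stack pointer alignment:

#include <assert.h>

int main(void)
{
	unsigned long start = 0x20000000UL;	/* invented object base */
	unsigned long size = 4096UL;		/* writable buffer size */
	unsigned long delta = 24UL + 40UL;	/* local data + random fuzz */

	delta = (delta + 15UL) & ~15UL;		/* ROUND_UP to 16 */
	assert(delta == 64UL);

	/* initial SP, and the value (start + size - delta) that
	 * stack_info lets the kernel reproduce later
	 */
	assert(start + size - delta == 0x20000FC0UL);
	return 0;
}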
@@ -458,6 +513,8 @@ void z_setup_new_thread(struct k_thread *new_thread,
			void *p1, void *p2, void *p3,
			int prio, uint32_t options, const char *name)
{
	char *stack_ptr;

	Z_ASSERT_VALID_PRIO(prio, entry);

#ifdef CONFIG_USERSPACE
@@ -470,21 +527,14 @@ void z_setup_new_thread(struct k_thread *new_thread,
	/* Any given thread has access to itself */
	k_object_access_grant(new_thread, new_thread);
#endif
	stack_size = adjust_stack_size(stack_size);

	z_waitq_init(&new_thread->base.join_waiters);

#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
#ifndef CONFIG_THREAD_USERSPACE_LOCAL_DATA_ARCH_DEFER_SETUP
	/* reserve space on top of stack for local data */
	stack_size = Z_STACK_PTR_ALIGN(stack_size
		- sizeof(*new_thread->userspace_local_data));
#endif
#endif
	/* Initialize various struct k_thread members */
	z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options);
	stack_ptr = setup_thread_stack(new_thread, stack, stack_size);

	arch_new_thread(new_thread, stack, stack_ptr, entry, p1, p2, p3);

	arch_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3);
	/* static threads overwrite it afterwards with real value */
	new_thread->init_data = NULL;
	new_thread->fn_abort = NULL;
@@ -497,23 +547,6 @@ void z_setup_new_thread(struct k_thread *new_thread,
	__ASSERT(new_thread->switch_handle != NULL,
		 "arch layer failed to initialize switch_handle");
#endif
#ifdef CONFIG_STACK_SENTINEL
	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
	 * We periodically check that it's still present and kill the thread
	 * if it isn't.
	 */
	*((uint32_t *)new_thread->stack_info.start) = STACK_SENTINEL;
#endif /* CONFIG_STACK_SENTINEL */
#ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
#ifndef CONFIG_THREAD_USERSPACE_LOCAL_DATA_ARCH_DEFER_SETUP
	/* don't set again if the arch's own code in arch_new_thread() has
	 * already set the pointer.
	 */
	new_thread->userspace_local_data =
		(struct _thread_userspace_local_data *)
		(Z_THREAD_STACK_BUFFER(stack) + stack_size);
#endif
#endif
#ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
	new_thread->custom_data = NULL;
@@ -38,6 +38,7 @@
#define CONFIG_MP_NUM_CPUS 1
#define CONFIG_SYS_CLOCK_TICKS_PER_SEC 100
#define CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC 10000000
#define ARCH_STACK_PTR_ALIGN 8
/* FIXME: Properly integrate with Zephyr's arch specific code */
#define CONFIG_X86 1
#define CONFIG_PRINTK 1
@@ -47,7 +47,7 @@ static inline void set_fault_valid(bool valid)
#if defined(CONFIG_X86)
#define MEM_REGION_ALLOC (4096)
#elif defined(CONFIG_ARC)
#define MEM_REGION_ALLOC (STACK_ALIGN)
#define MEM_REGION_ALLOC (Z_ARC_MPU_ALIGN)
#elif defined(CONFIG_ARM)
#define MEM_REGION_ALLOC (Z_THREAD_MIN_STACK_ALIGN)
#else
@@ -310,7 +310,7 @@ static void test_write_kernel_data(void)
K_APP_DMEM(part0) volatile char *priv_stack_ptr;
#if defined(CONFIG_ARC)
K_APP_DMEM(part0) int32_t size = (0 - CONFIG_PRIVILEGED_STACK_SIZE -
				  STACK_GUARD_SIZE);
				  Z_ARC_STACK_GUARD_SIZE);
#endif

/**