kernel: reduce scope of z_new_thread_init()

The core kernel z_setup_new_thread() calls into arch_new_thread(),
which calls back into the core kernel via z_new_thread_init().

Move everything that doesn't have to be in z_new_thread_init() into
z_setup_new_thread(), and convert what remains of z_new_thread_init()
into an inline function.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie, 2020-04-19 14:28:15 -07:00, committed by Anas Nashif
commit c0df99cc77
11 changed files with 45 additions and 62 deletions
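
For orientation before the per-file diffs, here is a minimal, self-contained sketch of the call flow this commit narrows. Only the function names and the split of responsibilities follow the commit message; the types, config switches, and bodies are simplified stand-ins, not the actual Zephyr code.

/* Simplified stand-in types and config switches; not the Zephyr originals. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CONFIG_INIT_STACKS
#define CONFIG_THREAD_STACK_INFO

struct k_thread {
	struct {
		uintptr_t start;
		size_t size;
	} stack_info;
};

/* After this commit only stack poisoning and stack_info bookkeeping are
 * left here, so the helper can live in a header as a static inline.
 */
static inline void z_new_thread_init(struct k_thread *thread,
				     char *stack, size_t stack_size)
{
#ifdef CONFIG_INIT_STACKS
	memset(stack, 0xaa, stack_size);
#endif
#ifdef CONFIG_THREAD_STACK_INFO
	thread->stack_info.start = (uintptr_t)stack;
	thread->stack_info.size = stack_size;
#endif
}

/* Each arch's arch_new_thread() calls back into the helper above and then
 * builds its architecture-specific initial stack frame (omitted here).
 */
static void arch_new_thread(struct k_thread *thread, char *stack,
			    size_t stack_size)
{
	z_new_thread_init(thread, stack, stack_size);
	/* ... carve the initial stack frame, set up registers ... */
}

/* The core kernel entry point now owns the generic bookkeeping that used
 * to sit in z_new_thread_init(): priority/options, sentinel, name, etc.
 */
static void z_setup_new_thread(struct k_thread *thread, char *stack,
			       size_t stack_size)
{
	/* ... generic struct k_thread initialization ... */
	arch_new_thread(thread, stack, stack_size);
	/* ... stack sentinel, thread name, monitor hooks ... */
}

int main(void)
{
	static char stack[256];
	struct k_thread thread;

	z_setup_new_thread(&thread, stack, sizeof(stack));
	printf("stack_info: start=%p size=%zu\n",
	       (void *)thread.stack_info.start, thread.stack_info.size);
	return 0;
}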

@@ -137,7 +137,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 #endif
 	}
-	z_new_thread_init(thread, pStackMem, stackAdjSize, priority, options);
+	z_new_thread_init(thread, pStackMem, stackAdjSize);
 	/* carve the thread entry struct from the "base" of
 	   the privileged stack */
@@ -164,7 +164,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	pStackMem += ARCH_THREAD_STACK_RESERVED;
 	stackEnd = pStackMem + stackSize;
-	z_new_thread_init(thread, pStackMem, stackSize, priority, options);
+	z_new_thread_init(thread, pStackMem, stackSize);
 	stackAdjEnd = stackEnd;

@@ -94,8 +94,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	struct __esf *pInitCtx;
-	z_new_thread_init(thread, pStackMem, stackSize, priority,
-			  options);
+	z_new_thread_init(thread, pStackMem, stackSize);
 	/* Carve the thread entry struct from the "base" of the stack
 	 *

@@ -71,7 +71,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	stackEnd = pStackMem + stackSize;
-	z_new_thread_init(thread, pStackMem, stackSize, priority, options);
+	z_new_thread_init(thread, pStackMem, stackSize);
 	pInitCtx = (struct init_stack_frame *)(STACK_ROUND_DOWN(stackEnd -
 				sizeof(struct init_stack_frame)));

@@ -38,7 +38,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	struct init_stack_frame *iframe;
-	z_new_thread_init(thread, stack_memory, stack_size, priority, options);
+	z_new_thread_init(thread, stack_memory, stack_size);
 	/* Initial stack frame data, stored at the base of the stack */
 	iframe = (struct init_stack_frame *)

@@ -36,7 +36,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	posix_thread_status_t *thread_status;
-	z_new_thread_init(thread, stack_memory, stack_size, priority, options);
+	z_new_thread_init(thread, stack_memory, stack_size);
 	/* We store it in the same place where normal archs store the
 	 * "initial stack frame"

@@ -22,7 +22,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	struct __esf *stack_init;
-	z_new_thread_init(thread, stack_memory, stack_size, priority, options);
+	z_new_thread_init(thread, stack_memory, stack_size);
 	/* Initial stack frame for thread */
 	stack_init = (struct __esf *)

@@ -71,7 +71,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	Z_ASSERT_VALID_PRIO(priority, entry);
 	stack_buf = Z_THREAD_STACK_BUFFER(stack);
-	z_new_thread_init(thread, stack_buf, stack_size, priority, options);
+	z_new_thread_init(thread, stack_buf, stack_size);
 #if CONFIG_X86_STACK_PROTECTION
 	struct z_x86_thread_stack_header *header =

@@ -19,8 +19,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	void *switch_entry;
 	Z_ASSERT_VALID_PRIO(priority, entry);
-	z_new_thread_init(thread, Z_THREAD_STACK_BUFFER(stack),
-			  stack_size, priority, options);
+	z_new_thread_init(thread, Z_THREAD_STACK_BUFFER(stack), stack_size);
 #if CONFIG_X86_STACK_PROTECTION
 	struct z_x86_thread_stack_header *header =

@@ -67,7 +67,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	/* Align downward. The API as specified requires a runtime check. */
 	top = (char *)(((unsigned int)top) & ~3);
-	z_new_thread_init(thread, base, sz, prio, opts);
+	z_new_thread_init(thread, base, sz);
 	thread->switch_handle = xtensa_init_stack((void *)top, entry,
 						  p1, p2, p3);

@@ -46,9 +46,22 @@ extern void z_setup_new_thread(struct k_thread *new_thread,
 			       void *p1, void *p2, void *p3,
 			       int prio, u32_t options, const char *name);
-extern void z_new_thread_init(struct k_thread *thread,
-			      char *pStack, size_t stackSize,
-			      int prio, unsigned int options);
+static inline void z_new_thread_init(struct k_thread *thread,
+				     char *stack, size_t stack_size)
+{
+#if !defined(CONFIG_INIT_STACKS) && !defined(CONFIG_THREAD_STACK_INFO)
+	ARG_UNUSED(stack);
+	ARG_UNUSED(stack_size);
+	ARG_UNUSED(thread);
+#endif
+#ifdef CONFIG_INIT_STACKS
+	memset(stack, 0xaa, stack_size);
+#endif
+#if defined(CONFIG_THREAD_STACK_INFO)
+	thread->stack_info.start = (uintptr_t)stack;
+	thread->stack_info.size = stack_size;
+#endif /* CONFIG_THREAD_STACK_INFO */
+}
 /**
  * @brief Allocate some memory from the current thread's resource pool
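
An aside on the 0xaa fill that the new inline helper keeps: when CONFIG_INIT_STACKS is enabled, stack-usage reporting can later scan from the lowest address for bytes that still hold the fill pattern to estimate how much of the stack was never touched. The sketch below is a host-side illustration of that idea only; unused_stack_bytes() is a hypothetical helper, not a Zephyr API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STACK_FILL 0xaa	/* same pattern CONFIG_INIT_STACKS writes */

/* Hypothetical helper: count untouched bytes from the lowest address up. */
static size_t unused_stack_bytes(const uint8_t *stack, size_t size)
{
	size_t unused = 0;

	while (unused < size && stack[unused] == STACK_FILL) {
		unused++;
	}
	return unused;
}

int main(void)
{
	uint8_t stack[128];

	memset(stack, STACK_FILL, sizeof(stack));	/* what the inline helper does */
	memset(stack + 96, 0, 32);			/* pretend the thread used the 32
							 * highest bytes (a descending stack
							 * grows downward)
							 */
	printf("unused: %zu of %zu bytes\n",
	       unused_stack_bytes(stack, sizeof(stack)), sizeof(stack));
	return 0;
}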

@@ -462,51 +462,6 @@ static inline size_t adjust_stack_size(size_t stack_size)
 #endif /* CONFIG_STACK_POINTER_RANDOM */
-void z_new_thread_init(struct k_thread *thread,
-		       char *pStack, size_t stackSize,
-		       int prio, unsigned int options)
-{
-#if !defined(CONFIG_INIT_STACKS) && !defined(CONFIG_THREAD_STACK_INFO)
-	ARG_UNUSED(pStack);
-	ARG_UNUSED(stackSize);
-#endif
-#ifdef CONFIG_INIT_STACKS
-	memset(pStack, 0xaa, stackSize);
-#endif
-#ifdef CONFIG_STACK_SENTINEL
-	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
-	 * We periodically check that it's still present and kill the thread
-	 * if it isn't.
-	 */
-	*((u32_t *)pStack) = STACK_SENTINEL;
-#endif /* CONFIG_STACK_SENTINEL */
-	/* Initialize various struct k_thread members */
-	z_init_thread_base(&thread->base, prio, _THREAD_PRESTART, options);
-	/* static threads overwrite it afterwards with real value */
-	thread->init_data = NULL;
-	thread->fn_abort = NULL;
-#ifdef CONFIG_THREAD_CUSTOM_DATA
-	/* Initialize custom data field (value is opaque to kernel) */
-	thread->custom_data = NULL;
-#endif
-#ifdef CONFIG_THREAD_NAME
-	thread->name[0] = '\0';
-#endif
-#if defined(CONFIG_USERSPACE)
-	thread->mem_domain_info.mem_domain = NULL;
-#endif /* CONFIG_USERSPACE */
-#if defined(CONFIG_THREAD_STACK_INFO)
-	thread->stack_info.start = (uintptr_t)pStack;
-	thread->stack_info.size = (u32_t)stackSize;
-#endif /* CONFIG_THREAD_STACK_INFO */
-}
 /*
  * Note:
  * The caller must guarantee that the stack_size passed here corresponds
@@ -522,6 +477,7 @@ void z_setup_new_thread(struct k_thread *new_thread,
 	z_object_init(new_thread);
 	z_object_init(stack);
 	new_thread->stack_obj = stack;
+	new_thread->mem_domain_info.mem_domain = NULL;
 	/* Any given thread has access to itself */
 	k_object_access_grant(new_thread, new_thread);
@@ -537,9 +493,14 @@ void z_setup_new_thread(struct k_thread *new_thread,
 		- sizeof(*new_thread->userspace_local_data));
 #endif
 #endif
+	/* Initialize various struct k_thread members */
+	z_init_thread_base(&new_thread->base, prio, _THREAD_PRESTART, options);
 	arch_new_thread(new_thread, stack, stack_size, entry, p1, p2, p3,
 			prio, options);
+	/* static threads overwrite it afterwards with real value */
+	new_thread->init_data = NULL;
+	new_thread->fn_abort = NULL;
 #ifdef CONFIG_USE_SWITCH
 	/* switch_handle must be non-null except when inside z_swap()
@@ -549,7 +510,13 @@ void z_setup_new_thread(struct k_thread *new_thread,
 	__ASSERT(new_thread->switch_handle != NULL,
 		 "arch layer failed to initialize switch_handle");
 #endif
+#ifdef CONFIG_STACK_SENTINEL
+	/* Put the stack sentinel at the lowest 4 bytes of the stack area.
+	 * We periodically check that it's still present and kill the thread
+	 * if it isn't.
+	 */
+	*((u32_t *)new_thread->stack_info.start) = STACK_SENTINEL;
+#endif /* CONFIG_STACK_SENTINEL */
 #ifdef CONFIG_THREAD_USERSPACE_LOCAL_DATA
 #ifndef CONFIG_THREAD_USERSPACE_LOCAL_DATA_ARCH_DEFER_SETUP
 	/* don't set again if the arch's own code in arch_new_thread() has
@@ -560,7 +527,10 @@ void z_setup_new_thread(struct k_thread *new_thread,
 		(Z_THREAD_STACK_BUFFER(stack) + stack_size);
 #endif
 #endif
+#ifdef CONFIG_THREAD_CUSTOM_DATA
+	/* Initialize custom data field (value is opaque to kernel) */
+	new_thread->custom_data = NULL;
+#endif
 #ifdef CONFIG_THREAD_MONITOR
 	new_thread->entry.pEntry = entry;
 	new_thread->entry.parameter1 = p1;
@@ -579,6 +549,8 @@ void z_setup_new_thread(struct k_thread *new_thread,
 			CONFIG_THREAD_MAX_NAME_LEN - 1);
 		/* Ensure NULL termination, truncate if longer */
 		new_thread->name[CONFIG_THREAD_MAX_NAME_LEN - 1] = '\0';
+	} else {
+		new_thread->name[0] = '\0';
 	}
 #endif
 #ifdef CONFIG_SCHED_CPU_MASK
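
The relocated sentinel write above only places the guard word; the periodic check that the comment refers to lives elsewhere in the kernel and is untouched by this commit. As a rough, self-contained illustration of the idea (the STACK_SENTINEL value and the check function below are stand-ins, not the kernel's actual definitions):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define STACK_SENTINEL 0xF0F0F0F0u	/* illustrative value only */

struct k_thread {
	struct {
		uintptr_t start;
		size_t size;
	} stack_info;
};

/* Hypothetical check: the sentinel sits at the lowest 4 bytes of the stack
 * area; if it has been overwritten, the stack overflowed past its bottom
 * and the owning thread should be aborted.
 */
static int stack_sentinel_intact(const struct k_thread *thread)
{
	const uint32_t *sentinel = (const uint32_t *)thread->stack_info.start;

	return *sentinel == STACK_SENTINEL;
}

int main(void)
{
	static uint32_t stack[64];
	struct k_thread thread = {
		.stack_info = {
			.start = (uintptr_t)stack,
			.size = sizeof(stack),
		},
	};

	stack[0] = STACK_SENTINEL;	/* what z_setup_new_thread() now does */
	printf("intact: %d\n", stack_sentinel_intact(&thread));

	stack[0] = 0;			/* simulate an overflow clobbering it */
	printf("intact: %d\n", stack_sentinel_intact(&thread));
	return 0;
}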