kernel: streamline initialization of _thread_base and timeouts
Move _thread_base initialization to _init_thread_base(), remove the
mention of "nano" in the timeout init and move the timeout init into
_init_thread_base(). Initialize all base fields via _init_thread_base()
in the semaphore group code.

Change-Id: I05b70b06261f4776bda6d67f358190428d4a954a
Signed-off-by: Benjamin Walsh <benjamin.walsh@windriver.com>
parent 50eb51a745
commit 069fd3624e

9 changed files with 32 additions and 43 deletions
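The gist: each arch's thread-creation routine used to initialize the base
fields and the thread timeout by hand; this commit collapses that boilerplate
into one shared helper. A minimal before/after sketch of the pattern,
assembled from the hunks below (`thread` stands for the arch's freshly
created k_thread):

	/* before: repeated in each arch's _new_thread() */
	thread->base.flags = options | K_PRESTART;
	thread->base.prio = priority;
	thread->base.sched_locked = 0;
	_nano_timeout_thread_init(thread);

	/* after: one call to the shared kernel helper */
	_init_thread_base(&thread->base, priority, K_PRESTART, options);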
@@ -124,14 +124,11 @@ void _new_thread(char *pStackMem, size_t stackSize,
 	pInitCtx->status32 = _ARC_V2_STATUS32_E(_ARC_V2_DEF_IRQ_LEVEL);
 #endif
 
-	/* k_q_node initialized upon first insertion in a list */
-	thread->base.flags = options | K_PRESTART;
-	thread->base.sched_locked = 0;
+	_init_thread_base(&thread->base, priority, K_PRESTART, options);
 
 	/* static threads overwrite them afterwards with real values */
 	thread->init_data = NULL;
 	thread->fn_abort = NULL;
-	thread->base.prio = priority;
 
 #ifdef CONFIG_THREAD_CUSTOM_DATA
 	/* Initialize custom data field (value is opaque to kernel) */
@@ -158,8 +155,6 @@ void _new_thread(char *pStackMem, size_t stackSize,
 	thread->callee_saved.sp =
 		(uint32_t)pInitCtx - ___callee_saved_stack_t_SIZEOF;
 
-	_nano_timeout_thread_init(thread);
-
 	/* initial values in all other regs/k_thread entries are irrelevant */
 
 	thread_monitor_init(thread);

@@ -112,14 +112,11 @@ void _new_thread(char *pStackMem, size_t stackSize,
 	pInitCtx->xpsr =
 		0x01000000UL; /* clear all, thumb bit is 1, even if RO */
 
-	/* k_q_node initialized upon first insertion in a list */
-	tcs->base.flags = options | K_PRESTART;
-	tcs->base.sched_locked = 0;
+	_init_thread_base(&tcs->base, priority, K_PRESTART, options);
 
 	/* static threads overwrite it afterwards with real value */
 	tcs->init_data = NULL;
 	tcs->fn_abort = NULL;
-	tcs->base.prio = priority;
 
 #ifdef CONFIG_THREAD_CUSTOM_DATA
 	/* Initialize custom data field (value is opaque to kernel) */
@@ -140,8 +137,6 @@ void _new_thread(char *pStackMem, size_t stackSize,
 
 	/* swap_return_value can contain garbage */
 
-	_nano_timeout_thread_init(tcs);
-
 	/* initial values in all other registers/TCS entries are irrelevant */
 
 	thread_monitor_init(tcs);

@@ -85,11 +85,8 @@ void _new_thread(char *stack_memory, size_t stack_size,
 
 	/* Initialize various struct k_thread members */
 	thread = (struct k_thread *)stack_memory;
-	thread->base.prio = priority;
 
-	/* k_q_node initialized upon first insertion in a list */
-	thread->base.flags = options | K_PRESTART;
-	thread->base.sched_locked = 0;
+	_init_thread_base(&thread->base, priority, K_PRESTART, options);
 
 	/* static threads overwrite it afterwards with real value */
 	thread->init_data = NULL;
@@ -104,9 +101,5 @@ void _new_thread(char *stack_memory, size_t stack_size,
 	thread->callee_saved.key = NIOS2_STATUS_PIE_MSK;
 	/* Leave the rest of thread->callee_saved junk */
 
-#ifdef CONFIG_NANO_TIMEOUTS
-	_nano_timeout_thread_init(thread);
-#endif
-
 	thread_monitor_init(thread);
 }

@@ -83,15 +83,11 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
 	/* ptr to the new task's k_thread */
 	struct k_thread *thread = (struct k_thread *)pStackMem;
 
-	thread->base.prio = priority;
 #if (defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO))
 	thread->arch.excNestCount = 0;
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
 
-	/* k_q_node initialized upon first insertion in a list */
-
-	thread->base.flags = options | K_PRESTART;
-	thread->base.sched_locked = 0;
+	_init_thread_base(&thread->base, priority, K_PRESTART, options);
 
 	/* static threads overwrite it afterwards with real value */
 	thread->init_data = NULL;
@@ -137,8 +133,6 @@ static void _new_thread_internal(char *pStackMem, unsigned stackSize,
 	PRINTK("\nstruct thread * = 0x%x", thread);
 
 	thread_monitor_init(thread);
-
-	_nano_timeout_thread_init(thread);
 }
 
 #if defined(CONFIG_GDB_INFO) || defined(CONFIG_DEBUG_INFO) \

@@ -181,6 +181,10 @@ _set_thread_return_value_with_data(struct k_thread *thread,
 	thread->base.swap_data = data;
 }
 
+extern void _init_thread_base(struct _thread_base *thread_base,
+			      int priority, uint32_t initial_state,
+			      unsigned int options);
+
 #endif /* _ASMLANGUAGE */
 
 #endif /* _kernel_structs__h_ */

@@ -65,18 +65,10 @@ static inline void _init_timeout(struct _timeout *t, _timeout_func_t func)
 	 */
 }
 
-static inline void _init_thread_timeout(struct k_thread *thread)
+static ALWAYS_INLINE void
+_init_thread_timeout(struct _thread_base *thread_base)
 {
-	_init_timeout(&thread->base.timeout, NULL);
-}
-
-/*
- * XXX - backwards compatibility until the arch part is updated to call
- * _init_thread_timeout()
- */
-static inline void _nano_timeout_thread_init(struct k_thread *thread)
-{
-	_init_thread_timeout(thread);
+	_init_timeout(&thread_base->timeout, NULL);
 }
 
 /* remove a thread timing out from kernel object's wait queue */

@@ -30,8 +30,10 @@ extern "C" {
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 #include <timeout_q.h>
 #else
-#define _init_thread_timeout(thread) do { } while ((0))
-#define _nano_timeout_thread_init(thread) _init_thread_timeout(thread)
+static ALWAYS_INLINE void _init_thread_timeout(struct _thread_base *thread_base)
+{
+	ARG_UNUSED(thread_base);
+}
 #define _add_thread_timeout(thread, wait_q, timeout) do { } while (0)
 static inline int _abort_thread_timeout(struct k_thread *thread) { return 0; }
 #define _get_next_timeout_expiry() (K_FOREVER)

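A side effect worth noting (my reading; the commit message doesn't spell it
out): the CONFIG_SYS_CLOCK_EXISTS=n stub changes from a function-like macro
to a static inline, so the argument stays type-checked in both
configurations. A small sketch under that assumption:

	struct _thread_base base;

	_init_thread_timeout(&base);   /* fine with or without sys clock */
	_init_thread_timeout(thread);  /* struct k_thread *: now draws a
					* compiler diagnostic, where the old
					* macro silently accepted anything
					*/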
@@ -123,10 +123,8 @@ int k_sem_group_take(struct k_sem *sem_array[], struct k_sem **sem,
 	_current->base.swap_data = &list;
 
 	for (int i = 0; i < num; i++) {
-		wait_objects[i].dummy.flags = K_DUMMY;
-		wait_objects[i].dummy.prio = priority;
 
-		_init_thread_timeout((struct k_thread *)&wait_objects[i].dummy);
+		_init_thread_base(&wait_objects[i].dummy, priority, K_DUMMY, 0);
 
 		sys_dlist_append(&list, &wait_objects[i].desc.semg_node);
 		wait_objects[i].desc.thread = _current;
@@ -416,6 +416,22 @@ void _init_static_threads(void)
 	k_sched_unlock();
 }
 
+void _init_thread_base(struct _thread_base *thread_base, int priority,
+		       uint32_t initial_state, unsigned int options)
+{
+	/* k_q_node is initialized upon first insertion in a list */
+
+	thread_base->flags = options | initial_state;
+
+	thread_base->prio = priority;
+
+	thread_base->sched_locked = 0;
+
+	/* swap_data does not need to be initialized */
+
+	_init_thread_timeout(thread_base);
+}
+
 uint32_t _k_thread_group_mask_get(struct k_thread *thread)
 {
 	struct _static_thread_data *thread_data = thread->init_data;