xtensa: remove legacy kernel cruft

fibers/tasks are now just threads and we should not be using
struct tcs any more.

Change-Id: Iee5369abcc66b4357a0c75537025fe8edb0ffbb4
Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
commit 5c9f7e28a1
Author: Andrew Boie <andrew.p.boie@intel.com>
Date:   2017-03-30 11:55:13 -07:00
Committed by: Anas Nashif

4 changed files with 49 additions and 53 deletions
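The rename throughout this diff is mechanical: by this point the legacy name only survived as a compatibility alias for the unified thread type, roughly along the lines of the sketch below (illustrative; the actual shim lived in the legacy kernel headers):

    /* assumed form of the legacy compatibility shim */
    struct k_thread;
    #define tcs k_thread	/* old name maps onto the unified type */

so dropping 'struct tcs' changes spelling only, not layout or behavior.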

File 1 of 4:

@@ -31,7 +31,7 @@
 #include <kernel_offsets.h>

-/* Xtensa-specific TCS structure member offsets */
+/* Xtensa-specific k_thread structure member offsets */

 GEN_OFFSET_SYM(_callee_saved_t, topOfStack);
 GEN_OFFSET_SYM(_callee_saved_t, retval);
@@ -57,7 +57,7 @@ GEN_ABSOLUTE_SYM(____esf_t_SIZEOF, sizeof(__esf_t));
 /* size of the entire preempt registers structure */
 GEN_ABSOLUTE_SYM(__tPreempt_SIZEOF, sizeof(_caller_saved_t));

-/* size of the struct tcs structure without save area for coproc regs */
+/* size of the struct k_thread structure without save area for coproc regs */
 GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF,
		 sizeof(struct k_thread) - sizeof(tCoopCoprocReg) -
		 sizeof(tPreempCoprocReg));
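For readers new to the offsets machinery: the GEN_*_SYM macros above emit absolute assembler symbols so that assembly stubs can reference C structure members without hard-coding offsets. A minimal sketch of the technique (names illustrative, not the real gen_offset.h macros):

    #include <stddef.h>

    /* Emit an absolute assembler symbol carrying a compile-time constant.
     * GCC only accepts asm with operands inside a function, hence the
     * dummy wrapper.
     */
    #define GEN_ABS_SYM(name, value) \
	    __asm__(".globl " #name "\n\t.equ " #name ", %c0" :: "n"(value))

    struct example { int a; char pad[12]; int b; };

    void offsets_dummy_fn(void)
    {
	    GEN_ABS_SYM(example_b_OFFSET, offsetof(struct example, b));
    }

A build step then extracts the symbol values from the resulting object file.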

File 2 of 4:

@@ -17,12 +17,11 @@ extern void _xt_user_exit(void);
 #if CONFIG_MICROKERNEL
 extern FUNC_NORETURN void _TaskAbort(void);
 #endif
-extern void fiber_abort(void);

 #if defined(CONFIG_THREAD_MONITOR)
-#define THREAD_MONITOR_INIT(tcs) _thread_monitor_init(tcs)
+#define THREAD_MONITOR_INIT(thread) _thread_monitor_init(thread)
 #else
-#define THREAD_MONITOR_INIT(tcs) \
+#define THREAD_MONITOR_INIT(thread) \
	do {/* do nothing */ \
	} while ((0))
 #endif
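The empty #else branch keeps the usual do { } while (0) idiom so that a compiled-out THREAD_MONITOR_INIT(thread); still parses as exactly one statement. A minimal illustration of why a brace-only macro would not do:

    #define NOOP_BRACES()	{ /* nothing */ }
    #define NOOP()		do { /* nothing */ } while (0)

    void f(int cond)
    {
	    if (cond)
		    NOOP();		/* expands to a single statement */
	    else
		    f(0);
	    /* With NOOP_BRACES() the expansion would be "{ }; else",
	     * and the stray ";" ends the if before the else parses.
	     */
    }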
@@ -37,20 +36,19 @@ extern void fiber_abort(void);
  * @return N/A
  */
-static inline void _thread_monitor_init(struct tcs *tcs)
+static inline void _thread_monitor_init(struct k_thread *thread)
 {
	unsigned int key;

	/*
	 * Add the newly initialized thread to head of the list of threads.
	 * This singly linked list of threads maintains ALL the threads in the
-	 * system:
-	 * both tasks and fibers regardless of whether they are runnable.
+	 * system regardless of whether they are runnable.
	 */
	key = irq_lock();
-	tcs->next_thread = _nanokernel.threads;
-	_nanokernel.threads = tcs;
+	thread->next_thread = _nanokernel.threads;
+	_nanokernel.threads = thread;
	irq_unlock(key);
 }

 #endif /* CONFIG_THREAD_MONITOR */
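With this linkage in place, a debugger or monitor can visit every thread in the system; a hedged sketch of such a walker (the iteration pattern follows from the code above, but this particular helper is not part of the patch):

    void walk_all_threads(void (*cb)(struct k_thread *thread))
    {
	    unsigned int key = irq_lock();
	    struct k_thread *t;

	    for (t = _nanokernel.threads; t != NULL; t = t->next_thread) {
		    cb(t);	/* covers runnable and non-runnable threads */
	    }
	    irq_unlock(key);
    }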
@@ -58,7 +56,7 @@ static inline void _thread_monitor_init(struct tcs *tcs)
 /*
  * @brief Initialize a new thread from its stack space
  *
- * The control structure (TCS) is put at the lower address of the stack. An
+ * The struct k_thread is put at the lower address of the stack. An
  * initial context, to be "restored" by __return_from_coop(), is put at
  * the other end of the stack, and thus reusable by the stack when not
  * needed anymore.
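The layout this comment describes, for one thread's memory region (a sketch; exact sizes depend on alignment and on whether coprocessor save areas are configured):

    pStack (low addresses)
    +-----------------------------------------+
    | struct k_thread                         |
    | coprocessor save area (XCHAL_CP_NUM > 0)|
    | ... stack grows down into here ...      |
    | initial exception frame (pInitCtx)      |
    +-----------------------------------------+
    stackEnd = ROUND_DOWN(pStack + stackSize, align) (high addresses)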
@@ -75,7 +73,7 @@ static inline void _thread_monitor_init(struct tcs *tcs)
  * @param p1 first param to entry point
  * @param p2 second param to entry point
  * @param p3 third param to entry point
- * @param fiber prio, -1 for task
+ * @param prio thread priority
  * @param options is unused (saved for future expansion)
  *
  * @return N/A
@@ -89,10 +87,10 @@ void _new_thread(char *pStack, size_t stackSize,
	/* Align stack end to maximum alignment requirement. */
	char *stackEnd = (char *)ROUND_DOWN(pStack + stackSize,
		(XCHAL_TOTAL_SA_ALIGN < 16 ? 16 : XCHAL_TOTAL_SA_ALIGN));
-	/* TCS is located at top of stack while frames are located at end
+	/* k_thread is located at top of stack while frames are located at end
	 * of it
	 */
-	struct tcs *tcs = (struct tcs *)(pStack);
+	struct k_thread *thread = (struct k_thread *)(pStack);
 #if XCHAL_CP_NUM > 0
	uint32_t *cpSA;
 #endif
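The alignment expression rounds the stack end down to the larger of 16 and XCHAL_TOTAL_SA_ALIGN. For power-of-two alignments the usual ROUND_DOWN/ROUND_UP definitions reduce to bit masking (a sketch; Zephyr's real macros live in its utility headers):

    #define RDOWN(x, align)	((x) & ~((align) - 1))
    #define RUP(x, align)	(((x) + (align) - 1) & ~((align) - 1))

    /* e.g. RDOWN(1000, 16) == 992 and RUP(1000, 16) == 1008 */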
@@ -105,16 +103,16 @@ void _new_thread(char *pStack, size_t stackSize,
	memset(pStack, 0xaa, stackSize);
 #endif
 #if XCHAL_CP_NUM > 0
-	/* Coprocessor's stack is allocated just after the TCS */
-	tcs->arch.preempCoprocReg.cpStack = pStack + sizeof(struct k_thread);
-	cpSA = (uint32_t *)(tcs->arch.preempCoprocReg.cpStack + XT_CP_ASA);
+	/* Coprocessor's stack is allocated just after the k_thread */
+	thread->arch.preempCoprocReg.cpStack = pStack + sizeof(struct k_thread);
+	cpSA = (uint32_t *)(thread->arch.preempCoprocReg.cpStack + XT_CP_ASA);
	/* Coprocessor's save area alignment is at least 16 bytes */
	*cpSA = ROUND_UP(cpSA + 1,
		(XCHAL_TOTAL_SA_ALIGN < 16 ? 16 : XCHAL_TOTAL_SA_ALIGN));
 #ifdef CONFIG_DEBUG
-	printk("cpStack = %p\n", tcs->arch.preempCoprocReg.cpStack);
+	printk("cpStack = %p\n", thread->arch.preempCoprocReg.cpStack);
	printk("cpAsa = %p\n",
-	       *(void **)(tcs->arch.preempCoprocReg.cpStack + XT_CP_ASA));
+	       *(void **)(thread->arch.preempCoprocReg.cpStack + XT_CP_ASA));
 #endif
 #endif
	/* Thread's first frame alignment is granted as both operands are
@@ -155,25 +153,27 @@ void _new_thread(char *pStack, size_t stackSize,
	pInitCtx->a9 = (uint32_t)p3;
	pInitCtx->ps = PS_UM | PS_EXCM | PS_WOE | PS_CALLINC(1);
 #endif
-	tcs->callee_saved.topOfStack = pInitCtx;
-	tcs->arch.flags = 0;
-	_init_thread_base(&tcs->base, prio, _THREAD_PRESTART, options);
+	thread->callee_saved.topOfStack = pInitCtx;
+	thread->arch.flags = 0;
+	_init_thread_base(&thread->base, prio, _THREAD_PRESTART, options);

	/* static threads overwrite it afterwards with real value */
-	tcs->init_data = NULL;
-	tcs->fn_abort = NULL;
+	thread->init_data = NULL;
+	thread->fn_abort = NULL;
 #ifdef CONFIG_THREAD_CUSTOM_DATA
	/* Initialize custom data field (value is opaque to kernel) */
-	tcs->custom_data = NULL;
+	thread->custom_data = NULL;
 #endif
 #ifdef CONFIG_THREAD_MONITOR
	/*
-	 * In debug mode tcs->entry give direct access to the thread entry
+	 * In debug mode thread->entry gives direct access to the thread entry
	 * and the corresponding parameters.
	 */
-	tcs->entry = (struct __thread_entry *)(pInitCtx);
+	thread->entry = (struct __thread_entry *)(pInitCtx);
 #endif

-	/* initial values in all other registers/TCS entries are irrelevant */
+	/* initial values in all other registers/k_thread entries are
+	 * irrelevant
+	 */
-	THREAD_MONITOR_INIT(tcs);
+	THREAD_MONITOR_INIT(thread);
 }
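Everything _new_thread() needs is carved out of the caller-supplied stack region. A hedged usage sketch (argument order assumed from the parameter documentation above; entry_point is a hypothetical thread function):

    static char demo_stack[1024];

    extern void entry_point(void *p1, void *p2, void *p3);

    void spawn_demo(void)
    {
	    /* thread struct, coprocessor area and initial frame all live
	     * inside demo_stack after this call
	     */
	    _new_thread(demo_stack, sizeof(demo_stack), entry_point,
			NULL, NULL, NULL, 5 /* prio */, 0 /* options */);
    }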

File 3 of 4:

@@ -61,7 +61,7 @@ struct _caller_saved {
	 * and exception stubs (_ExcEnt/_ExcEnter) use the stack to save and
	 * restore the values of these registers in order to support interrupt
	 * nesting. The stubs do _not_ copy the saved values from the stack
-	 * into the TCS.
+	 * into the k_thread.
	 */
 };

@@ -77,7 +77,7 @@ struct _callee_saved {
	/*
	 * The following registers are considered non-volatile, i.e.
	 * callee-saved, but their values are pushed onto the stack rather than
-	 * stored in the TCS structure:
+	 * stored in the k_thread structure:
	 */
	uint32_t retval;        /* a2 */
	XtExcFrame *topOfStack; /* a1 = sp */
@@ -121,16 +121,14 @@ typedef struct s_preempCoprocReg {
 /*
  * The thread control structure definition. It contains the
- * various fields to manage a _single_ thread. The TCS will be aligned
- * to the appropriate architecture specific boundary via the
- * _new_thread() call.
+ * various fields to manage a _single_ thread.
  */
 struct _thread_arch {
	/*
	 * See the flag definitions above for valid bit settings. This
-	 * field must remain near the start of struct tcs, specifically before
-	 * any #ifdef'ed fields since the host tools currently use a fixed
-	 * offset to read the 'flags' field.
+	 * field must remain near the start of struct k_thread, specifically
+	 * before any #ifdef'ed fields since the host tools currently use a
+	 * fixed offset to read the 'flags' field.
	 */
	uint32_t flags;

 #ifdef CONFIG_THREAD_CUSTOM_DATA
@@ -140,24 +138,24 @@ struct _thread_arch {
	/* thread entry and parameters description */
	struct __thread_entry *entry;

-	/* next item in list of ALL fiber+tasks */
-	struct tcs *next_thread;
+	/* next item in list of ALL threads */
+	struct k_thread *next_thread;
 #endif
 #ifdef CONFIG_ERRNO
	int errno_var;
 #endif
	/*
	 * The location of all floating point related structures/fields MUST be
-	 * located at the end of struct tcs. This way only the fibers/tasks
+	 * located at the end of struct k_thread. This way only the threads
	 * that actually utilize non-integer capabilities need to account for
	 * the increased memory required for storing FP state when sizing
	 * stacks.
	 *
-	 * Given that stacks "grow down" on Xtensa, and the TCS is located at
-	 * the start of a thread's "workspace" memory, the stacks of
-	 * fibers/tasks that do not utilize floating point instruction can
-	 * effectively consume the memory occupied by the 'tCoopCoprocReg'
-	 * and 'tPreempCoprocReg' structures without ill effect.
+	 * Given that stacks "grow down" on Xtensa, and the k_thread is located
+	 * at the start of a thread's "workspace" memory, the stacks of threads
+	 * that do not utilize floating point instructions can effectively
+	 * consume the memory occupied by the 'tCoopCoprocReg' and
+	 * 'tPreempCoprocReg' structures without ill effect.
	 *
	 * TODO: Move Xtensa coprocessor's stack here to get rid of extra
	 * indirection
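This is the same trick the _K_THREAD_NO_FLOAT_SIZEOF symbol in the first file encodes: an integer-only thread sizes its region as if the trailing coprocessor areas were ordinary stack space. In rough arithmetic, reusing the sizeof expression from the offsets file above:

    /* space an integer-only thread must actually reserve for its control
     * structure; the coprocessor tail overlaps usable stack
     */
    size_t no_float = sizeof(struct k_thread)
		    - sizeof(tCoopCoprocReg)
		    - sizeof(tPreempCoprocReg);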

File 4 of 4:

@@ -49,14 +49,14 @@ static ALWAYS_INLINE void nanoArchInit(void)

 /**
  *
- * @brief Set the return value for the specified fiber (inline)
+ * @brief Set the return value for the specified thread (inline)
  *
- * @param fiber pointer to fiber
+ * @param thread pointer to thread
  * @param value value to set as return value
  *
  * The register used to store the return value from a function call invocation
- * is set to <value>. It is assumed that the specified <fiber> is pending, and
- * thus the fibers context is stored in its TCS.
+ * is set to <value>. It is assumed that the specified thread is pending, and
+ * thus the thread's context is stored in its k_thread.
  *
  * @return N/A
  */
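Given the retval slot documented in _callee_saved above (a2 on Xtensa), the body behind this comment amounts to a single store; a sketch consistent with the declaration visible in the next hunk header:

    static ALWAYS_INLINE void
    _set_thread_return_value(struct k_thread *thread, unsigned int value)
    {
	    thread->callee_saved.retval = value;	/* becomes a2 on resume */
    }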
@@ -69,10 +69,8 @@ _set_thread_return_value(struct k_thread *thread, unsigned int value)
 extern void nano_cpu_atomic_idle(unsigned int imask);

 /*
- * _IntLibInit() is called from the non-arch specific nanokernel function,
- * _nano_init(). The IA-32 nanokernel does not require any special
- * initialization of the interrupt subsystem. However, we still need to
- * provide an _IntLibInit() of some sort to prevent build errors.
+ * Required by the core kernel even though we don't have to do anything on this
+ * arch.
  */
 static inline void _IntLibInit(void)
 {
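The hunk ends at the opening brace; per the rewritten comment, the body that follows is just an empty stub, along these lines:

    static inline void _IntLibInit(void)
    {
	    /* nothing to initialize on this arch; the stub only satisfies
	     * the portable kernel's call site
	     */
    }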