kernel/x86: move INT_ACTIVE/EXC_ACTIVE to thread_state

They are internal states, not user-facing.

Also prefix them with an underscore, since they are kernel-internal symbols.

Change-Id: I53740e0d04a796ba1ccc409b5809438cdb189332
Signed-off-by: Benjamin Walsh <walsh.benj@gmail.com>
Benjamin Walsh authored on 2017-01-22 12:07:29 -05:00; committed by Anas Nashif
commit 4b65502448
5 changed files with 24 additions and 18 deletions
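
For orientation: this commit splits one byte of per-thread flags into two fields, moving the kernel-internal states into thread_state while user-facing options remain in execution_flags. Below is a minimal C sketch of the resulting layout, distilled from the header hunk at the bottom of the diff; the struct and its byte-wide field types are assumptions for illustration (the assembly only shows byte accesses via testb/orb/andb), not the real kernel definition.

    #include <stdint.h>
    #include <stdio.h>

    /* Kernel-internal state bits (values from the header hunk below) */
    #define _INT_ACTIVE      (1 << 7)
    #define _EXC_ACTIVE      (1 << 6)
    #define _INT_OR_EXC_MASK (_INT_ACTIVE | _EXC_ACTIVE)

    /* User-facing flag; bit 7 becomes reusable once the fields are split */
    #define K_SSE_REGS       (1 << 7)

    /* Illustrative stand-in for the thread base structure */
    struct base_sketch {
        uint8_t thread_state;    /* _INT_ACTIVE, _EXC_ACTIVE, ... */
        uint8_t execution_flags; /* K_SSE_REGS, ... */
    };

    int main(void)
    {
        struct base_sketch b = {
            .thread_state = _EXC_ACTIVE,
            .execution_flags = K_SSE_REGS,
        };

        /* The same numeric bit sits in both fields, yet cannot collide */
        printf("preempted=%d sse=%d\n",
               !!(b.thread_state & _INT_OR_EXC_MASK),
               !!(b.execution_flags & K_SSE_REGS));
        return 0;
    }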


@@ -125,7 +125,7 @@ SECTION_FUNC(TEXT, _exception_enter)
 	 * registers and the stack of the preempted thread.
 	 */
-	testb	$EXC_ACTIVE, _thread_offset_to_execution_flags(%edx)
+	testb	$_EXC_ACTIVE, _thread_offset_to_thread_state(%edx)
 	jne	alreadyInException
 	movl	%esp, _thread_offset_to_esf(%edx)
@@ -134,14 +134,14 @@ alreadyInException:
 #endif /* CONFIG_GDB_INFO */

 	/*
-	 * Set the EXC_ACTIVE bit in the TCS of the current thread.
+	 * Set the _EXC_ACTIVE state bit of the current thread.
 	 * This enables _Swap() to preserve the thread's FP registers
 	 * (where needed) if the exception handler causes a context switch.
 	 * It also indicates to debug tools that an exception is being
 	 * handled in the event of a context switch.
 	 */
-	orb	$EXC_ACTIVE, _thread_offset_to_execution_flags(%edx)
+	orb	$_EXC_ACTIVE, _thread_offset_to_thread_state(%edx)

 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
@@ -195,12 +195,12 @@ allDone:
 	jne	nestedException

 	/*
-	 * Clear the EXC_ACTIVE bit in the k_thread of the current execution
+	 * Clear the _EXC_ACTIVE bit in the k_thread of the current execution
 	 * context if we are not in a nested exception (ie, when we exit the
 	 * outermost exception).
 	 */
-	andb	$~EXC_ACTIVE, _thread_offset_to_execution_flags (%ecx)
+	andb	$~_EXC_ACTIVE, _thread_offset_to_thread_state(%ecx)
 nestedException:
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */


@@ -122,7 +122,7 @@ void k_float_enable(struct tcs *tcs, unsigned int options)
 		fp_owner = _kernel.current_fp;
 		if (fp_owner) {
-			if (fp_owner->base.execution_flags & INT_OR_EXC_MASK) {
+			if (fp_owner->base.thread_state & _INT_OR_EXC_MASK) {
 				_FpCtxSave(fp_owner);
 			}
 		}
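
The changed test decides whether the current FPU owner's registers still need an eager save: a thread cut off by an interrupt or exception never reached a cooperative _Swap(), so its FP context is still live in the hardware registers and must be captured before the FPU changes hands. A hedged C paraphrase of that check (the helper name is invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    #define _INT_ACTIVE      (1 << 7)
    #define _EXC_ACTIVE      (1 << 6)
    #define _INT_OR_EXC_MASK (_INT_ACTIVE | _EXC_ACTIVE)

    /* Sketch: true if the thread was stopped asynchronously, meaning no
     * cooperative save ever ran, so _FpCtxSave() must run now. */
    static bool fp_regs_still_in_hardware(uint8_t thread_state)
    {
        return (thread_state & _INT_OR_EXC_MASK) != 0;
    }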


@@ -290,14 +290,14 @@ alreadyOnIntStack:
 	je	noReschedule

 	/*
-	 * Set the INT_ACTIVE bit in the k_thread to allow the upcoming call to
+	 * Set the _INT_ACTIVE bit in the k_thread to allow the upcoming call to
 	 * _Swap() to determine whether non-floating registers need to be
 	 * preserved using the lazy save/restore algorithm, or to indicate to
 	 * debug tools that a preemptive context switch has occurred.
 	 */
 #if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)
-	orb	$INT_ACTIVE, _thread_offset_to_execution_flags(%edx)
+	orb	$_INT_ACTIVE, _thread_offset_to_thread_state(%edx)
 #endif

 	/*
@@ -340,12 +340,12 @@ alreadyOnIntStack:
 	defined(CONFIG_GDB_INFO) )

 	/*
 	 * _Swap() has restored the floating point registers, if needed.
-	 * Clear the INT_ACTIVE bit of the interrupted thread's TCS
+	 * Clear the _INT_ACTIVE bit in the interrupted thread's state
 	 * since it has served its purpose.
 	 */
 	movl	_kernel + _kernel_offset_to_current, %eax
-	andb	$~INT_ACTIVE, _thread_offset_to_execution_flags(%eax)
+	andb	$~_INT_ACTIVE, _thread_offset_to_thread_state(%eax)
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */

 	/* Restore volatile registers and return to the interrupted thread */
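
Note how the two hunks bracket the preemptive switch: _INT_ACTIVE is set on the interrupted thread just before _Swap() is invoked and cleared only once that thread is resumed. In outline, as pseudo-C (the function and its arguments are illustrative, not the real kernel API):

    #include <stdint.h>

    #define _INT_ACTIVE (1 << 7)

    extern void _Swap(unsigned int key); /* the context switch primitive */

    static void reschedule_from_interrupt(uint8_t *thread_state,
                                          unsigned int key)
    {
        *thread_state |= _INT_ACTIVE;  /* before _Swap(): request full save */
        _Swap(key);                    /* other threads may run in between */
        *thread_state &= ~_INT_ACTIVE; /* resumed; the bit served its purpose */
    }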


@@ -44,7 +44,7 @@
  * the non-volatile integer registers need to be saved in the TCS of the
  * outgoing thread. The restoration of the integer registers of the incoming
  * thread depends on whether that thread was preemptively context switched out.
- * The INT_ACTIVE and EXC_ACTIVE bits in the k_thread->execution_flags field
+ * The _INT_ACTIVE and _EXC_ACTIVE bits in the k_thread->thread_state field
  * will signify that the thread was preemptively context switched out, and thus
  * both the volatile and non-volatile integer registers need to be restored.
  *
@@ -187,7 +187,7 @@ SECTION_FUNC(TEXT, _Swap)
 	 * was preemptively context switched.
 	 */
-	testb	$INT_OR_EXC_MASK, _thread_offset_to_execution_flags(%ebx)
+	testb	$_INT_OR_EXC_MASK, _thread_offset_to_thread_state(%ebx)
 	je	restoreContext_NoFloatSave
@@ -227,7 +227,7 @@ restoreContext_NoFloatSave:
 	 * was previously preemptively context switched out.
 	 */
-	testb	$INT_OR_EXC_MASK, _thread_offset_to_execution_flags(%eax)
+	testb	$_INT_OR_EXC_MASK, _thread_offset_to_thread_state(%eax)
 	je	restoreContext_NoFloatRestore
 #ifdef CONFIG_SSE
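
Both testb sites implement the lazy save/restore policy described in the block comment of the first hunk above: a thread that entered _Swap() voluntarily let the ABI treat the volatile FP registers as clobbered, so only a preempted thread needs its full FP context saved on the way out and restored on the way back in. Reduced to C (a sketch, not kernel code):

    #include <stdbool.h>
    #include <stdint.h>

    #define _INT_ACTIVE      (1 << 7)
    #define _EXC_ACTIVE      (1 << 6)
    #define _INT_OR_EXC_MASK (_INT_ACTIVE | _EXC_ACTIVE)

    static bool was_preempted(uint8_t state)
    {
        return (state & _INT_OR_EXC_MASK) != 0;
    }

    /* Outline of _Swap()'s FP policy; the save/restore bodies are elided */
    static void swap_fp_sketch(uint8_t outgoing_state, uint8_t incoming_state)
    {
        if (was_preempted(outgoing_state)) {
            /* save the outgoing thread's volatile FP/SSE registers */
        }
        if (was_preempted(incoming_state)) {
            /* restore the incoming thread's volatile FP/SSE registers */
        }
    }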


@@ -42,21 +42,27 @@
 #define STACK_ALIGN_SIZE 4

-/* x86 Bitmask definitions for struct k_thread->execution_flags */
+/* x86 Bitmask definitions for struct k_thread->thread_state */

 /* executing context is interrupt handler */
-#define INT_ACTIVE (1 << 7)
+#define _INT_ACTIVE (1 << 7)

 /* executing context is exception handler */
-#define EXC_ACTIVE (1 << 6)
+#define _EXC_ACTIVE (1 << 6)

-#define INT_OR_EXC_MASK (INT_ACTIVE | EXC_ACTIVE)
+#define _INT_OR_EXC_MASK (_INT_ACTIVE | _EXC_ACTIVE)
+
+/* end - states */
+
+/* x86 Bitmask definitions for struct k_thread->execution_flags */

 #if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
 /* thread uses SSEx (and also FP) registers */
-#define K_SSE_REGS (1 << 5)
+#define K_SSE_REGS (1 << 7)
 #endif
+
+/* end - execution flags */

 #if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
 #define _FP_USER_MASK (K_FP_REGS | K_SSE_REGS)
 #elif defined(CONFIG_FP_SHARING)
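
A closing observation on the last hunk: K_SSE_REGS can move from bit 5 to bit 7 precisely because it no longer shares a field with _INT_ACTIVE. Two illustrative C11 compile-time checks (not part of the commit) capture the invariants the byte-wide assembly accesses rely on:

    #include <assert.h>

    #define _INT_ACTIVE      (1 << 7)
    #define _EXC_ACTIVE      (1 << 6)
    #define _INT_OR_EXC_MASK (_INT_ACTIVE | _EXC_ACTIVE)
    #define K_SSE_REGS       (1 << 7)

    /* testb/orb/andb touch a single byte, so the mask must fit in one */
    static_assert((_INT_OR_EXC_MASK & 0xff) == _INT_OR_EXC_MASK,
                  "state mask must fit a byte-wide access");

    /* Equal values are fine: the bits live in different struct fields */
    static_assert(K_SSE_REGS == _INT_ACTIVE,
                  "bit 7 is reused across thread_state and execution_flags");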