kernel/x86: move INT_ACTIVE/EXC_ACTIVE to thread_state

They are internal states, not user-facing. Also prepend an underscore, since
they are kernel-internal symbols.

Change-Id: I53740e0d04a796ba1ccc409b5809438cdb189332
Signed-off-by: Benjamin Walsh <walsh.benj@gmail.com>
parent 867f8ee371
commit 4b65502448

5 changed files with 24 additions and 18 deletions
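In C terms, the commit splits the per-thread bookkeeping into two roles: kernel-internal states (now held in thread_state and named with a leading underscore) and the remaining, user-facing execution flags. A minimal, self-contained sketch of the resulting usage follows; the bit names, values, and field names come from the diff below, while the struct and helper here are hypothetical stand-ins, not Zephyr definitions:

    /* Bit names and values as defined in the header hunk of this commit. */
    #define _EXC_ACTIVE      (1 << 6)   /* executing context is exception handler */
    #define _INT_ACTIVE      (1 << 7)   /* executing context is interrupt handler */
    #define _INT_OR_EXC_MASK (_INT_ACTIVE | _EXC_ACTIVE)

    /* Hypothetical stand-in for the per-thread bookkeeping referenced below. */
    struct thread_base_sketch {
            unsigned char thread_state;     /* kernel-internal states */
            unsigned char execution_flags;  /* user-facing flags, e.g. K_SSE_REGS */
    };

    /* Hypothetical helper: was this thread stopped inside an ISR or exception? */
    static inline int was_preempted(const struct thread_base_sketch *base)
    {
            return (base->thread_state & _INT_OR_EXC_MASK) != 0;
    }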
@@ -125,7 +125,7 @@ SECTION_FUNC(TEXT, _exception_enter)
 * registers and the stack of the preempted thread.
 */

-testb $EXC_ACTIVE, _thread_offset_to_execution_flags(%edx)
+testb $_EXC_ACTIVE, _thread_offset_to_thread_state(%edx)
 jne alreadyInException
 movl %esp, _thread_offset_to_esf(%edx)

@@ -134,14 +134,14 @@ alreadyInException:
 #endif /* CONFIG_GDB_INFO */

 /*
-* Set the EXC_ACTIVE bit in the TCS of the current thread.
+* Set the _EXC_ACTIVE state bit of the current thread.
 * This enables _Swap() to preserve the thread's FP registers
 * (where needed) if the exception handler causes a context switch.
 * It also indicates to debug tools that an exception is being
 * handled in the event of a context switch.
 */

-orb $EXC_ACTIVE, _thread_offset_to_execution_flags(%edx)
+orb $_EXC_ACTIVE, _thread_offset_to_thread_state(%edx)

 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */

@@ -195,12 +195,12 @@ allDone:
 jne nestedException

 /*
-* Clear the EXC_ACTIVE bit in the k_thread of the current execution
+* Clear the _EXC_ACTIVE bit in the k_thread of the current execution
 * context if we are not in a nested exception (ie, when we exit the
 * outermost exception).
 */

-andb $~EXC_ACTIVE, _thread_offset_to_execution_flags (%ecx)
+andb $~_EXC_ACTIVE, _thread_offset_to_thread_state(%ecx)

 nestedException:
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */
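Read as a whole, the exception stub records the exception stack frame pointer only for the outermost exception, marks the current thread as _EXC_ACTIVE while the handler runs, and clears the bit again when the outermost exception unwinds. A rough pseudo-C rendering of that sequence (a sketch only; the authoritative code is the assembly above, 'current' stands in for the thread reached through %edx/%ecx, and the exact member holding the ESF pointer is assumed):

    /* exception entry */
    if (!(current->base.thread_state & _EXC_ACTIVE)) {
            current->esf = esf;     /* outermost exception only; member name assumed */
    }
    current->base.thread_state |= _EXC_ACTIVE;   /* lets _Swap() and debuggers know */

    /* exception exit, outermost exception only */
    current->base.thread_state &= ~_EXC_ACTIVE;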
@@ -122,7 +122,7 @@ void k_float_enable(struct tcs *tcs, unsigned int options)

 	fp_owner = _kernel.current_fp;
 	if (fp_owner) {
-		if (fp_owner->base.execution_flags & INT_OR_EXC_MASK) {
+		if (fp_owner->base.thread_state & _INT_OR_EXC_MASK) {
 			_FpCtxSave(fp_owner);
 		}
 	}
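The one C-language user of the mask changes mechanically: the test moves from execution_flags to thread_state. Reduced to its core (a paraphrase built around the hunk; the declared type of fp_owner and the lines outside the hunk are assumptions):

    void k_float_enable(struct tcs *tcs, unsigned int options)
    {
            struct tcs *fp_owner = _kernel.current_fp;   /* type assumed */

            if (fp_owner) {
                    /* Save the owner's FP context only if it was preempted
                     * inside an interrupt or exception handler. */
                    if (fp_owner->base.thread_state & _INT_OR_EXC_MASK) {
                            _FpCtxSave(fp_owner);
                    }
            }
            /* ... */
    }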
@@ -290,14 +290,14 @@ alreadyOnIntStack:
 je noReschedule

 /*
-* Set the INT_ACTIVE bit in the k_thread to allow the upcoming call to
+* Set the _INT_ACTIVE bit in the k_thread to allow the upcoming call to
 * _Swap() to determine whether non-floating registers need to be
 * preserved using the lazy save/restore algorithm, or to indicate to
 * debug tools that a preemptive context switch has occurred.
 */

 #if defined(CONFIG_FP_SHARING) || defined(CONFIG_GDB_INFO)
-orb $INT_ACTIVE, _thread_offset_to_execution_flags(%edx)
+orb $_INT_ACTIVE, _thread_offset_to_thread_state(%edx)
 #endif

 /*
@@ -340,12 +340,12 @@ alreadyOnIntStack:
 defined(CONFIG_GDB_INFO) )
 /*
 * _Swap() has restored the floating point registers, if needed.
-* Clear the INT_ACTIVE bit of the interrupted thread's TCS
+* Clear the _INT_ACTIVE bit in the interrupted thread's state
 * since it has served its purpose.
 */

 movl _kernel + _kernel_offset_to_current, %eax
-andb $~INT_ACTIVE, _thread_offset_to_execution_flags(%eax)
+andb $~_INT_ACTIVE, _thread_offset_to_thread_state(%eax)
 #endif /* CONFIG_FP_SHARING || CONFIG_GDB_INFO */

 /* Restore volatile registers and return to the interrupted thread */
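The interrupt path mirrors the exception path: _INT_ACTIVE is set on the interrupted thread just before the preemptive call to _Swap(), and cleared again once _Swap() has returned to that thread and restored its floating-point registers. Condensed into pseudo-C (field and symbol names follow the diff; the _Swap() arguments and the surrounding control flow are elided):

    /* before preemptively switching away from the interrupted thread */
    current->base.thread_state |= _INT_ACTIVE;
    _Swap(/* ... */);
    /* execution resumes here when the thread is switched back in;
     * the flag has served its purpose */
    current->base.thread_state &= ~_INT_ACTIVE;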
@@ -44,7 +44,7 @@
 * the non-volatile integer registers need to be saved in the TCS of the
 * outgoing thread. The restoration of the integer registers of the incoming
 * thread depends on whether that thread was preemptively context switched out.
-* The INT_ACTIVE and EXC_ACTIVE bits in the k_thread->execution_flags field
+* The _INT_ACTIVE and _EXC_ACTIVE bits in the k_thread->thread_state field
 * will signify that the thread was preemptively context switched out, and thus
 * both the volatile and non-volatile integer registers need to be restored.
 *
@@ -187,7 +187,7 @@ SECTION_FUNC(TEXT, _Swap)
 * was preemptively context switched.
 */

-testb $INT_OR_EXC_MASK, _thread_offset_to_execution_flags(%ebx)
+testb $_INT_OR_EXC_MASK, _thread_offset_to_thread_state(%ebx)
 je restoreContext_NoFloatSave


@@ -227,7 +227,7 @@ restoreContext_NoFloatSave:
 * was previously preemptively context switched out.
 */

-testb $INT_OR_EXC_MASK, _thread_offset_to_execution_flags(%eax)
+testb $_INT_OR_EXC_MASK, _thread_offset_to_thread_state(%eax)
 je restoreContext_NoFloatRestore

 #ifdef CONFIG_SSE
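Inside _Swap() the combined mask answers one question for both hunks above: was this thread preemptively context switched out, so that its full floating-point state must be saved or restored? A small compilable sketch of that predicate, with a hypothetical struct name (which thread each testb inspects is decided by code outside the hunks):

    #define _INT_ACTIVE      (1 << 7)
    #define _EXC_ACTIVE      (1 << 6)
    #define _INT_OR_EXC_MASK (_INT_ACTIVE | _EXC_ACTIVE)

    struct thread_sketch {                  /* hypothetical stand-in */
            unsigned char thread_state;
    };

    /* The predicate both testb instructions implement: a thread still marked
     * _INT_ACTIVE or _EXC_ACTIVE was preempted, so its volatile FP registers
     * are live and must be handled by the lazy save/restore path. */
    static inline int fp_state_must_be_handled(const struct thread_sketch *t)
    {
            return (t->thread_state & _INT_OR_EXC_MASK) != 0;
    }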
@@ -42,21 +42,27 @@

 #define STACK_ALIGN_SIZE 4

-/* x86 Bitmask definitions for struct k_thread->execution_flags */
+/* x86 Bitmask definitions for struct k_thread->thread_state */

 /* executing context is interrupt handler */
-#define INT_ACTIVE (1 << 7)
+#define _INT_ACTIVE (1 << 7)

 /* executing context is exception handler */
-#define EXC_ACTIVE (1 << 6)
+#define _EXC_ACTIVE (1 << 6)

-#define INT_OR_EXC_MASK (INT_ACTIVE | EXC_ACTIVE)
+#define _INT_OR_EXC_MASK (_INT_ACTIVE | _EXC_ACTIVE)

+/* end - states */
+
+/* x86 Bitmask definitions for struct k_thread->execution_flags */
+
 #if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
 /* thread uses SSEx (and also FP) registers */
-#define K_SSE_REGS (1 << 5)
+#define K_SSE_REGS (1 << 7)
 #endif

+/* end - execution flags */
+
 #if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
 #define _FP_USER_MASK (K_FP_REGS | K_SSE_REGS)
 #elif defined(CONFIG_FP_SHARING)
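With _INT_ACTIVE and _EXC_ACTIVE moved out of execution_flags, bit 7 of that field is freed, which is presumably why K_SSE_REGS can move from bit 5 to bit 7 in the same change. An illustrative sketch of why the reuse is safe (the struct and function here are hypothetical; only the macro names and values come from the hunk):

    #define _INT_ACTIVE (1 << 7)    /* lives in k_thread->thread_state */
    #define K_SSE_REGS  (1 << 7)    /* lives in k_thread->execution_flags */

    struct flags_sketch {           /* hypothetical stand-in for the two fields */
            unsigned char thread_state;
            unsigned char execution_flags;
    };

    void mark_sketch(struct flags_sketch *t)
    {
            /* Same numeric value, different fields, so the bits never alias. */
            t->thread_state    |= _INT_ACTIVE;  /* "running in interrupt handler" */
            t->execution_flags |= K_SSE_REGS;   /* "thread uses SSEx registers" */
    }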