arch/x86/ia32: move IA32 thread state to _thread_arch
There are not enough bits in k_thread.thread_state with SMP enabled, and
the field is (should be) private to the scheduler, anyway. So move state
bits to the _thread_arch where they belong. While we're at it, refactor
some offset data w/r/t _thread_arch because it can be shared between
32- and 64-bit subarches.

Signed-off-by: Charles E. Youse <charles.youse@intel.com>
parent a224998355
commit a95c94cfe2

11 changed files with 34 additions and 34 deletions
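In rough C terms, the change swaps two bits of the scheduler-owned
k_thread.thread_state for a byte the arch layer owns outright. A
condensed sketch of the definitions this diff introduces (u8_t is
Zephyr's unsigned 8-bit typedef; the full context is in the header
hunks further down):

/* Condensed from the hunks below: per-arch flag storage replacing
 * the _INT_ACTIVE/_EXC_ACTIVE bits in k_thread.thread_state. */
#define X86_THREAD_FLAG_INT 0x01 /* set while servicing an interrupt */
#define X86_THREAD_FLAG_EXC 0x02 /* set while servicing an exception */
#define X86_THREAD_FLAG_ALL (X86_THREAD_FLAG_INT | X86_THREAD_FLAG_EXC)

struct _thread_arch {
	u8_t flags;
	/* ... existing members (excNestCount, FP context, ...) elided ... */
};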
@@ -133,14 +133,14 @@ SECTION_FUNC(TEXT, _exception_enter)
 	incl	_thread_offset_to_excNestCount(%edx)

 	/*
-	 * Set the _EXC_ACTIVE state bit of the current thread.
-	 * This enables z_swap() to preserve the thread's FP registers
-	 * (where needed) if the exception handler causes a context switch.
-	 * It also indicates to debug tools that an exception is being
-	 * handled in the event of a context switch.
+	 * Set X86_THREAD_FLAG_EXC in the current thread. This enables
+	 * z_swap() to preserve the thread's FP registers (where needed)
+	 * if the exception handler causes a context switch. It also
+	 * indicates to debug tools that an exception is being handled
+	 * in the event of a context switch.
 	 */

-	orb	$_EXC_ACTIVE, _thread_offset_to_thread_state(%edx)
+	orb	$X86_THREAD_FLAG_EXC, _thread_offset_to_flags(%edx)

 #endif /* CONFIG_LAZY_FP_SHARING */

@@ -187,12 +187,12 @@ allDone:
 	jne	nestedException

 	/*
-	 * Clear the _EXC_ACTIVE bit in the k_thread of the current execution
+	 * Clear X86_THREAD_FLAG_EXC in the k_thread of the current execution
 	 * context if we are not in a nested exception (ie, when we exit the
 	 * outermost exception).
 	 */

-	andb	$~_EXC_ACTIVE, _thread_offset_to_thread_state(%ecx)
+	andb	$~X86_THREAD_FLAG_EXC, _thread_offset_to_flags(%ecx)

 nestedException:
 #endif /* CONFIG_LAZY_FP_SHARING */
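The two hunks above form a set/clear pair: the flag goes up on every
exception entry (the orb is idempotent for nested exceptions) and
comes down only on the outermost exit, guarded by the jne
nestedException test. A minimal C rendering of that pairing, with
hypothetical helper names standing in for the assembly stubs:

/* Hypothetical C rendering of the excstub.S pair above; the real
 * code does this in assembly on the current thread (%edx/%ecx). */
static void exc_enter(struct k_thread *t)
{
	t->arch.excNestCount++;                 /* incl ...excNestCount */
	t->arch.flags |= X86_THREAD_FLAG_EXC;   /* orb  $...FLAG_EXC    */
}

static void exc_exit(struct k_thread *t)
{
	if (--t->arch.excNestCount == 0) {
		/* outermost exit only: andb $~...FLAG_EXC */
		t->arch.flags &= ~X86_THREAD_FLAG_EXC;
	}
}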
@@ -208,7 +208,7 @@ void k_float_enable(struct k_thread *thread, unsigned int options)

 	fp_owner = _kernel.current_fp;
 	if (fp_owner != NULL) {
-		if ((fp_owner->base.thread_state & _INT_OR_EXC_MASK) != 0) {
+		if ((fp_owner->arch.flags & X86_THREAD_FLAG_ALL) != 0) {
 			FpCtxSave(fp_owner);
 		}
 	}
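This is the same predicate __swap() applies below: either flag set
means the FP owner was switched out preemptively, so the FP registers
may still hold its live context and must be saved before another
thread touches them. Sketched as a hypothetical inline:

/* Hypothetical helper naming the test used here and in __swap():
 * true if the thread was context switched out preemptively, i.e.
 * from an interrupt or exception handler. */
static inline bool preempted_out(const struct k_thread *t)
{
	return (t->arch.flags & X86_THREAD_FLAG_ALL) != 0;
}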
@@ -227,14 +227,14 @@ alreadyOnIntStack:
 	je	noReschedule

 	/*
-	 * Set the _INT_ACTIVE bit in the k_thread to allow the upcoming call to
-	 * __swap() to determine whether non-floating registers need to be
+	 * Set X86_THREAD_FLAG_INT bit in k_thread to allow the upcoming call
+	 * to __swap() to determine whether non-floating registers need to be
 	 * preserved using the lazy save/restore algorithm, or to indicate to
 	 * debug tools that a preemptive context switch has occurred.
 	 */

 #if defined(CONFIG_LAZY_FP_SHARING)
-	orb	$_INT_ACTIVE, _thread_offset_to_thread_state(%edx)
+	orb	$X86_THREAD_FLAG_INT, _thread_offset_to_flags(%edx)
 #endif

 	/*
@@ -265,12 +265,12 @@ alreadyOnIntStack:
 #if defined(CONFIG_LAZY_FP_SHARING)
 	/*
 	 * __swap() has restored the floating point registers, if needed.
-	 * Clear the _INT_ACTIVE bit in the interrupted thread's state
+	 * Clear X86_THREAD_FLAG_INT in the interrupted thread's state
 	 * since it has served its purpose.
 	 */

 	movl	_kernel + _kernel_offset_to_current, %eax
-	andb	$~_INT_ACTIVE, _thread_offset_to_thread_state(%eax)
+	andb	$~X86_THREAD_FLAG_INT, _thread_offset_to_flags(%eax)
 #endif /* CONFIG_LAZY_FP_SHARING */

 	/* Restore volatile registers and return to the interrupted thread */
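The interrupt path mirrors the exception pair rendered earlier:
X86_THREAD_FLAG_INT is raised just before the __swap() that performs
the reschedule and cleared once __swap() returns control to the
interrupted thread. A matching hypothetical sketch:

/* Hypothetical mirror of the intstub.S pair above; the flag brackets
 * the preemptive __swap() call rather than a nesting counter. */
static void int_preempt_mark(struct k_thread *t)
{
	t->arch.flags |= X86_THREAD_FLAG_INT;   /* before __swap()        */
}

static void int_preempt_clear(struct k_thread *t)
{
	t->arch.flags &= ~X86_THREAD_FLAG_INT;  /* after __swap() returns */
}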
@@ -41,7 +41,7 @@
  * the non-volatile integer registers need to be saved in the TCS of the
  * outgoing thread. The restoration of the integer registers of the incoming
  * thread depends on whether that thread was preemptively context switched out.
- * The _INT_ACTIVE and _EXC_ACTIVE bits in the k_thread->thread_state field
+ * The X86_THREAD_FLAG_INT and _EXC bits in the k_thread->arch.flags field
  * will signify that the thread was preemptively context switched out, and thus
  * both the volatile and non-volatile integer registers need to be restored.
  *
@@ -196,7 +196,7 @@ SECTION_FUNC(TEXT, __swap)


 	/*
-	 * Determine whether the incoming thread utilizes floating point registers
+	 * Determine whether the incoming thread utilizes floating point regs
 	 * _and_ whether the thread was context switched out preemptively.
 	 */

@@ -236,7 +236,7 @@ SECTION_FUNC(TEXT, __swap)
 	 * was preemptively context switched.
 	 */

-	testb	$_INT_OR_EXC_MASK, _thread_offset_to_thread_state(%ebx)
+	testb	$X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%ebx)
 	je	restoreContext_NoFloatSave


@@ -276,7 +276,7 @@ restoreContext_NoFloatSave:
 	 * was previously preemptively context switched out.
 	 */

-	testb	$_INT_OR_EXC_MASK, _thread_offset_to_thread_state(%eax)
+	testb	$X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%eax)
 	je	restoreContext_NoFloatRestore

 #ifdef CONFIG_SSE
@@ -263,4 +263,6 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 #if defined(CONFIG_LAZY_FP_SHARING)
 	thread->arch.excNestCount = 0;
 #endif /* CONFIG_LAZY_FP_SHARING */
+
+	thread->arch.flags = 0;
 }
@@ -18,7 +18,6 @@ GEN_OFFSET_SYM(_callee_saved_t, rip);
 GEN_OFFSET_SYM(_callee_saved_t, rflags);
 GEN_OFFSET_SYM(_callee_saved_t, rax);

-GEN_OFFSET_SYM(_thread_arch_t, flags);
 GEN_OFFSET_SYM(_thread_arch_t, rcx);
 GEN_OFFSET_SYM(_thread_arch_t, rdx);
 GEN_OFFSET_SYM(_thread_arch_t, rsi);
@@ -13,6 +13,8 @@
 #include "ia32_offsets.c"
 #endif

+GEN_OFFSET_SYM(_thread_arch_t, flags);
+
 /* size of struct x86_multiboot_info, used by crt0.S/locore.S */

 GEN_ABSOLUTE_SYM(__X86_MULTIBOOT_INFO_SIZEOF,
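Hoisting GEN_OFFSET_SYM(_thread_arch_t, flags) into the shared
offsets file (paired with its removal from the 64-bit-only file in
the previous hunk) is the offset refactor the commit message
mentions: the symbol is now generated once for both subarches.
GEN_OFFSET_SYM is Zephyr's existing offset-generation macro; roughly,
the added line expands to an absolute symbol built from offsetof():

/* Approximate expansion of GEN_OFFSET_SYM(_thread_arch_t, flags);
 * the real macro is defined in Zephyr's gen_offset.h. */
GEN_ABSOLUTE_SYM(___thread_arch_t_flags_OFFSET,
		 offsetof(_thread_arch_t, flags));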
@@ -50,18 +50,6 @@

 #define STACK_ALIGN_SIZE 4

-/* x86 Bitmask definitions for struct k_thread.thread_state */
-
-/* executing context is interrupt handler */
-#define _INT_ACTIVE (1 << 7)
-
-/* executing context is exception handler */
-#define _EXC_ACTIVE (1 << 6)
-
-#define _INT_OR_EXC_MASK (_INT_ACTIVE | _EXC_ACTIVE)
-
-/* end - states */
-
 #if defined(CONFIG_LAZY_FP_SHARING) && defined(CONFIG_SSE)
 #define _FP_USER_MASK (K_FP_REGS | K_SSE_REGS)
 #elif defined(CONFIG_LAZY_FP_SHARING)
@@ -33,6 +33,14 @@
 #define FP_REG_SET_ALIGN 4
 #endif

+/*
+ * Bits for _thread_arch.flags, see their use in intstub.S et al.
+ */
+
+#define X86_THREAD_FLAG_INT 0x01
+#define X86_THREAD_FLAG_EXC 0x02
+#define X86_THREAD_FLAG_ALL (X86_THREAD_FLAG_INT | X86_THREAD_FLAG_EXC)
+
 #ifndef _ASMLANGUAGE
 #include <stdint.h>
 #include <ia32/mmustructs.h>
@@ -198,6 +206,7 @@ typedef struct s_preempFloatReg {
  */

 struct _thread_arch {
+	u8_t flags;

 #if defined(CONFIG_LAZY_FP_SHARING)
 	/*
@@ -38,9 +38,6 @@
 #define _thread_offset_to_rax \
 	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_rax_OFFSET)

-#define _thread_offset_to_flags \
-	(___thread_t_arch_OFFSET + ___thread_arch_t_flags_OFFSET)
-
 #define _thread_offset_to_rcx \
 	(___thread_t_arch_OFFSET + ___thread_arch_t_rcx_OFFSET)

@@ -12,4 +12,7 @@
 #include <ia32/offsets_short_arch.h>
 #endif

+#define _thread_offset_to_flags \
+	(___thread_t_arch_OFFSET + ___thread_arch_t_flags_OFFSET)
+
 #endif /* ZEPHYR_ARCH_X86_INCLUDE_OFFSETS_SHORT_ARCH_H_ */
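With the offset macro likewise shared (last hunk), assembly on either
subarch addresses the field identically: the IA-32 stub earlier does
orb $X86_THREAD_FLAG_EXC, _thread_offset_to_flags(%edx), which the
assembler resolves to a base-plus-displacement access of
k_thread.arch.flags, because the macro sums the two generated offsets:

/* How the shared macro composes (verbatim from the hunk above):
 * offset of .arch within struct k_thread, plus offset of .flags
 * within struct _thread_arch. */
#define _thread_offset_to_flags \
	(___thread_t_arch_OFFSET + ___thread_arch_t_flags_OFFSET)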