arch/x86: (Intel64) migrate from __swap to z_arch_switch()

The latter primitive is required for SMP.

Signed-off-by: Charles E. Youse <charles.youse@intel.com>
Authored by Charles E. Youse on 2019-09-23 13:57:12 -04:00, committed by Andrew Boie
commit 074ce889fb
4 changed files with 43 additions and 46 deletions
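
For context, the contract adopted here can be summarized in C. The sketch below is illustrative only: the helper do_cooperative_switch() and its parameter names are hypothetical and not part of this commit. On this port a thread's switch_handle is simply a pointer to its own struct k_thread, so the handle written through switched_from is the outgoing thread itself.

#include <kernel.h>

/* Declared by this commit in the arch header (see last file below). */
extern void z_arch_switch(void *switch_to, void **switched_from);

/* Hypothetical helper, for illustration only: how a caller such as the
 * scheduler's swap path might drive the primitive.
 */
static inline void do_cooperative_switch(struct k_thread *old_thread,
					 struct k_thread *new_thread)
{
	/* Saves old_thread's callee-saved registers, stores its handle
	 * (a pointer to old_thread itself) through switched_from, and
	 * resumes new_thread from its saved context.
	 */
	z_arch_switch(new_thread->switch_handle, &old_thread->switch_handle);
}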

View file

@@ -45,6 +45,8 @@ config X86_LONGMODE
 	prompt "Run in long (64-bit) mode"
 	default n
 	select 64BIT
+	select USE_SWITCH_SUPPORTED
+	select USE_SWITCH

 config MAX_IRQ_LINES
 	int "Number of IRQ lines"

View file

@@ -174,25 +174,24 @@ mxcsr: .long X86_MXCSR_SANE
 #endif

 /*
- * XXX: describe __swap, __resume, stacks
+ * void z_arch_switch(void *switch_to, void **switched_from);
+ *
+ * Note that switch_handle for us is simply a pointer to the containing
+ * 'struct k_thread', thus:
+ *
+ * RDI = (struct k_thread *) switch_to
+ * RSI = (struct k_thread **) switched_from
  */

-.globl _k_neg_eagain	/* from errno.c: int _k_neg_eagain = -EAGAIN; */
-
-.globl __swap
-__swap:
-	movq %gs:__x86_tss64_t_cpu_OFFSET, %rsi
-	movq ___cpu_t_current_OFFSET(%rsi), %rsi
+.globl z_arch_switch
+z_arch_switch:
+	movq (%rsi), %rsi

 	andb $~X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rsi)
-	movl _k_neg_eagain, %eax
-	movl %eax, _thread_offset_to_rax(%rsi)

 	popq %rax
 	movq %rax, _thread_offset_to_rip(%rsi)
 	movq %rsp, _thread_offset_to_rsp(%rsi)
-	movl %edi, %edi		/* N.B.: zero extend */
-	movq %rdi, _thread_offset_to_rflags(%rsi)
 	movq %rbx, _thread_offset_to_rbx(%rsi)
 	movq %rbp, _thread_offset_to_rbp(%rsi)
 	movq %r12, _thread_offset_to_r12(%rsi)
@@ -207,40 +206,36 @@ __swap:
 /*
  * Entry:
  * RSP = top of _interrupt_stack
+ * RDI = (struct k_thread *) thread to resume
  */

 __resume:
-	movq $_kernel, %rdi
-	movq _kernel_offset_to_ready_q_cache(%rdi), %rsi
-	movq %gs:__x86_tss64_t_cpu_OFFSET, %rdi
-	movq %rsi, ___cpu_t_current_OFFSET(%rdi)
-
 	pushq $X86_KERNEL_DS_64			/* SS */
-	pushq _thread_offset_to_rsp(%rsi)	/* RSP */
-	pushq _thread_offset_to_rflags(%rsi)	/* RFLAGS */
+	pushq _thread_offset_to_rsp(%rdi)	/* RSP */
+	pushq _thread_offset_to_rflags(%rdi)	/* RFLAGS */
 	pushq $X86_KERNEL_CS_64			/* CS */
-	pushq _thread_offset_to_rip(%rsi)	/* RIP */
+	pushq _thread_offset_to_rip(%rdi)	/* RIP */

-	movq _thread_offset_to_rbx(%rsi), %rbx
-	movq _thread_offset_to_rbp(%rsi), %rbp
-	movq _thread_offset_to_r12(%rsi), %r12
-	movq _thread_offset_to_r13(%rsi), %r13
-	movq _thread_offset_to_r14(%rsi), %r14
-	movq _thread_offset_to_r15(%rsi), %r15
-	movq _thread_offset_to_rax(%rsi), %rax
+	movq _thread_offset_to_rbx(%rdi), %rbx
+	movq _thread_offset_to_rbp(%rdi), %rbp
+	movq _thread_offset_to_r12(%rdi), %r12
+	movq _thread_offset_to_r13(%rdi), %r13
+	movq _thread_offset_to_r14(%rdi), %r14
+	movq _thread_offset_to_r15(%rdi), %r15
+	movq _thread_offset_to_rax(%rdi), %rax

-	testb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rsi)
+	testb $X86_THREAD_FLAG_ALL, _thread_offset_to_flags(%rdi)
 	jz 1f

-	fxrstor _thread_offset_to_sse(%rsi)
-	movq _thread_offset_to_rcx(%rsi), %rcx
-	movq _thread_offset_to_rdx(%rsi), %rdx
-	movq _thread_offset_to_rdi(%rsi), %rdi
-	movq _thread_offset_to_r8(%rsi), %r8
-	movq _thread_offset_to_r9(%rsi), %r9
-	movq _thread_offset_to_r10(%rsi), %r10
-	movq _thread_offset_to_r11(%rsi), %r11
-	movq _thread_offset_to_rsi(%rsi), %rsi	/* do last :-) */
+	fxrstor _thread_offset_to_sse(%rdi)
+	movq _thread_offset_to_rcx(%rdi), %rcx
+	movq _thread_offset_to_rdx(%rdi), %rdx
+	movq _thread_offset_to_rsi(%rdi), %rsi
+	movq _thread_offset_to_r8(%rdi), %r8
+	movq _thread_offset_to_r9(%rdi), %r9
+	movq _thread_offset_to_r10(%rdi), %r10
+	movq _thread_offset_to_r11(%rdi), %r11
+	movq _thread_offset_to_rdi(%rdi), %rdi	/* do last :-) */

 1:	iretq
@@ -506,15 +501,18 @@ irq_dispatch:
 	movl %eax, (CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_EOI)
 #endif

-#ifdef CONFIG_STACK_SENTINEL
-	call z_check_stack_sentinel
-#endif
 	movq %gs:__x86_tss64_t_cpu_OFFSET, %rsi
 	cli
 	addq $CONFIG_ISR_SUBSTACK_SIZE, %gs:__x86_tss64_t_ist1_OFFSET
 	decl ___cpu_t_nested_OFFSET(%rsi)
-	/* if not nested, exit via __resume (might change threads) */
-	jz __resume
+	jnz irq_exit_nested
+
+	/* not nested; ask the scheduler who's up next and resume it */
+
+	movq ___cpu_t_current_OFFSET(%rsi), %rdi
+	call z_get_next_switch_handle
+	movq %rax, %rdi
+	jmp __resume

 irq_exit_nested:
 	fxrstor (%rsp)
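
In C terms, the new non-nested interrupt-exit tail above does roughly the following. This is an illustrative sketch only: the function irq_exit_not_nested_sketch() is hypothetical, and the real logic stays in assembly. Because switch handles are struct k_thread pointers on this architecture, the value returned by z_get_next_switch_handle() can be handed straight to __resume.

#include <kernel.h>

/* Real scheduler entry point; takes and returns switch handles. */
extern void *z_get_next_switch_handle(void *interrupted);

/* In reality an assembly label reached by jmp with the thread in RDI;
 * declared as a function here only to make the sketch self-contained.
 */
extern void __resume(struct k_thread *thread);

static void irq_exit_not_nested_sketch(struct k_thread *interrupted)
{
	struct k_thread *next;

	/* Ask the scheduler which thread should run next, identifying
	 * the interrupted thread by its switch handle (itself).
	 */
	next = z_get_next_switch_handle(interrupted);

	__resume(next);	/* restores registers and returns via iretq */
}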

View file

@@ -32,4 +32,5 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 	x86_sse_init(thread);

 	thread->arch.flags = X86_THREAD_FLAG_ALL;
+	thread->switch_handle = thread;
 }

View file

@@ -8,11 +8,7 @@

 #ifndef _ASMLANGUAGE

-static ALWAYS_INLINE void
-z_set_thread_return_value(struct k_thread *thread, unsigned int value)
-{
-	thread->callee_saved.rax = value;
-}
+extern void z_arch_switch(void *switch_to, void **switched_from);

 static inline void kernel_arch_init(void)
 {