x86: move some per-cpu initialization to C

No reason we need to stay in the assembly domain once we have GS and a
stack set up.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>

parent a594ca7c8f
commit 2690c9e550

4 changed files with 38 additions and 38 deletions
@@ -117,3 +117,28 @@ void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
 	while (x86_cpuboot[cpu_num].ready == 0) {
 	}
 }
+
+/* Per-CPU initialization, C domain. On the first CPU, z_x86_prep_c is the
+ * next step. For other CPUs it is probably smp_init_top().
+ */
+FUNC_NORETURN void z_x86_cpu_init(struct x86_cpuboot *cpuboot)
+{
+	x86_sse_init(NULL);
+
+	z_loapic_enable();
+
+#ifdef CONFIG_USERSPACE
+	/* Set landing site for 'syscall' instruction */
+	z_x86_msr_write(X86_LSTAR_MSR, (u64_t)z_x86_syscall_entry_stub);
+
+	/* Set segment descriptors for syscall privilege transitions */
+	z_x86_msr_write(X86_STAR_MSR, (u64_t)X86_STAR_UPPER << 32);
+
+	/* Mask applied to RFLAGS when making a syscall */
+	z_x86_msr_write(X86_FMASK_MSR, EFLAGS_SYSCALL);
+#endif
+
+	/* Enter kernel, never return */
+	cpuboot->ready++;
+	cpuboot->fn(cpuboot->arg);
+}
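[Note: z_x86_msr_write() is defined elsewhere in the tree, not in this
diff. As a rough sketch of what such a wrapper typically looks like,
assuming GCC-style inline assembly (the name here is illustrative):]

/* wrmsr takes the MSR index in ECX and the 64-bit value split across
 * EDX (bits 63:32) and EAX (bits 31:0).
 */
static inline void msr_write_sketch(unsigned int msr,
				    unsigned long long data)
{
	unsigned int lo = (unsigned int)data;
	unsigned int hi = (unsigned int)(data >> 32);

	__asm__ volatile ("wrmsr" :: "c" (msr), "a" (lo), "d" (hi)
			  : "memory");
}

This is the same EDX:EAX dance the removed assembly in the next hunk
performs by hand for LSTAR (movq, shrq $32, wrmsr).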
@@ -162,42 +162,9 @@ go64:	movl %cr4, %eax		/* enable PAE and SSE */
 	rep stosq
 #endif
 
-	xorl %edi, %edi
-	call x86_sse_init
-
-#ifdef CONFIG_LOAPIC
-	call z_loapic_enable
-#endif
-
-#ifdef CONFIG_USERSPACE
-	/* Set landing site for system calls made with 'syscall' instruction */
-	movq $z_x86_syscall_entry_stub, %rax
-	movq %rax, %rdx
-	shrq $32, %rdx
-	movl $X86_LSTAR_MSR, %ecx
-	/* LSTAR set to 64-bit address denoted by EDX:EAX */
-	wrmsr
-
-	/* Set segment descriptors in STAR */
-	xorl %eax, %eax		/* Zero low bits, reserved */
-	movl $X86_STAR_UPPER, %edx
-	movl $X86_STAR_MSR, %ecx
-	wrmsr
-
-	/* Set EFLAGS mask applied when making system calls. Currently we
-	 * mask interrupts and clear direction flag.
-	 */
-	movl $0x600, %eax
-	xorl %edx, %edx
-	movl $X86_FMASK_MSR, %ecx
-	wrmsr
-#endif /* CONFIG_USERSPACE */
-
-	/* don't replace CALL with JMP; honor the ABI stack alignment! */
-
-	incl __x86_cpuboot_t_ready_OFFSET(%rbp)
-	movq __x86_cpuboot_t_arg_OFFSET(%rbp), %rdi
-	call *__x86_cpuboot_t_fn_OFFSET(%rbp) /* enter kernel; never return */
+	/* Enter C domain now that we have a stack set up, never to return */
+	movq %rbp, %rdi
+	call z_x86_cpu_init
 
 /*
  * void x86_sse_init(struct k_thread *thread);
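[Note: the STAR layout both versions rely on is architectural, not
defined in this diff. The upper 32 bits of IA32_STAR hold two segment
selector bases, which is why the C code writes (u64_t)X86_STAR_UPPER << 32
and the old assembly zeroed EAX before wrmsr. An illustrative helper:]

/* Illustrative only: how a STAR value is composed.
 * bits 47:32 = selector base used by SYSCALL (CS = base, SS = base + 8)
 * bits 63:48 = selector base used by SYSRET (64-bit CS = base + 16,
 *              SS = base + 8)
 * bits 31:0 are only used in legacy mode and stay zero here.
 */
static inline unsigned long long star_compose(unsigned short syscall_base,
					      unsigned short sysret_base)
{
	return ((unsigned long long)sysret_base << 48) |
	       ((unsigned long long)syscall_base << 32);
}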
@@ -30,6 +30,12 @@ static inline void arch_kernel_init(void)
 	/* nothing */;
 }
 
+FUNC_NORETURN void z_x86_cpu_init(struct x86_cpuboot *cpuboot);
+
+void x86_sse_init(struct k_thread *thread);
+
+void z_x86_syscall_entry_stub(void);
+
 #endif /* _ASMLANGUAGE */
 
 #endif /* ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_FUNC_H_ */
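[Note: FUNC_NORETURN is Zephyr's portability macro for the compiler's
noreturn attribute; with GCC it boils down to roughly:]

#define FUNC_NORETURN __attribute__((__noreturn__))

Declaring z_x86_cpu_init() this way lets the compiler assume the call
from locore.S never comes back, which fits the function ending in the
cpuboot->fn(cpuboot->arg) dispatch rather than a return.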
@@ -41,9 +41,11 @@
  * EFLAGS/RFLAGS definitions. (RFLAGS is just zero-extended EFLAGS.)
  */
 
-#define EFLAGS_IF	0x00000200U	/* interrupts enabled */
-#define EFLAGS_INITIAL	(EFLAGS_IF)
+#define EFLAGS_IF	BIT(9)	/* interrupts enabled */
+#define EFLAGS_DF	BIT(10)	/* Direction flag */
+#define EFLAGS_INITIAL	(EFLAGS_IF)
+#define EFLAGS_SYSCALL	(EFLAGS_IF | EFLAGS_DF)
 
 /*
  * Control register definitions.
  */
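[Note: a quick consistency check between the two forms. BIT(9) is 0x200
and BIT(10) is 0x400, so the new symbolic mask equals the magic 0x600
the removed assembly loaded for FMASK. Bits set in IA32_FMASK are
cleared in RFLAGS on syscall entry, i.e. interrupts masked and the
direction flag cleared. Illustrative, using Zephyr's BUILD_ASSERT:]

BUILD_ASSERT(EFLAGS_SYSCALL == 0x600); /* matches "movl $0x600, %eax" */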