arch/x86: (Intel64) make z_arch_start_cpu() synchronous
Don't leave z_arch_start_cpu() until the target CPU has been started. Signed-off-by: Charles E. Youse <charles.youse@intel.com>
This commit is contained in:
parent
5a9a33b0cf
commit
5abab591c2
4 changed files with 7 additions and 1 deletions
|
@@ -18,7 +18,7 @@
|
|||
|
||||
__weak u8_t x86_cpu_loapics[] = { 0, 1, 2, 3 };
|
||||
|
||||
extern FUNC_NORETURN void z_x86_prep_c(int, struct multiboot_info *);
|
||||
extern FUNC_NORETURN void z_x86_prep_c(int dummy, struct multiboot_info *info);
|
||||
|
||||
extern char x86_ap_start[]; /* AP entry point in locore.S */
|
||||
|
||||
|
@@ -110,4 +110,7 @@ void z_arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
|
|||
z_loapic_ipi(apic_id, LOAPIC_ICR_IPI_INIT, 0);
|
||||
k_busy_wait(10000);
|
||||
z_loapic_ipi(apic_id, LOAPIC_ICR_IPI_STARTUP, vector);
|
||||
|
||||
while (x86_cpuboot[cpu_num].ready == 0) {
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -159,6 +159,7 @@ go64: movl %cr4, %eax /* enable PAE and SSE */
|
|||
|
||||
/* don't replace CALL with JMP; honor the ABI stack alignment! */
|
||||
|
||||
incl __x86_cpuboot_t_ready_OFFSET(%rbp)
|
||||
movq __x86_cpuboot_t_arg_OFFSET(%rbp), %rsi
|
||||
call *__x86_cpuboot_t_fn_OFFSET(%rbp) /* enter kernel; never return */
|
||||
|
||||
|
|
|
@@ -32,6 +32,7 @@ GEN_OFFSET_SYM(x86_tss64_t, ist1);
|
|||
GEN_OFFSET_SYM(x86_tss64_t, cpu);
|
||||
GEN_ABSOLUTE_SYM(__X86_TSS64_SIZEOF, sizeof(x86_tss64_t));
|
||||
|
||||
GEN_OFFSET_SYM(x86_cpuboot_t, ready);
|
||||
GEN_OFFSET_SYM(x86_cpuboot_t, tr);
|
||||
GEN_OFFSET_SYM(x86_cpuboot_t, gs);
|
||||
GEN_OFFSET_SYM(x86_cpuboot_t, sp);
|
||||
|
|
|
@@ -83,6 +83,7 @@ typedef struct x86_tss64 x86_tss64_t;
|
|||
*/
|
||||
|
||||
struct x86_cpuboot {
|
||||
volatile int ready; /* CPU has started */
|
||||
u16_t tr; /* selector for task register */
|
||||
u16_t gs; /* selector for GS */
|
||||
u64_t sp; /* initial stack pointer */
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue