|
|
|
/*
|
|
|
|
* Copyright (c) 2019 Intel Corporation
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <kernel.h>
|
|
|
|
#include <kernel_arch_data.h>
|
|
|
|
#include <kernel_arch_func.h>
|
|
|
|
#include <kernel_structs.h>
|
|
|
|
#include <arch/x86/multiboot.h>
|
|
|
|
#include <arch/x86/mmustructs.h>
|
|
|
|
#include <drivers/interrupt_controller/loapic.h>
|
|
|
|
|
|
|
|
/*
 * Map of CPU logical IDs to CPU local APIC IDs. By default,
 * we assume this simple identity mapping, as found in QEMU.
 * The symbol is weak so that boards/SoC files can override.
 *
 * Indexed by logical CPU number in arch_start_cpu(); entries beyond
 * the number of CPUs actually configured are never read.
 */
__weak u8_t x86_cpu_loapics[] = { 0, 1, 2, 3 };
|
|
|
|
|
|
|
|
extern char x86_ap_start[]; /* AP entry point in locore.S */
|
|
|
|
|
|
|
|
extern u8_t _exception_stack[];
|
|
|
|
extern u8_t _exception_stack1[];
|
|
|
|
extern u8_t _exception_stack2[];
|
|
|
|
extern u8_t _exception_stack3[];
|
|
|
|
|
|
|
|
#ifdef CONFIG_X86_KPTI
|
|
|
|
extern u8_t z_x86_trampoline_stack[];
|
|
|
|
extern u8_t z_x86_trampoline_stack1[];
|
|
|
|
extern u8_t z_x86_trampoline_stack2[];
|
|
|
|
extern u8_t z_x86_trampoline_stack3[];
|
|
|
|
#endif /* CONFIG_X86_KPTI */
|
|
|
|
|
|
|
|
/* 64-bit Task State Segment for CPU 0, statically initialized and placed
 * in the .tss section. Referenced by x86_cpuboot[0].gs_base below.
 */
Z_GENERIC_SECTION(.tss)
struct x86_tss64 tss0 = {
#ifdef CONFIG_X86_KPTI
	/* IST2: per-CPU trampoline stack used for user->kernel transitions
	 * under KPTI. IST slots hold the stack TOP (x86 stacks grow down).
	 */
	.ist2 = (u64_t) z_x86_trampoline_stack + Z_X86_TRAMPOLINE_STACK_SIZE,
#endif
	/* IST7: top of the dedicated exception stack for this CPU */
	.ist7 = (u64_t) _exception_stack + CONFIG_EXCEPTION_STACK_SIZE,
	/* I/O map base past the TSS limit => no I/O permission bitmap */
	.iomapb = 0xFFFF,
	/* Back-pointer to this CPU's per-CPU kernel structure */
	.cpu = &(_kernel.cpus[0])
};
|
|
|
|
|
|
|
|
#if CONFIG_MP_NUM_CPUS > 1
|
2019-09-28 22:38:03 -04:00
|
|
|
Z_GENERIC_SECTION(.tss)
|
|
|
|
struct x86_tss64 tss1 = {
|
2019-12-18 23:57:25 -08:00
|
|
|
#ifdef CONFIG_X86_KPTI
|
|
|
|
.ist2 = (u64_t) z_x86_trampoline_stack1 + Z_X86_TRAMPOLINE_STACK_SIZE,
|
|
|
|
#endif
|
2019-09-28 22:38:03 -04:00
|
|
|
.ist7 = (u64_t) _exception_stack1 + CONFIG_EXCEPTION_STACK_SIZE,
|
|
|
|
.iomapb = 0xFFFF,
|
|
|
|
.cpu = &(_kernel.cpus[1])
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if CONFIG_MP_NUM_CPUS > 2
|
2019-09-28 22:38:03 -04:00
|
|
|
Z_GENERIC_SECTION(.tss)
|
|
|
|
struct x86_tss64 tss2 = {
|
2019-12-18 23:57:25 -08:00
|
|
|
#ifdef CONFIG_X86_KPTI
|
|
|
|
.ist2 = (u64_t) z_x86_trampoline_stack2 + Z_X86_TRAMPOLINE_STACK_SIZE,
|
|
|
|
#endif
|
2019-09-28 22:38:03 -04:00
|
|
|
.ist7 = (u64_t) _exception_stack2 + CONFIG_EXCEPTION_STACK_SIZE,
|
|
|
|
.iomapb = 0xFFFF,
|
|
|
|
.cpu = &(_kernel.cpus[2])
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if CONFIG_MP_NUM_CPUS > 3
|
2019-09-28 22:38:03 -04:00
|
|
|
Z_GENERIC_SECTION(.tss)
|
|
|
|
struct x86_tss64 tss3 = {
|
2019-12-18 23:57:25 -08:00
|
|
|
#ifdef CONFIG_X86_KPTI
|
|
|
|
.ist2 = (u64_t) z_x86_trampoline_stack3 + Z_X86_TRAMPOLINE_STACK_SIZE,
|
|
|
|
#endif
|
2019-09-28 22:38:03 -04:00
|
|
|
.ist7 = (u64_t) _exception_stack3 + CONFIG_EXCEPTION_STACK_SIZE,
|
|
|
|
.iomapb = 0xFFFF,
|
|
|
|
.cpu = &(_kernel.cpus[3])
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
|
|
|
extern struct x86_page_tables z_x86_flat_ptables;
|
|
|
|
|
|
|
|
/* Per-CPU boot parameter blocks, one per logical CPU. CPU 0's entry is
 * fully initialized here; the secondary CPU entries only carry their TSS
 * selector and TSS address, with .sp/.fn/.arg (and .ptables) filled in at
 * runtime by arch_start_cpu() before the startup IPIs are sent.
 */
struct x86_cpuboot x86_cpuboot[] = {
	{
		.tr = X86_KERNEL_CPU0_TR,	/* TSS selector to load into TR */
		.gs_base = &tss0,		/* address of CPU 0's TSS */
		/* Boot stack: top of the interrupt stack */
		.sp = (u64_t) _interrupt_stack + CONFIG_ISR_STACK_SIZE,
		.fn = z_x86_prep_c,		/* first C code run on CPU 0 */
#ifdef CONFIG_X86_MMU
		/* Initial identity-mapped page tables; secondary CPUs are
		 * switched to z_x86_kernel_ptables by arch_start_cpu()
		 */
		.ptables = &z_x86_flat_ptables,
#endif
	},
#if CONFIG_MP_NUM_CPUS > 1
	{
		.tr = X86_KERNEL_CPU1_TR,	/* TSS selector for CPU 1 */
		.gs_base = &tss1
	},
#endif
#if CONFIG_MP_NUM_CPUS > 2
	{
		.tr = X86_KERNEL_CPU2_TR,	/* TSS selector for CPU 2 */
		.gs_base = &tss2
	},
#endif
#if CONFIG_MP_NUM_CPUS > 3
	{
		.tr = X86_KERNEL_CPU3_TR,	/* TSS selector for CPU 3 */
		.gs_base = &tss3
	},
#endif
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Send the INIT/STARTUP IPI sequence required to start up CPU 'cpu_num', which
|
2020-01-10 12:51:38 -08:00
|
|
|
* will enter the kernel at fn(arg), running on the specified stack.
|
2019-09-28 22:38:03 -04:00
|
|
|
*/
|
|
|
|
|
2019-11-07 12:43:29 -08:00
|
|
|
void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
|
2020-01-10 12:51:38 -08:00
|
|
|
arch_cpustart_t fn, void *arg)
|
2019-09-28 22:38:03 -04:00
|
|
|
{
|
|
|
|
u8_t vector = ((unsigned long) x86_ap_start) >> 12;
|
|
|
|
u8_t apic_id = x86_cpu_loapics[cpu_num];
|
|
|
|
|
|
|
|
x86_cpuboot[cpu_num].sp = (u64_t) Z_THREAD_STACK_BUFFER(stack) + sz;
|
|
|
|
x86_cpuboot[cpu_num].fn = fn;
|
|
|
|
x86_cpuboot[cpu_num].arg = arg;
|
2019-10-18 12:08:10 -07:00
|
|
|
#ifdef CONFIG_X86_MMU
|
|
|
|
x86_cpuboot[cpu_num].ptables = &z_x86_kernel_ptables;
|
|
|
|
#endif /* CONFIG_X86_MMU */
|
2019-09-28 22:38:03 -04:00
|
|
|
|
|
|
|
z_loapic_ipi(apic_id, LOAPIC_ICR_IPI_INIT, 0);
|
|
|
|
k_busy_wait(10000);
|
|
|
|
z_loapic_ipi(apic_id, LOAPIC_ICR_IPI_STARTUP, vector);
|
2019-09-30 18:16:16 -04:00
|
|
|
|
|
|
|
while (x86_cpuboot[cpu_num].ready == 0) {
|
|
|
|
}
|
2019-09-28 22:38:03 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Per-CPU initialization, C domain. On the first CPU, z_x86_prep_c is the
 * next step. For other CPUs it is probably smp_init_top().
 *
 * Runs on every CPU as the first C code after the assembly bringup; the
 * cpuboot argument is this CPU's x86_cpuboot[] entry. Never returns: it
 * tail-calls cpuboot->fn(cpuboot->arg).
 */
FUNC_NORETURN void z_x86_cpu_init(struct x86_cpuboot *cpuboot)
{
	/* Per-CPU FPU/SSE state init; NULL presumably means "no owning
	 * thread yet" — confirm against x86_sse_init()'s contract.
	 */
	x86_sse_init(NULL);

	/* Bring up this CPU's local APIC */
	z_loapic_enable();

#ifdef CONFIG_USERSPACE
	/* Set landing site for 'syscall' instruction */
	z_x86_msr_write(X86_LSTAR_MSR, (u64_t)z_x86_syscall_entry_stub);

	/* Set segment descriptors for syscall privilege transitions */
	z_x86_msr_write(X86_STAR_MSR, (u64_t)X86_STAR_UPPER << 32);

	/* Mask applied to RFLAGS when making a syscall */
	z_x86_msr_write(X86_FMASK_MSR, EFLAGS_SYSCALL);
#endif

	/* Enter kernel, never return */
	cpuboot->ready++;	/* releases arch_start_cpu()'s wait loop */
	cpuboot->fn(cpuboot->arg);
}
|