arch/x86: initial Intel64 support

First "complete" version of Intel64 support for x86. Compilation of
apps for supported boards (read: up_squared) with CONFIG_X86_LONGMODE=y
is now working. Booting, device drivers, interrupts, scheduling, etc.
appear to be functioning properly. Beware that this is ALPHA quality,
not ready for production use, but the port has advanced far enough that
it's time to start working through the test suite and samples, fleshing
out any missing features, and squashing bugs.

Signed-off-by: Charles E. Youse <charles.youse@intel.com>
This commit is contained in:
Charles E. Youse 2019-07-04 20:17:14 -07:00 committed by Andrew Boie
commit 4ddaa59a89
26 changed files with 733 additions and 117 deletions

View file

@ -45,6 +45,15 @@ config X86_LONGMODE
prompt "Run in long (64-bit) mode" prompt "Run in long (64-bit) mode"
default n default n
config MAX_IRQ_LINES
int "Number of IRQ lines"
default 128
range 0 256
help
This option specifies the number of IRQ lines in the system. It
determines the size of the _irq_to_interrupt_vector_table, which
is used to track the association between vectors and IRQ numbers.
config XIP config XIP
default n default n

View file

@ -9,6 +9,7 @@ zephyr_compile_options_ifdef(CONFIG_COVERAGE_GCOV
-fno-inline -fno-inline
) )
zephyr_library_sources(cpuhalt.c)
zephyr_library_sources_if_kconfig(pcie.c) zephyr_library_sources_if_kconfig(pcie.c)
zephyr_library_sources_if_kconfig(reboot_rst_cnt.c) zephyr_library_sources_if_kconfig(reboot_rst_cnt.c)
zephyr_library_sources_ifdef(CONFIG_X86_MULTIBOOT multiboot.c) zephyr_library_sources_ifdef(CONFIG_X86_MULTIBOOT multiboot.c)

View file

@ -32,17 +32,6 @@ config IDT_NUM_VECTORS
Interrupt Descriptor Table (IDT). By default all 256 vectors are Interrupt Descriptor Table (IDT). By default all 256 vectors are
supported in an IDT requiring 2048 bytes of memory. supported in an IDT requiring 2048 bytes of memory.
config MAX_IRQ_LINES
int "Number of IRQ lines"
default 128
range 0 256
help
This option specifies the number of IRQ lines in the system.
It can be tuned to save some bytes in ROM, as it determines the
size of the _irq_to_interrupt_vector_table, which is used at runtime
to program to the PIC the association between vectors and
interrupts.
config SET_GDT config SET_GDT
bool "Setup GDT as part of boot process" bool "Setup GDT as part of boot process"
default y default y

View file

@ -1,27 +1,7 @@
/* /*
* Copyright (c) 2011-2015 Wind River Systems, Inc. * Copyright (c) 2011-2015 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
/**
* @file CPU power management code for IA-32
*
* This module provides an implementation of the architecture-specific
* k_cpu_idle() primitive required by the kernel idle loop component.
* It can be called within an implementation of _sys_power_save_idle(),
* which is provided for the kernel by the platform.
*
* The module also provides an implementation of k_cpu_atomic_idle(), which
* atomically re-enables interrupts and enters low power mode.
*
* INTERNAL
* These implementations of k_cpu_idle() and k_cpu_atomic_idle() could be
* used when operating as a Hypervisor guest. More specifically, the Hypervisor
* supports the execution of the 'hlt' instruction from a guest (results in a
* VM exit), and more importantly, the Hypervisor will respect the
* single instruction delay slot after the 'sti' instruction as required
* by k_cpu_atomic_idle().
*/
#include <zephyr.h> #include <zephyr.h>
#include <debug/tracing.h> #include <debug/tracing.h>
@ -33,11 +13,11 @@ extern u64_t __idle_time_stamp; /* timestamp when CPU went idle */
/** /**
* *
* @brief Power save idle routine for IA-32 * @brief Power save idle routine
* *
* This function will be called by the kernel idle loop or possibly within * This function will be called by the kernel idle loop or possibly within
* an implementation of _sys_power_save_idle in the kernel when the * an implementation of _sys_power_save_idle in the kernel when the
* '_sys_power_save_flag' variable is non-zero. The IA-32 'hlt' instruction * '_sys_power_save_flag' variable is non-zero. The 'hlt' instruction
* will be issued causing a low-power consumption sleep mode. * will be issued causing a low-power consumption sleep mode.
* *
* @return N/A * @return N/A

View file

@ -11,7 +11,6 @@ endif()
zephyr_library_sources( zephyr_library_sources(
ia32/cache.c ia32/cache.c
ia32/cache_s.S ia32/cache_s.S
ia32/cpuhalt.c
ia32/crt0.S ia32/crt0.S
ia32/excstub.S ia32/excstub.S
ia32/intstub.S ia32/intstub.S

View file

@ -382,7 +382,7 @@ z_x86_enable_paging:
/* Enable PAE */ /* Enable PAE */
movl %cr4, %eax movl %cr4, %eax
orl $CR4_PAE_ENABLE, %eax orl $CR4_PAE, %eax
movl %eax, %cr4 movl %eax, %cr4
/* IA32_EFER NXE bit set */ /* IA32_EFER NXE bit set */
@ -393,7 +393,7 @@ z_x86_enable_paging:
/* Enable paging (CR0.PG, bit 31) / write protect (CR0.WP, bit 16) */ /* Enable paging (CR0.PG, bit 31) / write protect (CR0.WP, bit 16) */
movl %cr0, %eax movl %cr0, %eax
orl $CR0_PG_WP_ENABLE, %eax orl $(CR0_PG | CR0_WP), %eax
movl %eax, %cr0 movl %eax, %cr0
ret ret

View file

@ -10,5 +10,6 @@ set_property(SOURCE intel64/locore.S PROPERTY LANGUAGE ASM)
zephyr_library_sources( zephyr_library_sources(
intel64/locore.S intel64/locore.S
intel64/irq_manage.c
intel64/thread.c intel64/thread.c
) )

View file

@ -0,0 +1,78 @@
/*
* Copyright (c) 2019 Intel Corporation
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <arch/cpu.h>
#include <kernel_arch_data.h>
#include <drivers/interrupt_controller/sysapic.h>
#include <irq.h>
unsigned char _irq_to_interrupt_vector[CONFIG_MAX_IRQ_LINES];
/*
 * The low-level interrupt code consults these arrays to dispatch IRQs,
 * so be sure to keep locore.S up to date with any changes. Note the
 * indices: use (vector - IV_IRQS), since exception vectors do not
 * appear here.
 */
#define NR_IRQ_VECTORS (IV_NR_VECTORS - IV_IRQS) /* # vectors free for IRQs */
void (*x86_irq_funcs[NR_IRQ_VECTORS])(void *);
void *x86_irq_args[NR_IRQ_VECTORS];
/*
 * Scan the 16-vector block that corresponds to the requested priority
 * level and return the first vector in it that has no handler installed,
 * or -1 if the whole block is in use. Priorities above the highest
 * usable level are clamped down to it.
 */
static int allocate_vector(unsigned int priority)
{
	const int VECTORS_PER_PRIORITY = 16;
	const int MAX_PRIORITY = 13;
	int candidate;
	int slot;

	if (priority >= MAX_PRIORITY) {
		priority = MAX_PRIORITY;
	}

	candidate = IV_IRQS + (priority * VECTORS_PER_PRIORITY);

	for (slot = 0; slot < VECTORS_PER_PRIORITY; slot++, candidate++) {
		if (x86_irq_funcs[candidate - IV_IRQS] == NULL) {
			return candidate;
		}
	}

	return -1;
}
/*
 * Connect a dynamic IRQ: allocate a free vector at the requested
 * priority, record the irq->vector mapping, program the interrupt
 * controller, and install the handler/argument for the low-level
 * dispatch code in locore.S.
 *
 * N.B.: the API docs don't say anything about returning error values, but
 * this function returns -1 if a vector at the specified priority can't be
 * allocated. Whether it should simply __ASSERT instead is up for debate.
 */
int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
			       void (*func)(void *arg), void *arg, u32_t flags)
{
	u32_t key;
	int vector;

	/*
	 * _irq_to_interrupt_vector[] has CONFIG_MAX_IRQ_LINES entries, so
	 * the last valid IRQ number is CONFIG_MAX_IRQ_LINES - 1. The old
	 * "<=" test permitted a one-past-the-end write to the table below.
	 */
	__ASSERT(irq < CONFIG_MAX_IRQ_LINES, "IRQ %u out of range", irq);

	key = irq_lock();

	vector = allocate_vector(priority);

	if (vector >= 0) {
		_irq_to_interrupt_vector[irq] = vector;
		z_irq_controller_irq_config(vector, irq, flags);
		x86_irq_funcs[vector - IV_IRQS] = func;
		x86_irq_args[vector - IV_IRQS] = arg;
	}

	irq_unlock(key);
	return vector;
}

View file

@ -7,9 +7,10 @@
#include <sys/util.h> #include <sys/util.h>
#include <arch/x86/msr.h> #include <arch/x86/msr.h>
#include <kernel_arch_data.h> #include <kernel_arch_data.h>
#include <offsets_short.h>
#include <drivers/interrupt_controller/loapic.h>
#define NR_IDT_VECTOR 256 /* full IDT, we're not short of RAM */ #define IST1_SIZE 4096 /* must be a multiple of 16 */
#define IRQ_STACK_SIZE 4096 /* must be a multiple of 16 */
.section .locore,"ax" .section .locore,"ax"
.code32 .code32
@ -18,10 +19,29 @@
__start: __start:
/* /*
* multiboot starts us here in 32-bit flat mode with interrupts * multiboot starts us here in 32-bit flat mode with interrupts and
* and paging disabled. first, switch to our own GDT and stack. * paging disabled. first, check to see if EAX has a valid multiboot
* magic in it, and if not, zero EBX so we know it's bogus later.
* Remember not to clobber EBX until we've copied the boot info struct!
*/ */
#ifdef CONFIG_X86_MULTIBOOT_INFO
cmpl $X86_MULTIBOOT_EAX_MAGIC, %eax
je 1f
xorl %ebx, %ebx
1:
#endif
/* FIXME: ...disabling the PICs is common to IA32 and Intel64... */
#ifdef CONFIG_PIC_DISABLE
movb $0xff, %al
outb %al, $0x21
outb %al, $0xA1
#endif
/* next, switch to our own GDT/IDT and stack. */
lgdt gdt48 lgdt gdt48
lidt idt48 lidt idt48
jmpl $X86_KERNEL_CS_32, $1f jmpl $X86_KERNEL_CS_32, $1f
@ -32,11 +52,9 @@ __start:
movw %ax, %fs movw %ax, %fs
movw %ax, %gs movw %ax, %gs
movl $(irq_stack + IRQ_STACK_SIZE), %esp movl $(ist1 + IST1_SIZE), %esp
/* /* transition to long mode, by the book. */
* transition to long mode, by the book.
*/
movl %cr4, %eax /* enable PAE */ movl %cr4, %eax /* enable PAE */
orl $CR4_PAE, %eax orl $CR4_PAE, %eax
@ -54,9 +72,7 @@ __start:
orl $CR0_PG, %eax orl $CR0_PG, %eax
movl %eax, %cr0 movl %eax, %cr0
/* /* jump into long mode, reload the segment registers (again). */
* jump into long mode, reload the segment registers (again).
*/
jmpl $X86_KERNEL_CS_64, $1f jmpl $X86_KERNEL_CS_64, $1f
.code64 .code64
@ -67,17 +83,37 @@ __start:
movw %ax, %fs movw %ax, %fs
movw %ax, %gs movw %ax, %gs
/* movl $X86_KERNEL_TSS, %eax
* func() is just a placeholder C function. don't replace CALL ltr %ax
* with JMP; we must honor the ABI stack alignment requirements.
*/
call func cld
xorl %eax, %eax
movq $__bss_start, %rdi
movq $__bss_num_qwords, %rcx
rep stosq
#ifdef CONFIG_X86_MULTIBOOT_INFO
/* If EBX isn't 0, then we have a valid multiboot info struct. */
testl %ebx, %ebx
jz 1f
movl %ebx, %esi
movl $__X86_MULTIBOOT_INFO_SIZEOF, %ecx
movq $x86_multiboot_info, %rdi
rep
movsb
1:
#endif /* CONFIG_X86_MULTIBOOT_INFO */
/* don't replace CALL with JMP; honor the ABI stack alignment! */
call z_cstart
stop: jmp stop stop: jmp stop
/* /*
* The multiboot header is identical (for obvious reasons) to the 32-bit * FIXME: The multiboot header is identical (for obvious reasons) to the
* version in ia32/crt0.S. They should be refactored into a common file. * version in ia32/crt0.S. They should be refactored into a common file.
*/ */
@ -95,6 +131,75 @@ stop: jmp stop
#endif /* CONFIG_X86_MULTIBOOT_FRAMEBUF */ #endif /* CONFIG_X86_MULTIBOOT_FRAMEBUF */
#endif #endif
/*
 * __swap: voluntary context switch, reached via CALL from C with
 * interrupts locked. Only the callee-saved register set is stored into
 * the outgoing thread; the thread is marked _THREAD_SWAPPED so that
 * __resume knows the caller-saved set need not be restored (see
 * intel64/kernel_arch_data.h). Execution then falls through to
 * __resume on the IST1 scratch stack.
 *
 * NOTE(review): EDI appears to carry the interrupt-lock key (an EFLAGS
 * image) from the caller and is saved as the thread's RFLAGS -- confirm
 * against the z_swap call site.
 */
.globl _k_neg_eagain /* from errno.c: int _k_neg_eagain = -EAGAIN; */
.globl __swap
__swap:
movq $_kernel, %rsi
movq _kernel_offset_to_current(%rsi), %rsi
orb $_THREAD_SWAPPED, _thread_offset_to_thread_state(%rsi)
/* stage -EAGAIN as the return value; z_set_thread_return_value() overwrites RAX */
movl _k_neg_eagain, %eax
movl %eax, _thread_offset_to_rax(%rsi)
/* the CALLer's return address becomes this thread's saved RIP */
popq %rax
movq %rax, _thread_offset_to_rip(%rsi)
movq %rsp, _thread_offset_to_rsp(%rsi)
movl %edi, %edi /* N.B.: zero extend */
movq %rdi, _thread_offset_to_rflags(%rsi)
movq %rbx, _thread_offset_to_rbx(%rsi)
movq %rbp, _thread_offset_to_rbp(%rsi)
movq %r12, _thread_offset_to_r12(%rsi)
movq %r13, _thread_offset_to_r13(%rsi)
movq %r14, _thread_offset_to_r14(%rsi)
movq %r15, _thread_offset_to_r15(%rsi)
/* switch to the scratch stack before selecting the next thread */
movq $(ist1 + IST1_SIZE), %rsp
/* fall through to __resume */
/*
 * __resume: make the cached ready-queue thread current, rebuild its
 * context, and IRET into it.
 *
 * Entry:
 * RSP = top of ist1
 */
__resume:
movq $_kernel, %rdi
movq _kernel_offset_to_ready_q_cache(%rdi), %rsi
movq %rsi, _kernel_offset_to_current(%rdi)
/* build an IRETQ frame: SS/RSP/RFLAGS/CS/RIP, so one exit path serves both the IRQ and __swap entries */
pushq $X86_KERNEL_DS_64 /* SS */
pushq _thread_offset_to_rsp(%rsi) /* RSP */
pushq _thread_offset_to_rflags(%rsi) /* RFLAGS */
pushq $X86_KERNEL_CS_64 /* CS */
pushq _thread_offset_to_rip(%rsi) /* RIP */
movq _thread_offset_to_rbx(%rsi), %rbx
movq _thread_offset_to_rbp(%rsi), %rbp
movq _thread_offset_to_r12(%rsi), %r12
movq _thread_offset_to_r13(%rsi), %r13
movq _thread_offset_to_r14(%rsi), %r14
movq _thread_offset_to_r15(%rsi), %r15
movq _thread_offset_to_rax(%rsi), %rax
/* a thread that swapped out voluntarily needs only the callee-saved set */
testb $_THREAD_SWAPPED, _thread_offset_to_thread_state(%rsi)
jnz 1f
movq _thread_offset_to_rcx(%rsi), %rcx
movq _thread_offset_to_rdx(%rsi), %rdx
movq _thread_offset_to_rdi(%rsi), %rdi
movq _thread_offset_to_r8(%rsi), %r8
movq _thread_offset_to_r9(%rsi), %r9
movq _thread_offset_to_r10(%rsi), %r10
movq _thread_offset_to_r11(%rsi), %r11
movq _thread_offset_to_rsi(%rsi), %rsi /* do last :-) */
1: iretq
/* /*
* GDT - a single GDT is shared by all threads (and, eventually, all CPUs). * GDT - a single GDT is shared by all threads (and, eventually, all CPUs).
* This layout must agree with the selectors in intel64/kernel_arch_data.h. * This layout must agree with the selectors in intel64/kernel_arch_data.h.
@ -107,22 +212,245 @@ gdt:
.word 0xFFFF, 0, 0x9A00, 0x00CF /* 0x08: 32-bit kernel code */ .word 0xFFFF, 0, 0x9A00, 0x00CF /* 0x08: 32-bit kernel code */
.word 0xFFFF, 0, 0x9200, 0x00CF /* 0x10: 32-bit kernel data */ .word 0xFFFF, 0, 0x9200, 0x00CF /* 0x10: 32-bit kernel data */
.word 0, 0, 0x9800, 0x0020 /* 0x18: 64-bit kernel code */ .word 0, 0, 0x9800, 0x0020 /* 0x18: 64-bit kernel code */
.word 0, 0, 0x9200, 0x0000 /* 0x20: 64-bit kernel data */
.word 0x67 /* 0x28: 64-bit TSS */
.word tss
.word 0x8900
.word 0
.word 0 /* 0x30: TSS consumes two entries */
.word 0
.word 0
.word 0
gdt48: gdt48:
.word (gdt48 - gdt - 1) .word (gdt48 - gdt - 1)
.long gdt .long gdt
/* /*
* IDT. Empty for now. * TSS - no privilege transitions (yet) so only used for IST1.
*/ */
.align 8 .align 8
tss: .long 0
.long 0, 0 /* RSP0 */
.long 0, 0 /* RSP1 */
.long 0, 0 /* RSP2 */
.long 0, 0
.long (ist1 + IST1_SIZE), 0 /* IST1 */
.long 0, 0 /* IST2 */
.long 0, 0 /* IST3 */
.long 0, 0 /* IST4 */
.long 0, 0 /* IST5 */
.long 0, 0 /* IST6 */
.long 0, 0 /* IST7 */
.long 0, 0
.long 0
/*
* IDT.
*/
#define TRAP 0x8f
#define INTR 0x8e
#define IDT(nr, type, ist) \
.word vector_ ## nr, X86_KERNEL_CS_64; \
.byte ist, type; \
.word 0, 0, 0, 0, 0
.align 16
idt:
IDT( 0, TRAP, 0); IDT( 1, TRAP, 0); IDT( 2, TRAP, 0); IDT( 3, TRAP, 0)
IDT( 4, TRAP, 0); IDT( 5, TRAP, 0); IDT( 6, TRAP, 0); IDT( 7, TRAP, 0)
IDT( 8, TRAP, 0); IDT( 9, TRAP, 0); IDT( 10, TRAP, 0); IDT( 11, TRAP, 0)
IDT( 12, TRAP, 0); IDT( 13, TRAP, 0); IDT( 14, TRAP, 0); IDT( 15, TRAP, 0)
IDT( 16, TRAP, 0); IDT( 17, TRAP, 0); IDT( 18, TRAP, 0); IDT( 19, TRAP, 0)
IDT( 20, TRAP, 0); IDT( 21, TRAP, 0); IDT( 22, TRAP, 0); IDT( 23, TRAP, 0)
IDT( 24, TRAP, 0); IDT( 25, TRAP, 0); IDT( 26, TRAP, 0); IDT( 27, TRAP, 0)
IDT( 28, TRAP, 0); IDT( 29, TRAP, 0); IDT( 30, TRAP, 0); IDT( 31, TRAP, 0)
IDT( 32, INTR, 1); IDT( 33, INTR, 1); IDT( 34, INTR, 1); IDT( 35, INTR, 1)
IDT( 36, INTR, 1); IDT( 37, INTR, 1); IDT( 38, INTR, 1); IDT( 39, INTR, 1)
IDT( 40, INTR, 1); IDT( 41, INTR, 1); IDT( 42, INTR, 1); IDT( 43, INTR, 1)
IDT( 44, INTR, 1); IDT( 45, INTR, 1); IDT( 46, INTR, 1); IDT( 47, INTR, 1)
IDT( 48, INTR, 1); IDT( 49, INTR, 1); IDT( 50, INTR, 1); IDT( 51, INTR, 1)
IDT( 52, INTR, 1); IDT( 53, INTR, 1); IDT( 54, INTR, 1); IDT( 55, INTR, 1)
IDT( 56, INTR, 1); IDT( 57, INTR, 1); IDT( 58, INTR, 1); IDT( 59, INTR, 1)
IDT( 60, INTR, 1); IDT( 61, INTR, 1); IDT( 62, INTR, 1); IDT( 63, INTR, 1)
IDT( 64, INTR, 1); IDT( 65, INTR, 1); IDT( 66, INTR, 1); IDT( 67, INTR, 1)
IDT( 68, INTR, 1); IDT( 69, INTR, 1); IDT( 70, INTR, 1); IDT( 71, INTR, 1)
IDT( 72, INTR, 1); IDT( 73, INTR, 1); IDT( 74, INTR, 1); IDT( 75, INTR, 1)
IDT( 76, INTR, 1); IDT( 77, INTR, 1); IDT( 78, INTR, 1); IDT( 79, INTR, 1)
IDT( 80, INTR, 1); IDT( 81, INTR, 1); IDT( 82, INTR, 1); IDT( 83, INTR, 1)
IDT( 84, INTR, 1); IDT( 85, INTR, 1); IDT( 86, INTR, 1); IDT( 87, INTR, 1)
IDT( 88, INTR, 1); IDT( 89, INTR, 1); IDT( 90, INTR, 1); IDT( 91, INTR, 1)
IDT( 92, INTR, 1); IDT( 93, INTR, 1); IDT( 94, INTR, 1); IDT( 95, INTR, 1)
IDT( 96, INTR, 1); IDT( 97, INTR, 1); IDT( 98, INTR, 1); IDT( 99, INTR, 1)
IDT(100, INTR, 1); IDT(101, INTR, 1); IDT(102, INTR, 1); IDT(103, INTR, 1)
IDT(104, INTR, 1); IDT(105, INTR, 1); IDT(106, INTR, 1); IDT(107, INTR, 1)
IDT(108, INTR, 1); IDT(109, INTR, 1); IDT(110, INTR, 1); IDT(111, INTR, 1)
IDT(112, INTR, 1); IDT(113, INTR, 1); IDT(114, INTR, 1); IDT(115, INTR, 1)
IDT(116, INTR, 1); IDT(117, INTR, 1); IDT(118, INTR, 1); IDT(119, INTR, 1)
IDT(120, INTR, 1); IDT(121, INTR, 1); IDT(122, INTR, 1); IDT(123, INTR, 1)
IDT(124, INTR, 1); IDT(125, INTR, 1); IDT(126, INTR, 1); IDT(127, INTR, 1)
IDT(128, INTR, 1); IDT(129, INTR, 1); IDT(130, INTR, 1); IDT(131, INTR, 1)
IDT(132, INTR, 1); IDT(133, INTR, 1); IDT(134, INTR, 1); IDT(135, INTR, 1)
IDT(136, INTR, 1); IDT(137, INTR, 1); IDT(138, INTR, 1); IDT(139, INTR, 1)
IDT(140, INTR, 1); IDT(141, INTR, 1); IDT(142, INTR, 1); IDT(143, INTR, 1)
IDT(144, INTR, 1); IDT(145, INTR, 1); IDT(146, INTR, 1); IDT(147, INTR, 1)
IDT(148, INTR, 1); IDT(149, INTR, 1); IDT(150, INTR, 1); IDT(151, INTR, 1)
IDT(152, INTR, 1); IDT(153, INTR, 1); IDT(154, INTR, 1); IDT(155, INTR, 1)
IDT(156, INTR, 1); IDT(157, INTR, 1); IDT(158, INTR, 1); IDT(159, INTR, 1)
IDT(160, INTR, 1); IDT(161, INTR, 1); IDT(162, INTR, 1); IDT(163, INTR, 1)
IDT(164, INTR, 1); IDT(165, INTR, 1); IDT(166, INTR, 1); IDT(167, INTR, 1)
IDT(168, INTR, 1); IDT(169, INTR, 1); IDT(170, INTR, 1); IDT(171, INTR, 1)
IDT(172, INTR, 1); IDT(173, INTR, 1); IDT(174, INTR, 1); IDT(175, INTR, 1)
IDT(176, INTR, 1); IDT(177, INTR, 1); IDT(178, INTR, 1); IDT(179, INTR, 1)
IDT(180, INTR, 1); IDT(181, INTR, 1); IDT(182, INTR, 1); IDT(183, INTR, 1)
IDT(184, INTR, 1); IDT(185, INTR, 1); IDT(186, INTR, 1); IDT(187, INTR, 1)
IDT(188, INTR, 1); IDT(189, INTR, 1); IDT(190, INTR, 1); IDT(191, INTR, 1)
IDT(192, INTR, 1); IDT(193, INTR, 1); IDT(194, INTR, 1); IDT(195, INTR, 1)
IDT(196, INTR, 1); IDT(197, INTR, 1); IDT(198, INTR, 1); IDT(199, INTR, 1)
IDT(200, INTR, 1); IDT(201, INTR, 1); IDT(202, INTR, 1); IDT(203, INTR, 1)
IDT(204, INTR, 1); IDT(205, INTR, 1); IDT(206, INTR, 1); IDT(207, INTR, 1)
IDT(208, INTR, 1); IDT(209, INTR, 1); IDT(210, INTR, 1); IDT(211, INTR, 1)
IDT(212, INTR, 1); IDT(213, INTR, 1); IDT(214, INTR, 1); IDT(215, INTR, 1)
IDT(216, INTR, 1); IDT(217, INTR, 1); IDT(218, INTR, 1); IDT(219, INTR, 1)
IDT(220, INTR, 1); IDT(221, INTR, 1); IDT(222, INTR, 1); IDT(223, INTR, 1)
IDT(224, INTR, 1); IDT(225, INTR, 1); IDT(226, INTR, 1); IDT(227, INTR, 1)
IDT(228, INTR, 1); IDT(229, INTR, 1); IDT(230, INTR, 1); IDT(231, INTR, 1)
IDT(232, INTR, 1); IDT(233, INTR, 1); IDT(234, INTR, 1); IDT(235, INTR, 1)
IDT(236, INTR, 1); IDT(237, INTR, 1); IDT(238, INTR, 1); IDT(239, INTR, 1)
IDT(240, INTR, 1); IDT(241, INTR, 1); IDT(242, INTR, 1); IDT(243, INTR, 1)
IDT(244, INTR, 1); IDT(245, INTR, 1); IDT(246, INTR, 1); IDT(247, INTR, 1)
IDT(248, INTR, 1); IDT(249, INTR, 1); IDT(250, INTR, 1); IDT(251, INTR, 1)
IDT(252, INTR, 1); IDT(253, INTR, 1); IDT(254, INTR, 1); IDT(255, INTR, 1)
idt: .fill (NR_IDT_VECTOR*16), 1, 0
idt48: idt48:
.word (idt48 - idt - 1) .word (idt48 - idt - 1)
.long idt .long idt
#define EXCEPT_CODE(nr) vector_ ## nr: pushq $nr; jmp except
#define EXCEPT(nr) vector_ ## nr: pushq $0; pushq $nr; jmp except
except:
/* save registers and dispatch to x86_exception() */
hlt
EXCEPT ( 0); EXCEPT ( 1); EXCEPT ( 2); EXCEPT ( 3)
EXCEPT ( 4); EXCEPT ( 5); EXCEPT ( 6); EXCEPT ( 7)
EXCEPT_CODE ( 8); EXCEPT ( 9); EXCEPT_CODE (10); EXCEPT_CODE (11)
EXCEPT_CODE (12); EXCEPT_CODE (13); EXCEPT_CODE (14); EXCEPT (15)
EXCEPT (16); EXCEPT_CODE (17); EXCEPT (18); EXCEPT (19)
EXCEPT (20); EXCEPT (21); EXCEPT (22); EXCEPT (23)
EXCEPT (24); EXCEPT (25); EXCEPT (26); EXCEPT (27)
EXCEPT (28); EXCEPT (29); EXCEPT (30); EXCEPT (31)
/*
* When we arrive at 'irq' from one of the IRQ(X)
* stubs, we're on IST1 and it contains:
*
* SS
* RSP
* RFLAGS
* CS
* RIP
* (vector number - IV_IRQS) <-- RSP points here
* RSI <-- we push this on entry
*
* Our job is to save the state of the interrupted thread so that
* __resume can restart it where it left off, then service the IRQ.
* We can then EOI the local APIC and head out via __resume - which
* may resume a different thread, if the scheduler decided to preempt.
*/
.globl x86_irq_funcs /* see irq_manage.c .. */
.globl x86_irq_args /* .. for these definitions */
irq:
pushq %rsi
movq $_kernel, %rsi
incl _kernel_offset_to_nested(%rsi)
movq _kernel_offset_to_current(%rsi), %rsi
andb $(~_THREAD_SWAPPED), _thread_offset_to_thread_state(%rsi)
movq %rbx, _thread_offset_to_rbx(%rsi)
movq %rbp, _thread_offset_to_rbp(%rsi)
movq %r12, _thread_offset_to_r12(%rsi)
movq %r13, _thread_offset_to_r13(%rsi)
movq %r14, _thread_offset_to_r14(%rsi)
movq %r15, _thread_offset_to_r15(%rsi)
movq %rax, _thread_offset_to_rax(%rsi)
movq %rcx, _thread_offset_to_rcx(%rsi)
movq %rdx, _thread_offset_to_rdx(%rsi)
movq %rdi, _thread_offset_to_rdi(%rsi)
movq %r8, _thread_offset_to_r8(%rsi)
movq %r9, _thread_offset_to_r9(%rsi)
movq %r10, _thread_offset_to_r10(%rsi)
movq %r11, _thread_offset_to_r11(%rsi)
popq %rax /* RSI */
movq %rax, _thread_offset_to_rsi(%rsi)
popq %rcx /* vector number */
popq %rax /* RIP */
movq %rax, _thread_offset_to_rip(%rsi)
popq %rax /* CS: discard */
popq %rax /* RFLAGS */
movq %rax, _thread_offset_to_rflags(%rsi)
popq %rax /* RSP */
movq %rax, _thread_offset_to_rsp(%rsi)
popq %rax /* SS: discard */
movq x86_irq_funcs(,%rcx,8), %rbx
movq x86_irq_args(,%rcx,8), %rdi
call *%rbx
#ifdef CONFIG_X2APIC
call z_x2apic_eoi
#else
xorl %eax, %eax
movl %eax, (CONFIG_LOAPIC_BASE_ADDRESS + LOAPIC_EOI)
#endif
movq $_kernel, %rsi
decl _kernel_offset_to_nested(%rsi)
jmp __resume
#define IRQ(nr) vector_ ## nr: pushq $(nr - IV_IRQS); jmp irq
IRQ( 32); IRQ( 33); IRQ( 34); IRQ( 35); IRQ( 36); IRQ( 37); IRQ( 38); IRQ( 39)
IRQ( 40); IRQ( 41); IRQ( 42); IRQ( 43); IRQ( 44); IRQ( 45); IRQ( 46); IRQ( 47)
IRQ( 48); IRQ( 49); IRQ( 50); IRQ( 51); IRQ( 52); IRQ( 53); IRQ( 54); IRQ( 55)
IRQ( 56); IRQ( 57); IRQ( 58); IRQ( 59); IRQ( 60); IRQ( 61); IRQ( 62); IRQ( 63)
IRQ( 64); IRQ( 65); IRQ( 66); IRQ( 67); IRQ( 68); IRQ( 69); IRQ( 70); IRQ( 71)
IRQ( 72); IRQ( 73); IRQ( 74); IRQ( 75); IRQ( 76); IRQ( 77); IRQ( 78); IRQ( 79)
IRQ( 80); IRQ( 81); IRQ( 82); IRQ( 83); IRQ( 84); IRQ( 85); IRQ( 86); IRQ( 87)
IRQ( 88); IRQ( 89); IRQ( 90); IRQ( 91); IRQ( 92); IRQ( 93); IRQ( 94); IRQ( 95)
IRQ( 96); IRQ( 97); IRQ( 98); IRQ( 99); IRQ(100); IRQ(101); IRQ(102); IRQ(103)
IRQ(104); IRQ(105); IRQ(106); IRQ(107); IRQ(108); IRQ(109); IRQ(110); IRQ(111)
IRQ(112); IRQ(113); IRQ(114); IRQ(115); IRQ(116); IRQ(117); IRQ(118); IRQ(119)
IRQ(120); IRQ(121); IRQ(122); IRQ(123); IRQ(124); IRQ(125); IRQ(126); IRQ(127)
IRQ(128); IRQ(129); IRQ(130); IRQ(131); IRQ(132); IRQ(133); IRQ(134); IRQ(135)
IRQ(136); IRQ(137); IRQ(138); IRQ(139); IRQ(140); IRQ(141); IRQ(142); IRQ(143)
IRQ(144); IRQ(145); IRQ(146); IRQ(147); IRQ(148); IRQ(149); IRQ(150); IRQ(151)
IRQ(152); IRQ(153); IRQ(154); IRQ(155); IRQ(156); IRQ(157); IRQ(158); IRQ(159)
IRQ(160); IRQ(161); IRQ(162); IRQ(163); IRQ(164); IRQ(165); IRQ(166); IRQ(167)
IRQ(168); IRQ(169); IRQ(170); IRQ(171); IRQ(172); IRQ(173); IRQ(174); IRQ(175)
IRQ(176); IRQ(177); IRQ(178); IRQ(179); IRQ(180); IRQ(181); IRQ(182); IRQ(183)
IRQ(184); IRQ(185); IRQ(186); IRQ(187); IRQ(188); IRQ(189); IRQ(190); IRQ(191)
IRQ(192); IRQ(193); IRQ(194); IRQ(195); IRQ(196); IRQ(197); IRQ(198); IRQ(199)
IRQ(200); IRQ(201); IRQ(202); IRQ(203); IRQ(204); IRQ(205); IRQ(206); IRQ(207)
IRQ(208); IRQ(209); IRQ(210); IRQ(211); IRQ(212); IRQ(213); IRQ(214); IRQ(215)
IRQ(216); IRQ(217); IRQ(218); IRQ(219); IRQ(220); IRQ(221); IRQ(222); IRQ(223)
IRQ(224); IRQ(225); IRQ(226); IRQ(227); IRQ(228); IRQ(229); IRQ(230); IRQ(231)
IRQ(232); IRQ(233); IRQ(234); IRQ(235); IRQ(236); IRQ(237); IRQ(238); IRQ(239)
IRQ(240); IRQ(241); IRQ(242); IRQ(243); IRQ(244); IRQ(245); IRQ(246); IRQ(247)
IRQ(248); IRQ(249); IRQ(250); IRQ(251); IRQ(252); IRQ(253); IRQ(254); IRQ(255)
/* /*
* Page tables. Long mode requires them, but we don't implement any memory * Page tables. Long mode requires them, but we don't implement any memory
* protection yet, so these simply identity-map the first 4GB w/ 1GB pages. * protection yet, so these simply identity-map the first 4GB w/ 1GB pages.
@ -146,11 +474,10 @@ pdp: .long 0x00000183 /* 0x183 = G, 1GB, R/W, P */
.fill 4064, 1, 0 .fill 4064, 1, 0
/* /*
* For now, the "IRQ stack" is just used as a scratch stack during * IST1 is used both during IRQ processing and early kernel initialization.
* early kernel initialization.
*/ */
.align 16 .align 16
irq_stack: ist1:
.fill IRQ_STACK_SIZE, 1, 0xFF .fill IST1_SIZE, 1, 0xFF

View file

@ -3,8 +3,27 @@
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
void func(void) #include <kernel.h>
#include <ksched.h>
#include <kernel_structs.h>
#include <kernel_internal.h>
void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
size_t stack_size, k_thread_entry_t entry,
void *parameter1, void *parameter2, void *parameter3,
int priority, unsigned int options)
{ {
for (;;) { Z_ASSERT_VALID_PRIO(priority, entry);
} z_new_thread_init(thread, Z_THREAD_STACK_BUFFER(stack),
stack_size, priority, options);
thread->callee_saved.rsp = (long) Z_THREAD_STACK_BUFFER(stack);
thread->callee_saved.rsp += (stack_size - 8); /* fake RIP for ABI */
thread->callee_saved.rip = (long) z_thread_entry;
thread->callee_saved.rflags = EFLAGS_INITIAL;
thread->arch.rdi = (long) entry;
thread->arch.rsi = (long) parameter1;
thread->arch.rdx = (long) parameter2;
thread->arch.rcx = (long) parameter3;
} }

View file

@ -22,13 +22,9 @@
* completeness. * completeness.
*/ */
#include <gen_offset.h> /* located in kernel/include */
/* list of headers that define whose structure offsets will be generated */ /* list of headers that define whose structure offsets will be generated */
#include <kernel_structs.h>
#include <ia32/mmustructs.h> #include <ia32/mmustructs.h>
#include <arch/x86/multiboot.h>
#include <kernel_offsets.h> #include <kernel_offsets.h>
@ -70,10 +66,3 @@ GEN_OFFSET_SYM(z_arch_esf_t, eflags);
/* size of the MMU_REGION structure. Used by linker scripts */ /* size of the MMU_REGION structure. Used by linker scripts */
GEN_ABSOLUTE_SYM(__MMU_REGION_SIZEOF, sizeof(struct mmu_region)); GEN_ABSOLUTE_SYM(__MMU_REGION_SIZEOF, sizeof(struct mmu_region));
/* size of struct x86_multiboot_info, used by crt0.S */
GEN_ABSOLUTE_SYM(__X86_MULTIBOOT_INFO_SIZEOF,
sizeof(struct x86_multiboot_info));
GEN_ABS_SYM_END

View file

@ -3,3 +3,26 @@
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
#include <kernel_arch_thread.h>
#include <kernel_offsets.h>
GEN_OFFSET_SYM(_callee_saved_t, rsp);
GEN_OFFSET_SYM(_callee_saved_t, rbp);
GEN_OFFSET_SYM(_callee_saved_t, rbx);
GEN_OFFSET_SYM(_callee_saved_t, r12);
GEN_OFFSET_SYM(_callee_saved_t, r13);
GEN_OFFSET_SYM(_callee_saved_t, r14);
GEN_OFFSET_SYM(_callee_saved_t, r15);
GEN_OFFSET_SYM(_callee_saved_t, rip);
GEN_OFFSET_SYM(_callee_saved_t, rflags);
GEN_OFFSET_SYM(_callee_saved_t, rax);
GEN_OFFSET_SYM(_thread_arch_t, rcx);
GEN_OFFSET_SYM(_thread_arch_t, rdx);
GEN_OFFSET_SYM(_thread_arch_t, rsi);
GEN_OFFSET_SYM(_thread_arch_t, rdi);
GEN_OFFSET_SYM(_thread_arch_t, r8);
GEN_OFFSET_SYM(_thread_arch_t, r9);
GEN_OFFSET_SYM(_thread_arch_t, r10);
GEN_OFFSET_SYM(_thread_arch_t, r11);

View file

@ -3,13 +3,17 @@
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
#include <gen_offset.h>
#include <kernel_structs.h>
#include <arch/x86/multiboot.h>
#ifdef CONFIG_X86_LONGMODE #ifdef CONFIG_X86_LONGMODE
#include "intel64_offsets.c" #include "intel64_offsets.c"
#else #else
#include "ia32_offsets.c" #include "ia32_offsets.c"
#endif #endif
/* size of struct x86_multiboot_info, used by crt0.S */ /* size of struct x86_multiboot_info, used by crt0.S/locore.S */
GEN_ABSOLUTE_SYM(__X86_MULTIBOOT_INFO_SIZEOF, GEN_ABSOLUTE_SYM(__X86_MULTIBOOT_INFO_SIZEOF,
sizeof(struct x86_multiboot_info)); sizeof(struct x86_multiboot_info));

View file

@ -98,8 +98,6 @@ void z_x86_enable_paging(void);
} }
#endif #endif
#define z_is_in_isr() (_kernel.nested != 0U)
#endif /* _ASMLANGUAGE */ #endif /* _ASMLANGUAGE */
#endif /* ZEPHYR_ARCH_X86_INCLUDE_IA32_KERNEL_ARCH_FUNC_H_ */ #endif /* ZEPHYR_ARCH_X86_INCLUDE_IA32_KERNEL_ARCH_FUNC_H_ */

View file

@ -6,6 +6,14 @@
#ifndef ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_DATA_H_ #ifndef ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_DATA_H_
#define ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_DATA_H_ #define ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_DATA_H_
/*
* A flag for k_thread.thread_state to tell __resume that the thread
* voluntarily switched itself out, so only a portion of the register
* state needs to be restored. See kernel_arch_thread.h and locore.S.
*/
#define _THREAD_SWAPPED BIT(7)
/* /*
* GDT selectors - these must agree with the GDT layout in locore.S. * GDT selectors - these must agree with the GDT layout in locore.S.
*/ */
@ -13,6 +21,7 @@
#define X86_KERNEL_CS_32 0x08 /* 32-bit kernel code */ #define X86_KERNEL_CS_32 0x08 /* 32-bit kernel code */
#define X86_KERNEL_DS_32 0x10 /* 32-bit kernel data */ #define X86_KERNEL_DS_32 0x10 /* 32-bit kernel data */
#define X86_KERNEL_CS_64 0x18 /* 64-bit kernel code */ #define X86_KERNEL_CS_64 0x18 /* 64-bit kernel code */
#define X86_KERNEL_DS_64 0x00 /* 64-bit kernel data (null!) */ #define X86_KERNEL_DS_64 0x20 /* 64-bit kernel data */
#define X86_KERNEL_TSS 0x28 /* 64-bit task state segment */
#endif /* ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_DATA_H_ */ #endif /* ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_DATA_H_ */

View file

@ -8,17 +8,15 @@
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE
#define z_is_in_isr() (0)
static ALWAYS_INLINE void static ALWAYS_INLINE void
z_set_thread_return_value(struct k_thread *thread, unsigned int value) z_set_thread_return_value(struct k_thread *thread, unsigned int value)
{ {
/* nothing */ ; thread->callee_saved.rax = value;
} }
static inline void kernel_arch_init(void) static inline void kernel_arch_init(void)
{ {
/* nothing */ ; /* nothing */;
} }
#endif /* _ASMLANGUAGE */ #endif /* _ASMLANGUAGE */

View file

@ -6,10 +6,44 @@
#ifndef ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_THREAD_H_ #ifndef ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_THREAD_H_
#define ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_THREAD_H_ #define ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_THREAD_H_
#include <zephyr/types.h>
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE
struct _callee_saved { }; /*
struct _thread_arch { }; * The _callee_saved registers are unconditionally saved/restored across
* context switches; the _thread_arch registers are only preserved when
* the thread is interrupted. _THREAD_SWAPPED tells __resume when it can
* cheat and only restore the first set. For more details see locore.S.
*/
struct _callee_saved {
u64_t rsp;
u64_t rbx;
u64_t rbp;
u64_t r12;
u64_t r13;
u64_t r14;
u64_t r15;
u64_t rip;
u64_t rflags;
u64_t rax;
};
typedef struct _callee_saved _callee_saved_t;
struct _thread_arch {
u64_t rcx;
u64_t rdx;
u64_t rsi;
u64_t rdi;
u64_t r8;
u64_t r9;
u64_t r10;
u64_t r11;
};
typedef struct _thread_arch _thread_arch_t;
#endif /* _ASMLANGUAGE */ #endif /* _ASMLANGUAGE */

View file

@ -0,0 +1,65 @@
/*
 * Copyright (c) 2019 Intel Corp.
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * Short-form byte offsets of the saved-register fields within struct
 * k_thread, for use from assembly (locore.S).  Each macro adds the offset
 * of the enclosing member (callee_saved or arch) to the offset of the
 * register field within that member; the ___*_OFFSET symbols come from
 * the generated <offsets.h>.
 */

#ifndef ZEPHYR_ARCH_X86_INCLUDE_INTEL64_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_X86_INCLUDE_INTEL64_OFFSETS_SHORT_ARCH_H_

#include <offsets.h>

/* Callee-saved registers (struct _callee_saved: saved on every switch). */

#define _thread_offset_to_rsp \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_rsp_OFFSET)

#define _thread_offset_to_rbx \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_rbx_OFFSET)

#define _thread_offset_to_rbp \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_rbp_OFFSET)

#define _thread_offset_to_r12 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_r12_OFFSET)

#define _thread_offset_to_r13 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_r13_OFFSET)

#define _thread_offset_to_r14 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_r14_OFFSET)

#define _thread_offset_to_r15 \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_r15_OFFSET)

#define _thread_offset_to_rip \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_rip_OFFSET)

#define _thread_offset_to_rflags \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_rflags_OFFSET)

#define _thread_offset_to_rax \
	(___thread_t_callee_saved_OFFSET + ___callee_saved_t_rax_OFFSET)

/* Caller-saved registers (struct _thread_arch: saved on interrupt only). */

#define _thread_offset_to_rcx \
	(___thread_t_arch_OFFSET + ___thread_arch_t_rcx_OFFSET)

#define _thread_offset_to_rdx \
	(___thread_t_arch_OFFSET + ___thread_arch_t_rdx_OFFSET)

#define _thread_offset_to_rsi \
	(___thread_t_arch_OFFSET + ___thread_arch_t_rsi_OFFSET)

#define _thread_offset_to_rdi \
	(___thread_t_arch_OFFSET + ___thread_arch_t_rdi_OFFSET)

#define _thread_offset_to_r8 \
	(___thread_t_arch_OFFSET + ___thread_arch_t_r8_OFFSET)

#define _thread_offset_to_r9 \
	(___thread_t_arch_OFFSET + ___thread_arch_t_r9_OFFSET)

#define _thread_offset_to_r10 \
	(___thread_t_arch_OFFSET + ___thread_arch_t_r10_OFFSET)

#define _thread_offset_to_r11 \
	(___thread_t_arch_OFFSET + ___thread_arch_t_r11_OFFSET)

#endif /* ZEPHYR_ARCH_X86_INCLUDE_INTEL64_OFFSETS_SHORT_ARCH_H_ */

View file

@ -31,7 +31,9 @@
#define IV_ALIGNMENT_CHECK 17 #define IV_ALIGNMENT_CHECK 17
#define IV_MACHINE_CHECK 18 #define IV_MACHINE_CHECK 18
#define IV_SIMD_FP 19 #define IV_SIMD_FP 19
#define IV_INTEL_RESERVED_END 31 /* last vector reserved by Intel */
#define IV_IRQS 32 /* start of vectors available for IRQs */
#define IV_NR_VECTORS 256 /* total number of vectors */
/* /*
* EFLAGS/RFLAGS definitions. (RFLAGS is just zero-extended EFLAGS.) * EFLAGS/RFLAGS definitions. (RFLAGS is just zero-extended EFLAGS.)

View file

@ -12,4 +12,6 @@
#include <ia32/kernel_arch_func.h> #include <ia32/kernel_arch_func.h>
#endif #endif
/* True when executing in interrupt context (ISR nesting count nonzero). */
#define z_is_in_isr() (_kernel.nested != 0U)
#endif /* ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_ */ #endif /* ZEPHYR_ARCH_X86_INCLUDE_KERNEL_ARCH_FUNC_H_ */

View file

@ -6,7 +6,9 @@
#ifndef ZEPHYR_ARCH_X86_INCLUDE_OFFSETS_SHORT_ARCH_H_ #ifndef ZEPHYR_ARCH_X86_INCLUDE_OFFSETS_SHORT_ARCH_H_
#define ZEPHYR_ARCH_X86_INCLUDE_OFFSETS_SHORT_ARCH_H_ #define ZEPHYR_ARCH_X86_INCLUDE_OFFSETS_SHORT_ARCH_H_
#ifndef CONFIG_X86_LONGMODE #ifdef CONFIG_X86_LONGMODE
#include <intel64/offsets_short_arch.h>
#else
#include <ia32/offsets_short_arch.h> #include <ia32/offsets_short_arch.h>
#endif #endif

View file

@ -7,14 +7,14 @@
#define ZEPHYR_INCLUDE_ARCH_X86_ARCH_H_ #define ZEPHYR_INCLUDE_ARCH_X86_ARCH_H_
#include <generated_dts_board.h> #include <generated_dts_board.h>
#include <stdbool.h>
#include <irq.h>
#if !defined(_ASMLANGUAGE) #if !defined(_ASMLANGUAGE)
#include <sys/sys_io.h> #include <sys/sys_io.h>
#include <zephyr/types.h> #include <zephyr/types.h>
#include <stddef.h> #include <stddef.h>
#include <stdbool.h>
#include <irq.h>
static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key) static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
{ {
@ -137,6 +137,17 @@ static ALWAYS_INLINE u32_t sys_read32(mm_reg_t addr)
return ret; return ret;
} }
/*
 * Map of IRQ numbers to their assigned vectors. On IA32, this is generated
 * at build time and defined via the linker script. On Intel64, it's an array.
 */
extern unsigned char _irq_to_interrupt_vector[];

/*
 * Look up the interrupt vector assigned to an IRQ line.  No bounds
 * checking is performed; the caller must supply a valid IRQ number.
 */
#define Z_IRQ_TO_INTERRUPT_VECTOR(irq) \
	((unsigned int) _irq_to_interrupt_vector[irq])
#endif /* _ASMLANGUAGE */ #endif /* _ASMLANGUAGE */
#ifdef CONFIG_X86_LONGMODE #ifdef CONFIG_X86_LONGMODE

View file

@ -242,15 +242,6 @@ typedef struct s_isrList {
}) })
/**
* @brief Convert a statically connected IRQ to its interrupt vector number
*
* @param irq IRQ number
*/
extern unsigned char _irq_to_interrupt_vector[];
#define Z_IRQ_TO_INTERRUPT_VECTOR(irq) \
((unsigned int) _irq_to_interrupt_vector[irq])
#ifdef CONFIG_SYS_POWER_MANAGEMENT #ifdef CONFIG_SYS_POWER_MANAGEMENT
extern void z_arch_irq_direct_pm(void); extern void z_arch_irq_direct_pm(void);
#define Z_ARCH_ISR_DIRECT_PM() z_arch_irq_direct_pm() #define Z_ARCH_ISR_DIRECT_PM() z_arch_irq_direct_pm()

View file

@ -8,8 +8,24 @@
#include <kernel_arch_thread.h> #include <kernel_arch_thread.h>
#define STACK_ALIGN 16
#define STACK_SIZE_ALIGN 16
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE
/*
 * Statically define a thread stack, aligned to STACK_ALIGN and with its
 * size rounded up to a multiple of STACK_SIZE_ALIGN.
 */
#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \
	struct _k_thread_stack_element __noinit \
		__aligned(STACK_ALIGN) \
		sym[ROUND_UP((size), STACK_SIZE_ALIGN)]

/* As above, but defines an array of 'nmemb' stacks. */
#define Z_ARCH_THREAD_STACK_ARRAY_DEFINE(sym, nmemb, size) \
	struct _k_thread_stack_element __noinit \
		__aligned(STACK_ALIGN) \
		sym[nmemb][ROUND_UP((size), STACK_SIZE_ALIGN)]

/* Total size in bytes of a stack defined with the macros above. */
#define Z_ARCH_THREAD_STACK_SIZEOF(sym) sizeof(sym)

/* Pointer to the stack buffer as a char array. */
#define Z_ARCH_THREAD_STACK_BUFFER(sym) ((char *) sym)
static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
{ {
unsigned long key; unsigned long key;
@ -26,21 +42,22 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
return (unsigned int) key; return (unsigned int) key;
} }
/*
 * Bogus ESF stuff until I figure out what to do with it.  I suspect
 * this is legacy cruft that we'll want to excise sometime soon, anyway.
 *
 * NOTE(review): intentionally empty for now — presumably kept only so
 * that z_arch_esf_t exists for arch-independent code; TODO confirm.
 */
struct x86_esf {
};
typedef struct x86_esf z_arch_esf_t;
#endif /* _ASMLANGUAGE */ #endif /* _ASMLANGUAGE */
/* /*
* dummies for now * All Intel64 interrupts are dynamically connected.
*/ */
#define Z_ARCH_THREAD_STACK_DEFINE(sym, size) \ #define Z_ARCH_IRQ_CONNECT z_arch_irq_connect_dynamic
struct _k_thread_stack_element sym[size]
#define Z_ARCH_THREAD_STACK_SIZEOF(sym) sizeof(sym)
#define Z_ARCH_THREAD_STACK_BUFFER(sym) ((char *) sym)
#define Z_IRQ_TO_INTERRUPT_VECTOR(irq) (0)
#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p)
#define z_x86_msr_read(a) (0)
#endif /* ZEPHYR_INCLUDE_ARCH_X86_INTEL64_ARCH_H_ */ #endif /* ZEPHYR_INCLUDE_ARCH_X86_INTEL64_ARCH_H_ */

View file

@ -3,6 +3,11 @@
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
/*
 * Emit the device-init records for one init level, sorted by priority:
 * single-digit priorities first, then two-digit ones, so numeric order
 * is preserved.  Also defines the __device_<level>_start symbol at the
 * start of the run.  (This linker script is run through the C
 * preprocessor, hence the #define.)
 */
#define DEVICE_INIT_LEVEL(level) \
	__device_##level##_start = .; \
	KEEP(*(SORT(.init_##level[0-9]))); \
	KEEP(*(SORT(.init_##level[1-9][0-9])));
ENTRY(CONFIG_KERNEL_ENTRY) ENTRY(CONFIG_KERNEL_ENTRY)
SECTIONS SECTIONS
@ -30,6 +35,52 @@ SECTIONS
*(.text) *(.text)
*(.text.*) *(.text.*)
*(.rodata.*) *(.rodata.*)
. = ALIGN(8);
__devconfig_start = .;
*(.devconfig.*)
KEEP(*(SORT_BY_NAME(.devconfig*)))
__devconfig_end = .;
}
.data : ALIGN(16)
{
*(.data*)
. = ALIGN(8);
__device_init_start = .;
DEVICE_INIT_LEVEL(PRE_KERNEL_1)
DEVICE_INIT_LEVEL(PRE_KERNEL_2)
DEVICE_INIT_LEVEL(POST_KERNEL)
DEVICE_INIT_LEVEL(APPLICATION)
__device_init_end = .;
. = ALIGN(8);
__static_thread_data_list_start = .;
KEEP(*(SORT_BY_NAME(.__static_thread_data.static.*)))
__static_thread_data_list_end = .;
. = ALIGN(8);
_k_mem_pool_list_start = .;
KEEP(*(._k_mem_pool_static.*))
_k_mem_pool_list_end = .;
}
.bss : ALIGN(16)
{
__bss_start = .;
*(.bss)
*(.bss.*)
*(COMMON)
. = ALIGN(8); /* so __bss_num_qwords is exact */
__bss_end = .;
}
__bss_num_qwords = (__bss_end - __bss_start) >> 3;
.noinit (NOLOAD) : ALIGN(16)
{
*(.noinit.*)
} }
/DISCARD/ : /DISCARD/ :

View file

@ -43,7 +43,24 @@ static inline void z_x86_msr_write(unsigned int msr, u64_t data)
__asm__ volatile ("wrmsr" : : "c"(msr), "a"(low), "d"(high)); __asm__ volatile ("wrmsr" : : "c"(msr), "a"(low), "d"(high));
} }
#ifndef CONFIG_X86_LONGMODE #ifdef CONFIG_X86_LONGMODE
static inline u64_t z_x86_msr_read(unsigned int msr)
{
union {
struct {
u32_t lo;
u32_t hi;
};
u64_t value;
} rv;
__asm__ volatile ("rdmsr" : "=a" (rv.lo), "=d" (rv.hi) : "c" (msr));
return rv.value;
}
#else
static inline u64_t z_x86_msr_read(unsigned int msr) static inline u64_t z_x86_msr_read(unsigned int msr)
{ {