diff --git a/arch/x86/core/intel64.cmake b/arch/x86/core/intel64.cmake
index 31fe2ace92d..db83ed7c69b 100644
--- a/arch/x86/core/intel64.cmake
+++ b/arch/x86/core/intel64.cmake
@@ -10,6 +10,7 @@ set_property(SOURCE intel64/locore.S PROPERTY LANGUAGE ASM)
 
 zephyr_library_sources(
 	intel64/locore.S
+	intel64/tss.c
 	intel64/irq.c
 	intel64/thread.c
 	intel64/fatal.c
diff --git a/arch/x86/core/intel64/locore.S b/arch/x86/core/intel64/locore.S
index 29478b35cd9..c2ac88a7ebf 100644
--- a/arch/x86/core/intel64/locore.S
+++ b/arch/x86/core/intel64/locore.S
@@ -59,7 +59,7 @@ __start:
 	movw %ax, %fs
 	movw %ax, %gs
 
-	movl $(exception_stack + CONFIG_EXCEPTION_STACK_SIZE), %esp
+	movl $(_exception_stack + CONFIG_EXCEPTION_STACK_SIZE), %esp
 
 	/* transition to long mode. along the way, we enable SSE. */
 
@@ -89,10 +89,11 @@ __start:
 	movw %ax, %es
 	movw %ax, %ss
 	movw %ax, %fs
-	movw %ax, %gs
 
 	movl $X86_KERNEL_TSS, %eax
 	ltr %ax
+	movl $X86_KERNEL_GS_64, %eax
+	movw %ax, %gs
 
 	cld
 	xorl %eax, %eax
@@ -256,11 +257,20 @@ gdt:
 	.word 0, 0, 0x9800, 0x0020	/* 0x18: 64-bit kernel code */
 	.word 0, 0, 0x9200, 0x0000	/* 0x20: 64-bit kernel data */
-	.word 0x67			/* 0x28: 64-bit TSS */
-	.word tss
+	.word 0, 0, 0, 0		/* 0x28: unused */
+
+	.word 0				/* 0x30: 64-bit TSS data (for GS) */
+	.word tss0
+	.word 0x9200
+	.word 0
+
+	.word 0, 0, 0, 0		/* 0x38: unused */
+
+	.word __X86_TSS64_SIZEOF-1	/* 0x40: 64-bit TSS (16-byte entry) */
+	.word tss0
 	.word 0x8900
 	.word 0
-	.word 0				/* 0x30: TSS consumes two entries */
+	.word 0
 	.word 0
 	.word 0
 	.word 0
 
@@ -269,27 +279,6 @@ gdt48:
 	.word (gdt48 - gdt - 1)
 	.long gdt
 
-/*
- * TSS - no privilege transitions (yet) so only used for
- * interrupt (IST1) and exception stack (IST7) locations.
- */
-
-.align 8
-tss:	.long 0
-rsp0:	.long 0, 0
-	.long 0, 0
-	.long 0, 0
-	.long 0, 0
-ist1:	.long (_interrupt_stack + CONFIG_ISR_STACK_SIZE), 0
-	.long 0, 0
-	.long 0, 0
-	.long 0, 0
-	.long 0, 0
-	.long 0, 0
-ist7:	.long (exception_stack + CONFIG_EXCEPTION_STACK_SIZE), 0
-	.long 0, 0
-	.long 0
-
 /*
  * IDT.
  */
@@ -439,7 +428,7 @@ irq:
 	 */
 
 	incl _kernel_offset_to_nested(%rsi)
-	subq $CONFIG_ISR_SUBSTACK_SIZE, ist1
+	subq $CONFIG_ISR_SUBSTACK_SIZE, %gs:__x86_tss64_t_ist1_OFFSET
 	cmpl $CONFIG_ISR_DEPTH, _kernel_offset_to_nested(%rsi)
 	jz 1f
 	sti
@@ -518,7 +507,7 @@ irq_dispatch:
 	movq $_kernel, %rsi
 	cli
-	addq $CONFIG_ISR_SUBSTACK_SIZE, ist1
+	addq $CONFIG_ISR_SUBSTACK_SIZE, %gs:__x86_tss64_t_ist1_OFFSET
 	decl _kernel_offset_to_nested(%rsi)
 
 	/* not nested, exit via __resume (might change threads) */
 #ifdef CONFIG_STACK_SENTINEL
@@ -597,7 +586,8 @@ pdp:	.long 0x00000183 /* 0x183 = G, 1GB, R/W, P */
  * The exception stack is used both for exceptions and early initialization.
  */
 
+.global _exception_stack
 .align 16
-exception_stack:
+_exception_stack:
 	.fill CONFIG_EXCEPTION_STACK_SIZE, 1, 0xAA
 
diff --git a/arch/x86/core/intel64/tss.c b/arch/x86/core/intel64/tss.c
new file mode 100644
index 00000000000..6e69ca7aa3b
--- /dev/null
+++ b/arch/x86/core/intel64/tss.c
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2019 Intel Corporation
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include
+#include
+#include
+
+extern u8_t _exception_stack[];
+
+Z_GENERIC_SECTION(.tss)
+struct x86_tss64 tss0 = {
+	.ist1 = (u64_t) _interrupt_stack + CONFIG_ISR_STACK_SIZE,
+	.ist7 = (u64_t) _exception_stack + CONFIG_EXCEPTION_STACK_SIZE,
+
+	.iomapb = 0xFFFF	/* no I/O access bitmap */
+};
diff --git a/arch/x86/core/offsets/intel64_offsets.c b/arch/x86/core/offsets/intel64_offsets.c
index 74d864d6fc4..85f1a1613bf 100644
--- a/arch/x86/core/offsets/intel64_offsets.c
+++ b/arch/x86/core/offsets/intel64_offsets.c
@@ -27,3 +27,6 @@ GEN_OFFSET_SYM(_thread_arch_t, r9);
 GEN_OFFSET_SYM(_thread_arch_t, r10);
 GEN_OFFSET_SYM(_thread_arch_t, r11);
 GEN_OFFSET_SYM(_thread_arch_t, sse);
+
+GEN_OFFSET_SYM(x86_tss64_t, ist1);
+GEN_ABSOLUTE_SYM(__X86_TSS64_SIZEOF, sizeof(x86_tss64_t));
diff --git a/arch/x86/core/offsets/offsets.c b/arch/x86/core/offsets/offsets.c
index dbdd44fa4b4..b56ae2ffef0 100644
--- a/arch/x86/core/offsets/offsets.c
+++ b/arch/x86/core/offsets/offsets.c
@@ -5,6 +5,7 @@
 
 #include
 #include
+#include
 #include
 
 #ifdef CONFIG_X86_LONGMODE
diff --git a/arch/x86/include/intel64/kernel_arch_data.h b/arch/x86/include/intel64/kernel_arch_data.h
index 18c6ae5ea5b..aafd15087c8 100644
--- a/arch/x86/include/intel64/kernel_arch_data.h
+++ b/arch/x86/include/intel64/kernel_arch_data.h
@@ -22,6 +22,46 @@
 #define X86_KERNEL_DS_32 0x10	/* 32-bit kernel data */
 #define X86_KERNEL_CS_64 0x18	/* 64-bit kernel code */
 #define X86_KERNEL_DS_64 0x20	/* 64-bit kernel data */
-#define X86_KERNEL_TSS	 0x28	/* 64-bit task state segment */
+
+#define X86_KERNEL_GS_64 0x30	/* data selector covering TSS */
+#define X86_KERNEL_TSS	 0x40	/* 64-bit task state segment */
+
+#ifndef _ASMLANGUAGE
+
+/*
+ * 64-bit Task State Segment. One defined per CPU.
+ */
+
+struct x86_tss64 {
+	/*
+	 * Architecturally-defined portion. It is somewhat tedious to
+	 * enumerate each member specifically (rather than using arrays)
+	 * but we need to get (some of) their offsets from assembly.
+	 */
+
+	u8_t reserved0[4];
+
+	u64_t rsp0;		/* privileged stacks */
+	u64_t rsp1;
+	u64_t rsp2;
+
+	u8_t reserved[8];
+
+	u64_t ist1;		/* interrupt stacks */
+	u64_t ist2;
+	u64_t ist3;
+	u64_t ist4;
+	u64_t ist5;
+	u64_t ist6;
+	u64_t ist7;
+
+	u8_t reserved1[10];
+
+	u16_t iomapb;		/* offset to I/O base */
+} __packed __aligned(8);
+
+typedef struct x86_tss64 x86_tss64_t;
+
+#endif /* _ASMLANGUAGE */
 
 #endif /* ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_DATA_H_ */
diff --git a/include/arch/x86/intel64/linker.ld b/include/arch/x86/intel64/linker.ld
index 230852a5e1d..df9dd0500ee 100644
--- a/include/arch/x86/intel64/linker.ld
+++ b/include/arch/x86/intel64/linker.ld
@@ -26,6 +26,7 @@ SECTIONS
 	{
 	*(.locore)
 	*(.locore.*)
+	*(.tss)
 	}
 
 /*