From cdb9ac3895ac13b88ad693ba1c59b7c6316f6060 Mon Sep 17 00:00:00 2001
From: "Charles E. Youse" <charles.youse@intel.com>
Date: Mon, 15 Jul 2019 13:18:36 -0700
Subject: [PATCH] arch/x86: Add exception reporting code for Intel64

Fleshed out z_arch_esf_t and added code to build this frame when
exceptions occur. Created a separate small stack for exceptions and
shifted the initialization code to use this instead of the IRQ stack.
Moved IRQ stack(s) to irq.c.

Signed-off-by: Charles E. Youse <charles.youse@intel.com>
---
 arch/x86/core/Kconfig.intel64                 |  9 +++
 arch/x86/core/intel64.cmake                   |  3 +-
 arch/x86/core/intel64/fatal.c                 | 41 ++++++++++
 arch/x86/core/intel64/{irq_manage.c => irq.c} |  6 ++
 arch/x86/core/intel64/locore.S                | 80 ++++++++++++-------
 arch/x86/include/intel64/kernel_arch_thread.h |  4 +-
 include/arch/x86/intel64/arch.h               | 25 +++++-
 7 files changed, 135 insertions(+), 33 deletions(-)
 create mode 100644 arch/x86/core/intel64/fatal.c
 rename arch/x86/core/intel64/{irq_manage.c => irq.c} (96%)

diff --git a/arch/x86/core/Kconfig.intel64 b/arch/x86/core/Kconfig.intel64
index 0662362635a..5c5b9f15235 100644
--- a/arch/x86/core/Kconfig.intel64
+++ b/arch/x86/core/Kconfig.intel64
@@ -23,4 +23,13 @@ config SYSTEM_WORKQUEUE_STACK_SIZE
 config OFFLOAD_WORKQUEUE_STACK_SIZE
 	default 8192
 
+config EXCEPTION_STACK_SIZE
+	int "Size of the exception stack(s)"
+	default 1024
+	help
+	  The exception stack(s) (one per CPU) are used both for exception
+	  processing and early kernel/CPU initialization. They need only
+	  support limited call-tree depth and must fit into the low core,
+	  so they are typically smaller than the ISR stacks.
+
 endif # X86_LONGMODE

diff --git a/arch/x86/core/intel64.cmake b/arch/x86/core/intel64.cmake
index a3577a07542..31fe2ace92d 100644
--- a/arch/x86/core/intel64.cmake
+++ b/arch/x86/core/intel64.cmake
@@ -10,6 +10,7 @@ set_property(SOURCE intel64/locore.S PROPERTY LANGUAGE ASM)
 
 zephyr_library_sources(
 	intel64/locore.S
-	intel64/irq_manage.c
+	intel64/irq.c
 	intel64/thread.c
+	intel64/fatal.c
 )

diff --git a/arch/x86/core/intel64/fatal.c b/arch/x86/core/intel64/fatal.c
new file mode 100644
index 00000000000..1cf628532c5
--- /dev/null
+++ b/arch/x86/core/intel64/fatal.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019 Intel Corporation
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <kernel.h>
+#include <ksched.h>
+#include <kernel_structs.h>
+#include <kernel_internal.h>
+#include <logging/log.h>
+LOG_MODULE_DECLARE(os);
+
+void z_x86_fatal_error(unsigned int reason, const z_arch_esf_t *esf)
+{
+	if (esf != NULL) {
+		LOG_ERR("RIP=%016lx RSP=%016lx RFLAGS=%016lx\n",
+			esf->rip, esf->rsp, esf->rflags);
+
+		LOG_ERR("RAX=%016lx RBX=%016lx RCX=%016lx RDX=%016lx\n",
+			esf->rax, esf->rbx, esf->rcx, esf->rdx);
+
+		LOG_ERR("RSI=%016lx RDI=%016lx RBP=%016lx RSP=%016lx\n",
+			esf->rsi, esf->rdi, esf->rbp, esf->rsp);
+
+		LOG_ERR("R8=%016lx R9=%016lx R10=%016lx R11=%016lx\n",
+			esf->r8, esf->r9, esf->r10, esf->r11);
+
+		LOG_ERR("R12=%016lx R13=%016lx R14=%016lx R15=%016lx\n",
+			esf->r12, esf->r13, esf->r14, esf->r15);
+	}
+
+	z_fatal_error(reason, esf);
+}
+
+void z_x86_exception(const z_arch_esf_t *esf)
+{
+	LOG_ERR("** CPU Exception %ld (code %ld/0x%lx) **\n",
+		esf->vector, esf->code, esf->code);
+
+	z_x86_fatal_error(K_ERR_CPU_EXCEPTION, esf);
+}
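The dump above makes faults self-describing in the log. To exercise the
new path end to end, something as small as the following is enough; this
is a hypothetical test helper, not part of the patch:

	/* Hypothetical helper (not in this patch): 'ud2' raises vector 6
	 * (#UD, invalid opcode), which has no error code, so the EXCEPT(6)
	 * stub pushes 0 and the log should read
	 * "** CPU Exception 6 (code 0/0x0) **" followed by the register
	 * dump and the usual z_fatal_error() handling.
	 */
	static inline void trigger_invalid_opcode(void)
	{
		__asm__ volatile("ud2");
	}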
diff --git a/arch/x86/core/intel64/irq_manage.c b/arch/x86/core/intel64/irq.c
similarity index 96%
rename from arch/x86/core/intel64/irq_manage.c
rename to arch/x86/core/intel64/irq.c
index 6ba4ea4039b..ca7d786bd7a 100644
--- a/arch/x86/core/intel64/irq_manage.c
+++ b/arch/x86/core/intel64/irq.c
@@ -26,6 +26,12 @@ unsigned char _irq_to_interrupt_vector[CONFIG_MAX_IRQ_LINES];
 void (*x86_irq_funcs[NR_IRQ_VECTORS])(void *);
 void *x86_irq_args[NR_IRQ_VECTORS];
 
+/*
+ * Interrupt stack.
+ */
+
+char __aligned(STACK_ALIGN) ist[CONFIG_ISR_STACK_SIZE];
+
 /*
  * Find a free IRQ vector at the specified priority, or return -1 if none left.
  */

diff --git a/arch/x86/core/intel64/locore.S b/arch/x86/core/intel64/locore.S
index f49a1383641..3035a28f669 100644
--- a/arch/x86/core/intel64/locore.S
+++ b/arch/x86/core/intel64/locore.S
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 
 .section .locore,"ax"
 .code32
@@ -50,7 +51,7 @@ __start:
 	movw %ax, %fs
 	movw %ax, %gs
 
-	movl $(ist1 + CONFIG_ISR_STACK_SIZE), %esp
+	movl $(exception_stack + CONFIG_EXCEPTION_STACK_SIZE), %esp
 
 	/* transition to long mode, by the book. */
 
@@ -156,13 +157,13 @@ __swap:
 	movq %r14, _thread_offset_to_r14(%rsi)
 	movq %r15, _thread_offset_to_r15(%rsi)
 
-	movq $(ist1 + CONFIG_ISR_STACK_SIZE), %rsp
+	movq $(ist + CONFIG_ISR_STACK_SIZE), %rsp
 
 	/* fall through to __resume */
 
 /*
  * Entry:
- *   RSP = top of ist1
+ *   RSP = top of ist
  */
 
 __resume:
@@ -226,22 +227,23 @@ gdt48:
 	.long gdt
 
 /*
- * TSS - no privilege transitions (yet) so only used for IST1.
+ * TSS - no privilege transitions (yet) so only used for
+ * interrupt (IST1) and exception stack (IST7) locations.
 */
 
 .align 8
 tss:	.long 0
-	.long 0, 0		/* RSP0 */
-	.long 0, 0		/* RSP1 */
-	.long 0, 0		/* RSP2 */
+	.long 0, 0		/* RSP0 */
 	.long 0, 0
-	.long (ist1 + CONFIG_ISR_STACK_SIZE), 0		/* IST1 */
-	.long 0, 0		/* IST2 */
-	.long 0, 0		/* IST3 */
-	.long 0, 0		/* IST4 */
-	.long 0, 0		/* IST5 */
-	.long 0, 0		/* IST6 */
-	.long 0, 0		/* IST7 */
+	.long 0, 0
+	.long 0, 0
+	.long (ist + CONFIG_ISR_STACK_SIZE), 0		/* IST1 */
+	.long 0, 0
+	.long 0, 0
+	.long 0, 0
+	.long 0, 0
+	.long 0, 0
+	.long (exception_stack + CONFIG_EXCEPTION_STACK_SIZE), 0 /* IST7 */
 	.long 0, 0
 	.long 0
 
@@ -259,14 +261,14 @@
 
 .align 16
 idt:
-	IDT(  0, TRAP, 0); IDT(  1, TRAP, 0); IDT(  2, TRAP, 0); IDT(  3, TRAP, 0)
-	IDT(  4, TRAP, 0); IDT(  5, TRAP, 0); IDT(  6, TRAP, 0); IDT(  7, TRAP, 0)
-	IDT(  8, TRAP, 0); IDT(  9, TRAP, 0); IDT( 10, TRAP, 0); IDT( 11, TRAP, 0)
-	IDT( 12, TRAP, 0); IDT( 13, TRAP, 0); IDT( 14, TRAP, 0); IDT( 15, TRAP, 0)
-	IDT( 16, TRAP, 0); IDT( 17, TRAP, 0); IDT( 18, TRAP, 0); IDT( 19, TRAP, 0)
-	IDT( 20, TRAP, 0); IDT( 21, TRAP, 0); IDT( 22, TRAP, 0); IDT( 23, TRAP, 0)
-	IDT( 24, TRAP, 0); IDT( 25, TRAP, 0); IDT( 26, TRAP, 0); IDT( 27, TRAP, 0)
-	IDT( 28, TRAP, 0); IDT( 29, TRAP, 0); IDT( 30, TRAP, 0); IDT( 31, TRAP, 0)
+	IDT(  0, TRAP, 7); IDT(  1, TRAP, 7); IDT(  2, TRAP, 7); IDT(  3, TRAP, 7)
+	IDT(  4, TRAP, 7); IDT(  5, TRAP, 7); IDT(  6, TRAP, 7); IDT(  7, TRAP, 7)
+	IDT(  8, TRAP, 7); IDT(  9, TRAP, 7); IDT( 10, TRAP, 7); IDT( 11, TRAP, 7)
+	IDT( 12, TRAP, 7); IDT( 13, TRAP, 7); IDT( 14, TRAP, 7); IDT( 15, TRAP, 7)
+	IDT( 16, TRAP, 7); IDT( 17, TRAP, 7); IDT( 18, TRAP, 7); IDT( 19, TRAP, 7)
+	IDT( 20, TRAP, 7); IDT( 21, TRAP, 7); IDT( 22, TRAP, 7); IDT( 23, TRAP, 7)
+	IDT( 24, TRAP, 7); IDT( 25, TRAP, 7); IDT( 26, TRAP, 7); IDT( 27, TRAP, 7)
+	IDT( 28, TRAP, 7); IDT( 29, TRAP, 7); IDT( 30, TRAP, 7); IDT( 31, TRAP, 7)
 
 	IDT( 32, INTR, 1); IDT( 33, INTR, 1); IDT( 34, INTR, 1); IDT( 35, INTR, 1)
 	IDT( 36, INTR, 1); IDT( 37, INTR, 1); IDT( 38, INTR, 1); IDT( 39, INTR, 1)
@@ -332,9 +334,31 @@ idt48:
 #define EXCEPT_CODE(nr) vector_ ## nr: pushq $nr; jmp except
 #define EXCEPT(nr)	vector_ ## nr: pushq $0; pushq $nr; jmp except
 
-except:
-	/* save registers and dispatch to x86_exception() */
-	hlt
+except:	/*
+	 * finish struct x86_esf on stack. 'vector' .. 'ss' are
+	 * already there from hardware trap and EXCEPT_*() stub.
+	 */
+
+	pushq %r15
+	pushq %r14
+	pushq %r13
+	pushq %r12
+	pushq %r11
+	pushq %r10
+	pushq %r9
+	pushq %r8
+	pushq %rdi
+	pushq %rsi
+	pushq %rbp
+	pushq %rdx
+	pushq %rcx
+	pushq %rbx
+	pushq %rax
+
+	movq %rsp, %rdi
+	call z_x86_exception
+
+	hlt	/* should not return, but just in case .. */
 
 EXCEPT    ( 0); EXCEPT    ( 1); EXCEPT    ( 2); EXCEPT    ( 3)
 EXCEPT    ( 4); EXCEPT    ( 5); EXCEPT    ( 6); EXCEPT    ( 7)
@@ -472,10 +496,10 @@
 pdp:	.long 0x00000183	/* 0x183 = G, 1GB, R/W, P */
 	.fill 4064, 1, 0
 
 /*
- * IST1 is used both during IRQ processing and early kernel initialization.
+ * The exception stack is used both for exceptions and early initialization.
 */
 
 .align 16
-ist1:
-	.fill CONFIG_ISR_STACK_SIZE, 1, 0xFF
+exception_stack:
+	.fill CONFIG_EXCEPTION_STACK_SIZE, 1, 0xAA
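A note on the IDT() macro's third argument: it selects the Interrupt
Stack Table slot loaded from the TSS before the CPU pushes
ss/rsp/rflags/cs/rip, which is why the exception vectors now say 7 (the
new exception_stack) while interrupt vectors keep 1 (the ISR stack). As
a rough C model of one 16-byte long-mode gate descriptor, with
illustrative field names (the real tables are built by the assembler
macros above):

	#include <stdint.h>

	/* Illustrative model of a 64-bit IDT entry (little-endian
	 * bitfields). ist = 0 means no stack switch; 1..7 index the
	 * TSS IST array: IST1 = ISR stack, IST7 = exception stack here.
	 */
	struct idt_gate64 {
		uint16_t offset_low;   /* handler address bits 0..15 */
		uint16_t selector;     /* code segment selector */
		uint8_t  ist : 3;      /* interrupt stack table slot */
		uint8_t  zero : 5;
		uint8_t  type : 4;     /* 0xE = interrupt, 0xF = trap gate */
		uint8_t  s : 1;        /* 0 for system descriptors */
		uint8_t  dpl : 2;      /* descriptor privilege level */
		uint8_t  p : 1;        /* present */
		uint16_t offset_mid;   /* handler address bits 16..31 */
		uint32_t offset_high;  /* handler address bits 32..63 */
		uint32_t reserved;
	};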
diff --git a/arch/x86/include/intel64/kernel_arch_thread.h b/arch/x86/include/intel64/kernel_arch_thread.h
index 869772458cf..4e28675f6ac 100644
--- a/arch/x86/include/intel64/kernel_arch_thread.h
+++ b/arch/x86/include/intel64/kernel_arch_thread.h
@@ -6,10 +6,10 @@
 #ifndef ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_THREAD_H_
 #define ZEPHYR_ARCH_X86_INCLUDE_INTEL64_KERNEL_ARCH_THREAD_H_
 
-#include
-
 #ifndef _ASMLANGUAGE
 
+#include
+
 /*
  * The _callee_saved registers are unconditionally saved/restored across
  * context switches; the _thread_arch registers are only preserved when

diff --git a/include/arch/x86/intel64/arch.h b/include/arch/x86/intel64/arch.h
index 981582ae3a8..7a4d70af9ed 100644
--- a/include/arch/x86/intel64/arch.h
+++ b/include/arch/x86/intel64/arch.h
@@ -43,11 +43,32 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
 }
 
 /*
- * Bogus ESF stuff until I figure out what to do with it. I suspect
- * this is legacy cruft that we'll want to excise sometime soon, anyway.
+ * the exception stack frame
 */
 
 struct x86_esf {
+	unsigned long rax;
+	unsigned long rbx;
+	unsigned long rcx;
+	unsigned long rdx;
+	unsigned long rbp;
+	unsigned long rsi;
+	unsigned long rdi;
+	unsigned long r8;
+	unsigned long r9;
+	unsigned long r10;
+	unsigned long r11;
+	unsigned long r12;
+	unsigned long r13;
+	unsigned long r14;
+	unsigned long r15;
+	unsigned long vector;
+	unsigned long code;
+	unsigned long rip;
+	unsigned long cs;
+	unsigned long rflags;
+	unsigned long rsp;
+	unsigned long ss;
 };
 
 typedef struct x86_esf z_arch_esf_t;
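Since except: passes %rsp straight to z_x86_exception(), the pushq
sequence in locore.S must be the exact mirror of this member order: rax
is pushed last and so lands at offset 0, and the hardware-pushed
rip/cs/rflags/rsp/ss words sit after 'code'. If one wanted to pin that
invariant down at build time, a sketch along these lines would do it,
using Zephyr's BUILD_ASSERT; illustrative only, not part of the patch:

	#include <stddef.h>	/* offsetof */

	/* Illustrative layout guards: 15 software-pushed GPRs, then the
	 * stub-pushed vector/code, then the 5 hardware-pushed words.
	 */
	BUILD_ASSERT(offsetof(struct x86_esf, rax) == 0);
	BUILD_ASSERT(offsetof(struct x86_esf, vector) == 15 * sizeof(unsigned long));
	BUILD_ASSERT(offsetof(struct x86_esf, rip) == 17 * sizeof(unsigned long));
	BUILD_ASSERT(sizeof(struct x86_esf) == 22 * sizeof(unsigned long));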