x86: organize 64-bit ESF

The callee-saved registers have been separated out and are no longer
saved/restored unless exception debugging (CONFIG_EXCEPTION_DEBUG) is enabled.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Authored by Andrew Boie on 2020-02-06 17:43:50 -08:00; committed by Anas Nashif.
Commit: 768a30c14f
2 changed files with 53 additions and 38 deletions
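
The practical effect is that any consumer of the ESF must guard the
callee-saved members the same way the struct definition below does. A
minimal sketch of such a consumer, assuming struct x86_esf is visible
via <arch/cpu.h>; the function dump_regs and its output format are
illustrative, not part of this commit:

#include <sys/printk.h>
#include <arch/cpu.h>	/* assumed location of struct x86_esf */

static void dump_regs(const struct x86_esf *esf)
{
	/* Caller-saved members are always present */
	printk("RIP=%lx RAX=%lx RCX=%lx RDX=%lx R11=%lx\n",
	       esf->rip, esf->rax, esf->rcx, esf->rdx, esf->r11);
#ifdef CONFIG_EXCEPTION_DEBUG
	/* Callee-saved members only exist (and are only pushed by the
	 * entry stub) when exception debugging is enabled.
	 */
	printk("RBX=%lx RBP=%lx R12=%lx R13=%lx R14=%lx R15=%lx\n",
	       esf->rbx, esf->rbp, esf->r12, esf->r13, esf->r14, esf->r15);
#endif
}

Frame dumps are the only consumer of RBX/RBP/R12-R15 here; the System V
ABI already obliges the C handler to preserve them, so skipping the
pushes when debugging is off loses nothing.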

File 1 of 2 (exception entry/exit assembly):

@@ -347,7 +347,7 @@ except: /*
 	 * already there from hardware trap and EXCEPT_*() stub.
 	 */
-	pushq %r15
+	pushq %r11
 #ifdef CONFIG_USERSPACE
 	/* Swap GS register values and page tables if we came from user mode */
@@ -356,8 +356,8 @@ except: /*
 	swapgs
 #ifdef CONFIG_X86_KPTI
 	/* Load kernel's page table */
-	movq $z_x86_kernel_ptables, %r15
-	movq %r15, %cr3
+	movq $z_x86_kernel_ptables, %r11
+	movq %r11, %cr3
 #endif /* CONFIG_X86_KPTI */
 1:
 #ifdef CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION
@@ -365,72 +365,77 @@ except: /*
 	lfence
 #endif /* CONFIG_X86_BOUNDS_CHECK_BYPASS_MITIGATION */
 #ifdef CONFIG_X86_KPTI
-	/* Save old trampoline stack pointer in R15 */
-	movq %rsp, %r15
+	/* Save old trampoline stack pointer in R11 */
+	movq %rsp, %r11
 	/* Switch to the exception stack */
 	movq %gs:__x86_tss64_t_ist7_OFFSET, %rsp
 	/* Transplant trampoline stack contents */
-	pushq 56(%r15)	/* SS */
-	pushq 48(%r15)	/* RSP */
-	pushq 40(%r15)	/* RFLAGS */
-	pushq 32(%r15)	/* CS */
-	pushq 24(%r15)	/* RIP */
-	pushq 16(%r15)	/* Error code */
-	pushq 8(%r15)	/* Vector */
-	pushq (%r15)	/* Stashed R15 */
-	movq $0, (%r15)	/* Cover our tracks */
+	pushq 56(%r11)	/* SS */
+	pushq 48(%r11)	/* RSP */
+	pushq 40(%r11)	/* RFLAGS */
+	pushq 32(%r11)	/* CS */
+	pushq 24(%r11)	/* RIP */
+	pushq 16(%r11)	/* Error code */
+	pushq 8(%r11)	/* Vector */
+	pushq (%r11)	/* Stashed R11 */
+	movq $0, (%r11)	/* Cover our tracks */
 	/* We're done, it's safe to re-enable interrupts. */
 	sti
 #endif /* CONFIG_X86_KPTI */
 #endif /* CONFIG_USERSPACE */
+	/* In addition to r11, push the rest of the caller-saved regs */
+	/* Positioning of this fxsave is important, RSP must be 16-byte
+	 * aligned
+	 */
 	subq $X86_FXSAVE_SIZE, %rsp
 	fxsave (%rsp)
-	pushq %r14
-	pushq %r13
-	pushq %r12
-	pushq %r11
 	pushq %r10
 	pushq %r9
 	pushq %r8
 	pushq %rdi
 	pushq %rsi
-	pushq %rbp
 	pushq %rdx
 	pushq %rcx
-	pushq %rbx
 	pushq %rax
+#ifdef CONFIG_EXCEPTION_DEBUG
+	/* Callee saved regs */
+	pushq %r15
+	pushq %r14
+	pushq %r13
+	pushq %r12
+	pushq %rbp
+	pushq %rbx
+#endif /* CONFIG_EXCEPTION_DEBUG */
 	movq %rsp, %rdi
-	/* TODO we don't need to push so many registers if we are not
-	 * dumping out exception info since RBX, RBP, R12-R15 are callee-saved
-	 */
 	call z_x86_exception
 	/* If we returned, the exception was handled successfully and the
 	 * thread may resume (the pushed RIP may have been modified)
 	 */
-	popq %rax
+#ifdef CONFIG_EXCEPTION_DEBUG
 	popq %rbx
+	popq %rbp
+	popq %r12
+	popq %r13
+	popq %r14
+	popq %r15
+#endif /* CONFIG_EXCEPTION_DEBUG */
+	popq %rax
 	popq %rcx
 	popq %rdx
-	popq %rbp
 	popq %rsi
 	popq %rdi
 	popq %r8
 	popq %r9
 	popq %r10
-	popq %r11
-	popq %r12
-	popq %r13
-	popq %r14
 	fxrstor (%rsp)
 	addq $X86_FXSAVE_SIZE, %rsp
-	popq %r15
+	popq %r11
 	/* Drop the vector/err code pushed by the HW or EXCEPT_*() stub */
 	add $16, %rsp
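
The KPTI transplant in this hunk copies eight quadwords from the
user-visible trampoline stack onto the per-CPU exception stack before
interrupts are re-enabled, then zeroes the stashed-register slot so no
kernel-written value lingers where user-mapped page tables can see it.
A rough C model of that data movement, with hypothetical names (not
part of this commit):

/* Slots on the trampoline stack, lowest address first: stashed R11,
 * vector, error code, RIP, CS, RFLAGS, RSP, SS.
 */
static void transplant(unsigned long *exc_top, unsigned long *tramp_sp)
{
	/* pushq 56(%r11) ... pushq (%r11): highest slot (SS) first */
	for (int i = 7; i >= 0; i--) {
		*--exc_top = tramp_sp[i];
	}
	tramp_sp[0] = 0UL; /* movq $0, (%r11): cover our tracks */
}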
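Note that the push order is the exact reverse of the member order in
struct x86_esf: the stack grows downward, so the last register pushed
lands at the lowest address and becomes the first member of the frame
passed to z_x86_exception() in %rdi. A hedged compile-time check of
that correspondence (illustrative, not in this commit):

#include <stddef.h>
#include <toolchain.h>	/* BUILD_ASSERT */

#ifdef CONFIG_EXCEPTION_DEBUG
/* rbx is pushed last, so it sits at offset 0; rax follows the six
 * callee-saved slots (6 * 8 bytes).
 */
BUILD_ASSERT(offsetof(struct x86_esf, rbx) == 0);
BUILD_ASSERT(offsetof(struct x86_esf, rax) == 48);
#else
BUILD_ASSERT(offsetof(struct x86_esf, rax) == 0);
#endif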

File 2 of 2 (struct x86_esf definition):

@@ -32,22 +32,32 @@ static ALWAYS_INLINE unsigned int arch_irq_lock(void)
  */
 struct x86_esf {
-	unsigned long rax;
+#ifdef CONFIG_EXCEPTION_DEBUG
+	/* callee-saved */
 	unsigned long rbx;
+	unsigned long rbp;
+	unsigned long r12;
+	unsigned long r13;
+	unsigned long r14;
+	unsigned long r15;
+#endif /* CONFIG_EXCEPTION_DEBUG */
+
+	/* Caller-saved regs */
+	unsigned long rax;
 	unsigned long rcx;
 	unsigned long rdx;
-	unsigned long rbp;
 	unsigned long rsi;
 	unsigned long rdi;
 	unsigned long r8;
 	unsigned long r9;
 	unsigned long r10;
-	unsigned long r11;
-	unsigned long r12;
-	unsigned long r13;
-	unsigned long r14;
+
+	/* Must be aligned 16 bytes from the end of this struct due to
+	 * requirements of 'fxsave (%rsp)'
+	 */
 	char fxsave[X86_FXSAVE_SIZE];
-	unsigned long r15;
+	unsigned long r11;
+	/* Pushed by CPU or assembly stub */
 	unsigned long vector;
 	unsigned long code;
 	unsigned long rip;
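
The alignment comment is load-bearing: fxsave and fxrstor fault on a
non-16-byte-aligned operand. On x86-64 the CPU aligns RSP to 16 bytes
before pushing the interrupt frame, and everything pushed between that
point and the fxsave area (SS, RSP, RFLAGS, CS, RIP, error code,
vector, R11) totals 8 * 8 = 64 bytes, so %rsp is still 16-byte aligned
when 'fxsave (%rsp)' runs. A hedged compile-time restatement, assuming
the struct ends with the CPU-pushed cs/rflags/rsp/ss members after rip
(not shown in this hunk):

/* Distance from the start of the fxsave region to the end of the
 * struct must be a multiple of 16 for 'fxsave (%rsp)' to be legal;
 * X86_FXSAVE_SIZE (512) is itself a multiple of 16.
 */
BUILD_ASSERT((sizeof(struct x86_esf) -
	      offsetof(struct x86_esf, fxsave)) % 16 == 0);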