x86: remove retpoline code

This code:

1) Doesn't work
2) Hasn't ever been enabled by default
3) We mitigate Spectre V2 via Extended IBRS anyway

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
Andrew Boie 2019-12-18 15:11:59 -08:00 committed by Anas Nashif
commit 07c278382a
6 changed files with 4 additions and 61 deletions

View file

@@ -43,16 +43,6 @@ endmenu

 menu "Processor Capabilities"

-config X86_RETPOLINE
-	bool "Build with retpolines enabled in x86 assembly code"
-	depends on USERSPACE
-	help
-	  This is recommended on platforms with speculative executions, to
-	  protect against branch target injection (AKA Spectre-V2). Full
-	  description of how retpolines work can be found here[1].
-
-	  [1] https://support.google.com/faqs/answer/7625886
-
 config X86_ENABLE_TSS
 	bool
 	help

View file

@@ -161,7 +161,7 @@ SECTION_FUNC(TEXT, _exception_enter)
 allDone:
 	pushl	%esp			/* push z_arch_esf_t * parameter */
-	INDIRECT_CALL(%ecx)		/* call exception handler */
+	call	*%ecx			/* call exception handler */
 	addl	$0x4, %esp

 #if defined(CONFIG_LAZY_FP_SHARING)

View file

@@ -196,7 +196,7 @@ alreadyOnIntStack:
 	sti			/* re-enable interrupts */
 #endif
 	/* Now call the interrupt handler */
-	INDIRECT_CALL(%edx)
+	call	*%edx
 	/* Discard ISR argument */
 	addl	$0x4, %esp
 #ifdef CONFIG_NESTED_INTERRUPTS

View file

@@ -417,5 +417,5 @@ time_read_not_needed:
 SECTION_FUNC(TEXT, z_x86_thread_entry_wrapper)
 	movl	$0, (%esp)
-	INDIRECT_JMP(%edi)
+	jmp	*%edi
 #endif /* _THREAD_WRAPPER_REQUIRED */

View file

@@ -212,7 +212,7 @@ _id_ok:
 	mov	_k_syscall_table(%edi, %esi, 4), %ebx

 	/* Run the handler, which is some entry in _k_syscall_table */
-	INDIRECT_CALL(%ebx)
+	call	*%ebx

 	/* EAX now contains return value. Pop or xor everything else to prevent
 	 * information leak from kernel mode.

View file

@@ -14,53 +14,6 @@

 #if defined(_ASMLANGUAGE)

-#if defined(CONFIG_X86_RETPOLINE)
-/*
- * For a description of how retpolines are constructed for both indirect
- * jumps and indirect calls, please refer to this documentation:
- * https://support.google.com/faqs/answer/7625886
- *
- * Since these macros are used in a few places in arch/x86/core assembly
- * routines, with different reg parameters, it's not possible to use
- * the "out of line" construction technique to share a trampoline.
- */
-
-#define INDIRECT_JMP_IMPL(reg, id) \
-	call .set_up_target ## id; \
-.speculative_trap ## id: \
-	pause; \
-	jmp .speculative_trap ## id; \
-.set_up_target ## id: \
-	mov reg, (%esp); \
-	ret
-
-#define INDIRECT_CALL_IMPL(reg, id) \
-	call .set_up_return ## id; \
-.inner_indirect_branch ## id: \
-	call .set_up_target ## id; \
-.speculative_trap ## id: \
-	pause; \
-	jmp .speculative_trap ## id; \
-.set_up_target ## id: \
-	mov reg, (%esp); \
-	ret; \
-.set_up_return ## id: \
-	call .inner_indirect_branch ## id
-
-#define INDIRECT_CALL_IMPL1(reg, id) INDIRECT_CALL_IMPL(reg, id)
-#define INDIRECT_JMP_IMPL1(reg, id) INDIRECT_JMP_IMPL(reg, id)
-
-#define INDIRECT_CALL(reg) INDIRECT_CALL_IMPL1(reg, __COUNTER__)
-#define INDIRECT_JMP(reg) INDIRECT_JMP_IMPL1(reg, __COUNTER__)
-#else
-#define INDIRECT_CALL(reg) call *reg
-#define INDIRECT_JMP(reg) jmp *reg
-#endif /* CONFIG_X86_RETPOLINE */
-
 #ifdef CONFIG_X86_KPTI
 GTEXT(z_x86_trampoline_to_user)
 GTEXT(z_x86_trampoline_to_kernel)