x86: implement eager FP save/restore

Speculative execution side channel attacks can read the
entire FPU/SIMD register state on affected Intel Core
processors, see CVE-2018-3665.

We now have two options for managing floating point
context between threads on x86: CONFIG_EAGER_FP_SHARING
and CONFIG_LAZY_FP_SHARING.

The mitigation is to unconditionally save/restore these
registers on context switch, instead of the lazy sharing
algorithm used by CONFIG_LAZY_FP_SHARING.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
Andrew Boie 2019-03-02 13:15:16 -08:00 committed by Andrew Boie
commit 50d72ed9c9
11 changed files with 88 additions and 35 deletions

View file

@ -112,8 +112,8 @@ config X86_NO_SPECULATIVE_VULNERABILITIES
select X86_NO_SPECTRE_V4
select X86_NO_LAZY_FP
help
This hidden option should be set on a per-SOC basis that a
particular SOC does not perform any kind of speculative execution,
This hidden option should be set on a per-SOC basis to indicate that
a particular SOC does not perform any kind of speculative execution,
or is a newer chip which is immune to the class of vulnerabilities
which exploit speculative execution side channel attacks.
@ -182,6 +182,37 @@ config SSE_FP_MATH
Disabling this option means that the compiler utilizes only the
x87 instruction set for floating point operations.
config EAGER_FP_SHARING
bool
depends on FLOAT
depends on USERSPACE
default y if !X86_NO_LAZY_FP
help
This hidden option unconditionally saves/restores the FPU/SIMD
register state on every context switch.
Mitigates CVE-2018-3665, but incurs a performance hit.
For vulnerable systems that process sensitive information in the
FPU register set, this option should be used any time CONFIG_FLOAT is
enabled, regardless of whether the FPU is used by one thread or multiple threads.
config LAZY_FP_SHARING
bool
depends on FLOAT
depends on !EAGER_FP_SHARING
depends on FP_SHARING
default y if X86_NO_LAZY_FP || !USERSPACE
help
This hidden option allows multiple threads to use the floating point
registers, using logic to lazily save/restore the floating point
register state on context switch.
On Intel Core processors, this may be vulnerable to exploits which allow
malware to read the contents of all floating point registers, see
CVE-2018-3665.
endmenu
choice

View file

@ -26,11 +26,11 @@ zephyr_library_sources(
spec_ctrl.c
)
zephyr_library_sources_if_kconfig( irq_offload.c)
zephyr_library_sources_if_kconfig( x86_mmu.c)
zephyr_library_sources_if_kconfig( reboot_rst_cnt.c)
zephyr_library_sources_ifdef(CONFIG_FP_SHARING float.c)
zephyr_library_sources_ifdef(CONFIG_X86_USERSPACE userspace.S)
zephyr_library_sources_if_kconfig( irq_offload.c)
zephyr_library_sources_if_kconfig( x86_mmu.c)
zephyr_library_sources_if_kconfig( reboot_rst_cnt.c)
zephyr_library_sources_ifdef(CONFIG_LAZY_FP_SHARING float.c)
zephyr_library_sources_ifdef(CONFIG_X86_USERSPACE userspace.S)
# Last since we declare default exception handlers here
zephyr_library_sources(fatal.c)

View file

@ -125,7 +125,7 @@ SECTION_FUNC(TEXT, _exception_enter)
#endif
/* ESP is pointing to the ESF at this point */
#if defined(CONFIG_FP_SHARING)
#if defined(CONFIG_LAZY_FP_SHARING)
movl _kernel + _kernel_offset_to_current, %edx
@ -142,7 +142,7 @@ SECTION_FUNC(TEXT, _exception_enter)
orb $_EXC_ACTIVE, _thread_offset_to_thread_state(%edx)
#endif /* CONFIG_FP_SHARING */
#endif /* CONFIG_LAZY_FP_SHARING */
/*
* restore interrupt enable state, then call the handler
@ -172,7 +172,7 @@ allDone:
addl $0x4, %esp
#endif
#if defined(CONFIG_FP_SHARING)
#if defined(CONFIG_LAZY_FP_SHARING)
movl _kernel + _kernel_offset_to_current, %ecx
@ -202,7 +202,7 @@ allDone:
andb $~_EXC_ACTIVE, _thread_offset_to_thread_state(%ecx)
nestedException:
#endif /* CONFIG_FP_SHARING */
#endif /* CONFIG_LAZY_FP_SHARING */
/*
* Pop the non-volatile registers from the stack.

View file

@ -17,6 +17,10 @@
* safely by one or more cooperative threads OR by a single preemptive thread,
* but not by both.
*
* This code is not necessary for systems with CONFIG_EAGER_FP_SHARING, as
* the floating point context is unconditionally saved/restored with every
* context switch.
*
* The floating point register sharing mechanism is designed for minimal
* intrusiveness. Floating point state saving is only performed for threads
* that explicitly indicate they are using FPU registers, to avoid impacting
@ -43,10 +47,6 @@
#include <toolchain.h>
#include <asm_inline.h>
/* the entire library vanishes without the FP_SHARING option enabled */
#ifdef CONFIG_FP_SHARING
/* SSE control/status register default value (used by assembler code) */
extern u32_t _sse_mxcsr_default_value;
@ -244,5 +244,3 @@ void _FpNotAvailableExcHandler(NANO_ESF *pEsf)
k_float_enable(_current, _FP_USER_MASK);
}
_EXCEPTION_CONNECT_NOCODE(_FpNotAvailableExcHandler, IV_DEVICE_NOT_AVAILABLE);
#endif /* CONFIG_FP_SHARING */

View file

@ -260,7 +260,7 @@ alreadyOnIntStack:
* debug tools that a preemptive context switch has occurred.
*/
#if defined(CONFIG_FP_SHARING)
#if defined(CONFIG_LAZY_FP_SHARING)
orb $_INT_ACTIVE, _thread_offset_to_thread_state(%edx)
#endif
@ -296,7 +296,7 @@ alreadyOnIntStack:
* returning control to it at the point where it was interrupted ...
*/
#if defined(CONFIG_FP_SHARING)
#if defined(CONFIG_LAZY_FP_SHARING)
/*
* __swap() has restored the floating point registers, if needed.
* Clear the _INT_ACTIVE bit in the interrupted thread's state
@ -305,7 +305,7 @@ alreadyOnIntStack:
movl _kernel + _kernel_offset_to_current, %eax
andb $~_INT_ACTIVE, _thread_offset_to_thread_state(%eax)
#endif /* CONFIG_FP_SHARING */
#endif /* CONFIG_LAZY_FP_SHARING */
/* Restore volatile registers and return to the interrupted thread */
#ifdef CONFIG_INT_LATENCY_BENCHMARK

View file

@ -32,7 +32,7 @@
#include <kernel_offsets.h>
#if defined(CONFIG_FP_SHARING)
#if defined(CONFIG_LAZY_FP_SHARING)
GEN_OFFSET_SYM(_thread_arch_t, excNestCount);
#endif

View file

@ -171,7 +171,31 @@ SECTION_FUNC(TEXT, __swap)
#endif
#endif
#ifdef CONFIG_FP_SHARING
#ifdef CONFIG_EAGER_FP_SHARING
/* Eager floating point state restore logic
*
* Addresses CVE-2018-3665
* Used as an alternate to CONFIG_LAZY_FP_SHARING if there is any
* sensitive data in the floating point/SIMD registers in a system
* with untrusted threads.
*
* Unconditionally save/restore floating point registers on context
* switch.
*/
/* Save outgoing thread context */
#ifdef CONFIG_SSE
fxsave _thread_offset_to_preempFloatReg(%edx)
fninit
#else
fnsave _thread_offset_to_preempFloatReg(%edx)
#endif
/* Restore incoming thread context */
#ifdef CONFIG_SSE
fxrstor _thread_offset_to_preempFloatReg(%eax)
#else
frstor _thread_offset_to_preempFloatReg(%eax)
#endif /* CONFIG_SSE */
#elif defined(CONFIG_LAZY_FP_SHARING)
/*
* Clear the CR0[TS] bit (in the event the current thread
* doesn't have floating point enabled) to prevent the "device not
@ -325,7 +349,7 @@ restoreContext_NoFloatSwap:
CROHandlingDone:
#endif /* CONFIG_FP_SHARING */
#endif /* CONFIG_LAZY_FP_SHARING */
/* update _kernel.current to reflect incoming thread */

View file

@ -122,9 +122,9 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
*/
thread->callee_saved.esp = (unsigned long)initial_frame;
#if defined(CONFIG_FP_SHARING)
#if defined(CONFIG_LAZY_FP_SHARING)
thread->arch.excNestCount = 0;
#endif /* CONFIG_FP_SHARING */
#endif /* CONFIG_LAZY_FP_SHARING */
}
#ifdef CONFIG_X86_USERSPACE

View file

@ -40,7 +40,7 @@ static inline unsigned int EflagsGet(void)
}
#ifdef CONFIG_FP_SHARING
#ifdef CONFIG_LAZY_FP_SHARING
/**
*
@ -133,7 +133,7 @@ static inline void _do_sse_regs_init(void)
}
#endif /* CONFIG_SSE */
#endif /* CONFIG_FP_SHARING */
#endif /* CONFIG_LAZY_FP_SHARING */
#endif /* _ASMLANGUAGE */

View file

@ -64,9 +64,9 @@
/* end - states */
#if defined(CONFIG_FP_SHARING) && defined(CONFIG_SSE)
#if defined(CONFIG_LAZY_FP_SHARING) && defined(CONFIG_SSE)
#define _FP_USER_MASK (K_FP_REGS | K_SSE_REGS)
#elif defined(CONFIG_FP_SHARING)
#elif defined(CONFIG_LAZY_FP_SHARING)
#define _FP_USER_MASK (K_FP_REGS)
#endif

View file

@ -91,14 +91,14 @@ struct _callee_saved {
typedef struct _callee_saved _callee_saved_t;
/*
* The macro CONFIG_FP_SHARING shall be set to indicate that the
* The macros CONFIG_{LAZY|EAGER}_FP_SHARING shall be set to indicate that the
* saving/restoring of the traditional x87 floating point (and MMX) registers
* are supported by the kernel's context swapping code. The macro
* CONFIG_SSE shall _also_ be set if saving/restoring of the XMM
* registers is also supported in the kernel's context swapping code.
*/
#ifdef CONFIG_FP_SHARING
#if defined(CONFIG_EAGER_FP_SHARING) || defined(CONFIG_LAZY_FP_SHARING)
/* definition of a single x87 (floating point / MMX) register */
@ -187,7 +187,7 @@ typedef struct s_FpRegSetEx {
#endif /* CONFIG_SSE == 0 */
#else /* CONFIG_FP_SHARING == 0 */
#else /* !CONFIG_LAZY_FP_SHARING && !CONFIG_EAGER_FP_SHARING */
/* empty floating point register definition */
@ -197,7 +197,7 @@ typedef struct s_FpRegSet {
typedef struct s_FpRegSetEx {
} tFpRegSetEx;
#endif /* CONFIG_FP_SHARING == 0 */
#endif /* CONFIG_LAZY_FP_SHARING || CONFIG_EAGER_FP_SHARING */
/*
* The following structure defines the set of 'non-volatile' x87 FPU/MMX/SSE
@ -241,14 +241,14 @@ typedef struct s_preempFloatReg {
struct _thread_arch {
#if defined(CONFIG_FP_SHARING)
#if defined(CONFIG_LAZY_FP_SHARING)
/*
* Nested exception count to maintain setting of EXC_ACTIVE flag across
* outermost exception. EXC_ACTIVE is used by z_swap() lazy FP
* save/restore and by debug tools.
*/
unsigned excNestCount; /* nested exception count */
#endif /* CONFIG_FP_SHARING */
#endif /* CONFIG_LAZY_FP_SHARING */
/*
* The location of all floating point related structures/fields MUST be