kconfig: Rename x86 FPU sharing symbols

This commit renames the x86 Kconfig `CONFIG_{EAGER,LAZY}_FP_SHARING`
symbols to `CONFIG_{EAGER,LAZY}_FPU_SHARING`, in order to align with the
recent renaming of `CONFIG_FP_SHARING` to `CONFIG_FPU_SHARING`.

Signed-off-by: Stephanos Ioannidis <root@stephanos.io>
This commit is contained in:
Stephanos Ioannidis 2020-05-03 18:18:37 +09:00 committed by Ioannis Glaropoulos
commit 0b930a2195
12 changed files with 34 additions and 34 deletions

View file

@@ -98,7 +98,7 @@ config SSE_FP_MATH
Disabling this option means that the compiler utilizes only the Disabling this option means that the compiler utilizes only the
x87 instruction set for floating point operations. x87 instruction set for floating point operations.
config EAGER_FP_SHARING config EAGER_FPU_SHARING
bool bool
depends on FPU depends on FPU
depends on USERSPACE depends on USERSPACE
@@ -113,10 +113,10 @@ config EAGER_FP_SHARING
FPU register set, should be used any time CONFIG_FPU is FPU register set, should be used any time CONFIG_FPU is
enabled, regardless if the FPU is used by one thread or multiple. enabled, regardless if the FPU is used by one thread or multiple.
config LAZY_FP_SHARING config LAZY_FPU_SHARING
bool bool
depends on FPU depends on FPU
depends on !EAGER_FP_SHARING depends on !EAGER_FPU_SHARING
depends on FPU_SHARING depends on FPU_SHARING
default y if X86_NO_LAZY_FP || !USERSPACE default y if X86_NO_LAZY_FP || !USERSPACE
help help

View file

@@ -21,7 +21,7 @@ zephyr_library_sources(
zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD ia32/irq_offload.c) zephyr_library_sources_ifdef(CONFIG_IRQ_OFFLOAD ia32/irq_offload.c)
zephyr_library_sources_ifdef(CONFIG_X86_USERSPACE ia32/userspace.S) zephyr_library_sources_ifdef(CONFIG_X86_USERSPACE ia32/userspace.S)
zephyr_library_sources_ifdef(CONFIG_LAZY_FP_SHARING ia32/float.c) zephyr_library_sources_ifdef(CONFIG_LAZY_FPU_SHARING ia32/float.c)
# Last since we declare default exception handlers here # Last since we declare default exception handlers here
zephyr_library_sources(ia32/fatal.c) zephyr_library_sources(ia32/fatal.c)

View file

@@ -124,7 +124,7 @@ SECTION_FUNC(TEXT, _exception_enter)
#endif #endif
/* ESP is pointing to the ESF at this point */ /* ESP is pointing to the ESF at this point */
#if defined(CONFIG_LAZY_FP_SHARING) #if defined(CONFIG_LAZY_FPU_SHARING)
movl _kernel + _kernel_offset_to_current, %edx movl _kernel + _kernel_offset_to_current, %edx
@@ -141,7 +141,7 @@ SECTION_FUNC(TEXT, _exception_enter)
orb $X86_THREAD_FLAG_EXC, _thread_offset_to_flags(%edx) orb $X86_THREAD_FLAG_EXC, _thread_offset_to_flags(%edx)
#endif /* CONFIG_LAZY_FP_SHARING */ #endif /* CONFIG_LAZY_FPU_SHARING */
/* /*
* restore interrupt enable state, then call the handler * restore interrupt enable state, then call the handler
@@ -164,7 +164,7 @@ allDone:
call *%ecx /* call exception handler */ call *%ecx /* call exception handler */
addl $0x4, %esp addl $0x4, %esp
#if defined(CONFIG_LAZY_FP_SHARING) #if defined(CONFIG_LAZY_FPU_SHARING)
movl _kernel + _kernel_offset_to_current, %ecx movl _kernel + _kernel_offset_to_current, %ecx
@@ -194,7 +194,7 @@ allDone:
andb $~X86_THREAD_FLAG_EXC, _thread_offset_to_flags(%ecx) andb $~X86_THREAD_FLAG_EXC, _thread_offset_to_flags(%ecx)
nestedException: nestedException:
#endif /* CONFIG_LAZY_FP_SHARING */ #endif /* CONFIG_LAZY_FPU_SHARING */
/* /*
* Pop the non-volatile registers from the stack. * Pop the non-volatile registers from the stack.

View file

@@ -17,7 +17,7 @@
* safely by one or more cooperative threads OR by a single preemptive thread, * safely by one or more cooperative threads OR by a single preemptive thread,
* but not by both. * but not by both.
* *
* This code is not necessary for systems with CONFIG_EAGER_FP_SHARING, as * This code is not necessary for systems with CONFIG_EAGER_FPU_SHARING, as
* the floating point context is unconditionally saved/restored with every * the floating point context is unconditionally saved/restored with every
* context switch. * context switch.
* *

View file

@@ -232,7 +232,7 @@ alreadyOnIntStack:
* debug tools that a preemptive context switch has occurred. * debug tools that a preemptive context switch has occurred.
*/ */
#if defined(CONFIG_LAZY_FP_SHARING) #if defined(CONFIG_LAZY_FPU_SHARING)
orb $X86_THREAD_FLAG_INT, _thread_offset_to_flags(%edx) orb $X86_THREAD_FLAG_INT, _thread_offset_to_flags(%edx)
#endif #endif
@@ -261,7 +261,7 @@ alreadyOnIntStack:
* returning control to it at the point where it was interrupted ... * returning control to it at the point where it was interrupted ...
*/ */
#if defined(CONFIG_LAZY_FP_SHARING) #if defined(CONFIG_LAZY_FPU_SHARING)
/* /*
* arch_swap() has restored the floating point registers, if needed. * arch_swap() has restored the floating point registers, if needed.
* Clear X86_THREAD_FLAG_INT in the interrupted thread's state * Clear X86_THREAD_FLAG_INT in the interrupted thread's state
@@ -270,7 +270,7 @@ alreadyOnIntStack:
movl _kernel + _kernel_offset_to_current, %eax movl _kernel + _kernel_offset_to_current, %eax
andb $~X86_THREAD_FLAG_INT, _thread_offset_to_flags(%eax) andb $~X86_THREAD_FLAG_INT, _thread_offset_to_flags(%eax)
#endif /* CONFIG_LAZY_FP_SHARING */ #endif /* CONFIG_LAZY_FPU_SHARING */
/* Restore volatile registers and return to the interrupted thread */ /* Restore volatile registers and return to the interrupted thread */
popl %edi popl %edi

View file

@@ -142,11 +142,11 @@ SECTION_FUNC(TEXT, arch_swap)
*/ */
#endif #endif
#ifdef CONFIG_EAGER_FP_SHARING #ifdef CONFIG_EAGER_FPU_SHARING
/* Eager floating point state restore logic /* Eager floating point state restore logic
* *
* Addresses CVE-2018-3665 * Addresses CVE-2018-3665
* Used as an alternate to CONFIG_LAZY_FP_SHARING if there is any * Used as an alternate to CONFIG_LAZY_FPU_SHARING if there is any
* sensitive data in the floating point/SIMD registers in a system * sensitive data in the floating point/SIMD registers in a system
* with untrusted threads. * with untrusted threads.
* *
@@ -166,7 +166,7 @@ SECTION_FUNC(TEXT, arch_swap)
#else #else
frstor _thread_offset_to_preempFloatReg(%eax) frstor _thread_offset_to_preempFloatReg(%eax)
#endif /* CONFIG_SSE */ #endif /* CONFIG_SSE */
#elif defined(CONFIG_LAZY_FP_SHARING) #elif defined(CONFIG_LAZY_FPU_SHARING)
/* /*
* Clear the CR0[TS] bit (in the event the current thread * Clear the CR0[TS] bit (in the event the current thread
* doesn't have floating point enabled) to prevent the "device not * doesn't have floating point enabled) to prevent the "device not
@@ -320,7 +320,7 @@ restoreContext_NoFloatSwap:
CROHandlingDone: CROHandlingDone:
#endif /* CONFIG_LAZY_FP_SHARING */ #endif /* CONFIG_LAZY_FPU_SHARING */
/* update _kernel.current to reflect incoming thread */ /* update _kernel.current to reflect incoming thread */

View file

@@ -51,11 +51,11 @@ extern int z_float_disable(struct k_thread *thread);
int arch_float_disable(struct k_thread *thread) int arch_float_disable(struct k_thread *thread)
{ {
#if defined(CONFIG_LAZY_FP_SHARING) #if defined(CONFIG_LAZY_FPU_SHARING)
return z_float_disable(thread); return z_float_disable(thread);
#else #else
return -ENOSYS; return -ENOSYS;
#endif /* CONFIG_LAZY_FP_SHARING */ #endif /* CONFIG_LAZY_FPU_SHARING */
} }
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */ #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
@@ -110,8 +110,8 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* doesn't care about their state when execution begins * doesn't care about their state when execution begins
*/ */
thread->callee_saved.esp = (unsigned long)initial_frame; thread->callee_saved.esp = (unsigned long)initial_frame;
#if defined(CONFIG_LAZY_FP_SHARING) #if defined(CONFIG_LAZY_FPU_SHARING)
thread->arch.excNestCount = 0; thread->arch.excNestCount = 0;
#endif /* CONFIG_LAZY_FP_SHARING */ #endif /* CONFIG_LAZY_FPU_SHARING */
thread->arch.flags = 0; thread->arch.flags = 0;
} }

View file

@@ -26,7 +26,7 @@
#include <arch/x86/mmustructs.h> #include <arch/x86/mmustructs.h>
#if defined(CONFIG_LAZY_FP_SHARING) #if defined(CONFIG_LAZY_FPU_SHARING)
GEN_OFFSET_SYM(_thread_arch_t, excNestCount); GEN_OFFSET_SYM(_thread_arch_t, excNestCount);
#endif #endif

View file

@@ -44,9 +44,9 @@
#define _THREAD_WRAPPER_REQUIRED #define _THREAD_WRAPPER_REQUIRED
#endif #endif
#if defined(CONFIG_LAZY_FP_SHARING) && defined(CONFIG_SSE) #if defined(CONFIG_LAZY_FPU_SHARING) && defined(CONFIG_SSE)
#define _FP_USER_MASK (K_FP_REGS | K_SSE_REGS) #define _FP_USER_MASK (K_FP_REGS | K_SSE_REGS)
#elif defined(CONFIG_LAZY_FP_SHARING) #elif defined(CONFIG_LAZY_FPU_SHARING)
#define _FP_USER_MASK (K_FP_REGS) #define _FP_USER_MASK (K_FP_REGS)
#endif #endif

View file

@@ -73,14 +73,14 @@ struct _callee_saved {
typedef struct _callee_saved _callee_saved_t; typedef struct _callee_saved _callee_saved_t;
/* /*
* The macros CONFIG_{LAZY|EAGER}_FP_SHARING shall be set to indicate that the * The macros CONFIG_{LAZY|EAGER}_FPU_SHARING shall be set to indicate that the
* saving/restoring of the traditional x87 floating point (and MMX) registers * saving/restoring of the traditional x87 floating point (and MMX) registers
* are supported by the kernel's context swapping code. The macro * are supported by the kernel's context swapping code. The macro
* CONFIG_SSE shall _also_ be set if saving/restoring of the XMM * CONFIG_SSE shall _also_ be set if saving/restoring of the XMM
* registers is also supported in the kernel's context swapping code. * registers is also supported in the kernel's context swapping code.
*/ */
#if defined(CONFIG_EAGER_FP_SHARING) || defined(CONFIG_LAZY_FP_SHARING) #if defined(CONFIG_EAGER_FPU_SHARING) || defined(CONFIG_LAZY_FPU_SHARING)
/* definition of a single x87 (floating point / MMX) register */ /* definition of a single x87 (floating point / MMX) register */
@@ -169,7 +169,7 @@ typedef struct s_FpRegSetEx {
#endif /* CONFIG_SSE == 0 */ #endif /* CONFIG_SSE == 0 */
#else /* !CONFIG_LAZY_FP_SHARING && !CONFIG_EAGER_FP_SHARING */ #else /* !CONFIG_LAZY_FPU_SHARING && !CONFIG_EAGER_FPU_SHARING */
/* empty floating point register definition */ /* empty floating point register definition */
@@ -179,7 +179,7 @@ typedef struct s_FpRegSet {
typedef struct s_FpRegSetEx { typedef struct s_FpRegSetEx {
} tFpRegSetEx; } tFpRegSetEx;
#endif /* CONFIG_LAZY_FP_SHARING || CONFIG_EAGER_FP_SHARING */ #endif /* CONFIG_LAZY_FPU_SHARING || CONFIG_EAGER_FPU_SHARING */
/* /*
* The following structure defines the set of 'volatile' x87 FPU/MMX/SSE * The following structure defines the set of 'volatile' x87 FPU/MMX/SSE
@@ -221,14 +221,14 @@ struct _thread_arch {
char *psp; char *psp;
#endif #endif
#if defined(CONFIG_LAZY_FP_SHARING) #if defined(CONFIG_LAZY_FPU_SHARING)
/* /*
* Nested exception count to maintain setting of EXC_ACTIVE flag across * Nested exception count to maintain setting of EXC_ACTIVE flag across
* outermost exception. EXC_ACTIVE is used by z_swap() lazy FP * outermost exception. EXC_ACTIVE is used by z_swap() lazy FP
* save/restore and by debug tools. * save/restore and by debug tools.
*/ */
unsigned excNestCount; /* nested exception count */ unsigned excNestCount; /* nested exception count */
#endif /* CONFIG_LAZY_FP_SHARING */ #endif /* CONFIG_LAZY_FPU_SHARING */
/* /*
* The location of all floating point related structures/fields MUST be * The location of all floating point related structures/fields MUST be

View file

@@ -33,7 +33,7 @@ static void usr_fp_thread_entry_1(void)
} }
#if defined(CONFIG_ARM) || defined(CONFIG_RISCV) || \ #if defined(CONFIG_ARM) || defined(CONFIG_RISCV) || \
(defined(CONFIG_X86) && defined(CONFIG_LAZY_FP_SHARING)) (defined(CONFIG_X86) && defined(CONFIG_LAZY_FPU_SHARING))
#define K_FLOAT_DISABLE_SYSCALL_RETVAL 0 #define K_FLOAT_DISABLE_SYSCALL_RETVAL 0
#else #else
#define K_FLOAT_DISABLE_SYSCALL_RETVAL -ENOSYS #define K_FLOAT_DISABLE_SYSCALL_RETVAL -ENOSYS
@@ -87,7 +87,7 @@ void test_k_float_disable_common(void)
zassert_true( zassert_true(
(usr_fp_thread.base.user_options & K_FP_OPTS) != 0, (usr_fp_thread.base.user_options & K_FP_OPTS) != 0,
"usr_fp_thread FP options cleared"); "usr_fp_thread FP options cleared");
#elif defined(CONFIG_X86) && defined(CONFIG_LAZY_FP_SHARING) #elif defined(CONFIG_X86) && defined(CONFIG_LAZY_FPU_SHARING)
zassert_true((k_float_disable(&usr_fp_thread) == 0), zassert_true((k_float_disable(&usr_fp_thread) == 0),
"k_float_disable() failure"); "k_float_disable() failure");
@@ -96,7 +96,7 @@ void test_k_float_disable_common(void)
(usr_fp_thread.base.user_options & K_FP_OPTS) == 0, (usr_fp_thread.base.user_options & K_FP_OPTS) == 0,
"usr_fp_thread FP options not clear (0x%0x)", "usr_fp_thread FP options not clear (0x%0x)",
usr_fp_thread.base.user_options); usr_fp_thread.base.user_options);
#elif defined(CONFIG_X86) && !defined(CONFIG_LAZY_FP_SHARING) #elif defined(CONFIG_X86) && !defined(CONFIG_LAZY_FPU_SHARING)
/* Verify k_float_disable() is not supported */ /* Verify k_float_disable() is not supported */
zassert_true((k_float_disable(&usr_fp_thread) == -ENOSYS), zassert_true((k_float_disable(&usr_fp_thread) == -ENOSYS),
"k_float_disable() successful when not supported"); "k_float_disable() successful when not supported");
@@ -131,7 +131,7 @@ void test_k_float_disable_syscall(void)
k_yield(); k_yield();
#if defined(CONFIG_ARM) || defined(CONFIG_RISCV) || \ #if defined(CONFIG_ARM) || defined(CONFIG_RISCV) || \
(defined(CONFIG_X86) && defined(CONFIG_LAZY_FP_SHARING)) (defined(CONFIG_X86) && defined(CONFIG_LAZY_FPU_SHARING))
/* Verify K_FP_OPTS are now cleared by the user thread itself */ /* Verify K_FP_OPTS are now cleared by the user thread itself */
zassert_true( zassert_true(

View file

@@ -182,7 +182,7 @@ static void load_store_low(void)
* After every 1000 iterations (arbitrarily chosen), explicitly * After every 1000 iterations (arbitrarily chosen), explicitly
* disable floating point operations for the task. * disable floating point operations for the task.
*/ */
#if (defined(CONFIG_X86) && defined(CONFIG_LAZY_FP_SHARING)) || \ #if (defined(CONFIG_X86) && defined(CONFIG_LAZY_FPU_SHARING)) || \
defined(CONFIG_ARMV7_M_ARMV8_M_FP) defined(CONFIG_ARMV7_M_ARMV8_M_FP)
/* /*
* In x86: * In x86: