x86: fix Spectre V1 index checks

We add two points where we add lfences to disable speculation:

* In the memory buffer validation code, which takes memory addresses
  and sizes from userspace and determines whether this memory is
  actually accessible.

* In the system call landing site, after the system call ID has been
  validated but before it is used.

Kconfigs have been added to enable these checks if the CPU is not
known to be immune on X86.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
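For context, the pattern both fences target is the classic Spectre V1
bounds-check-bypass gadget: a branch validates an index, the CPU
mispredicts it, and a dependent load executes speculatively with an
out-of-range index. A minimal sketch of the shape being mitigated
(illustrative only, not code from this commit; names are invented):

    #define NUM_ENTRIES 64
    static int table[NUM_ENTRIES];

    int lookup(unsigned long id)
    {
            if (id < NUM_ENTRIES) {
    #ifdef CONFIG_BOUNDS_CHECK_BYPASS_MITIGATION
                    /* lfence stalls execution until the bounds check
                     * above has resolved, so a mispredicted branch
                     * cannot load table[id] with an out-of-range id.
                     */
                    __asm__ volatile ("lfence" : : : "memory");
    #endif
                    return table[id];       /* speculative OOB read without the fence */
            }
            return -1;
    }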
commit 82205e61e7
parent 00c2d5afb0

3 changed files with 24 additions and 4 deletions
@@ -101,3 +101,13 @@ config X86_RETPOLINE
 	  description of how retpolines work can be found here[1].
 
 	  [1] https://support.google.com/faqs/answer/7625886
+
+config X86_BOUNDS_CHECK_BYPASS_MITIGATION
+	bool
+	depends on USERSPACE
+	default y if !X86_NO_SPECTRE_V1
+	select BOUNDS_CHECK_BYPASS_MITIGATION
+	help
+	  Hidden config to select arch-independent option to enable
+	  Spectre V1 mitigations by default if the CPU is not known
+	  to be immune to it.
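Note that this hidden option only selects the arch-independent
BOUNDS_CHECK_BYPASS_MITIGATION symbol, so generic code tests the
latter. A hedged sketch of a wrapper one could build on it
(bounds_check_bypass_fence is a hypothetical name, not part of this
change):

    static inline void bounds_check_bypass_fence(void)
    {
    #ifdef CONFIG_BOUNDS_CHECK_BYPASS_MITIGATION
            /* Serialize instruction dispatch: nothing after the fence
             * executes until all prior instructions (including the
             * bounds check) have completed.
             */
            __asm__ volatile ("lfence" : : : "memory");
    #endif
    }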
@@ -199,6 +199,10 @@ SECTION_FUNC(TEXT, _x86_syscall_entry_stub)
 	jae	_bad_syscall
 
 _id_ok:
+#ifdef CONFIG_BOUNDS_CHECK_BYPASS_MITIGATION
+	/* Prevent speculation with bogus system call IDs */
+	lfence
+#endif
 	/* Marshal arguments per calling convention to match what is expected
 	 * for _k_syscall_handler_t functions
 	 */
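In the stub above, `jae _bad_syscall` is the bounds check on the
system call ID; the CPU may mispredict it and dispatch through the
handler table speculatively with an attacker-chosen ID, which is why
the lfence sits after the check but before the ID is used. A C-level
analogue of what the assembly does (a sketch only; the typedefs,
K_SYSCALL_LIMIT, _k_syscall_table, and handle_bad_syscall are
assumptions standing in for the real assembly-side symbols):

    typedef unsigned int u32_t;
    typedef u32_t (*_k_syscall_handler_t)(u32_t a1, u32_t a2, u32_t a3);

    #define K_SYSCALL_LIMIT 128
    extern _k_syscall_handler_t _k_syscall_table[K_SYSCALL_LIMIT];
    extern u32_t handle_bad_syscall(void);

    u32_t do_syscall(u32_t id, u32_t a1, u32_t a2, u32_t a3)
    {
            if (id >= K_SYSCALL_LIMIT) {    /* the 'jae _bad_syscall' test */
                    return handle_bad_syscall();
            }
    #ifdef CONFIG_BOUNDS_CHECK_BYPASS_MITIGATION
            /* A mispredicted branch above could otherwise index the
             * handler table speculatively with an out-of-range id.
             */
            __asm__ volatile ("lfence" : : : "memory");
    #endif
            return _k_syscall_table[id](a1, a2, a3);
    }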
@@ -76,6 +76,7 @@ int _arch_buffer_validate(void *addr, size_t size, int write)
 	u32_t end_pdpte_num = MMU_PDPTE_NUM((char *)addr + size - 1);
 	u32_t pdpte;
 	struct x86_mmu_pt *pte_address;
+	int ret = -EPERM;
 
 	start_pde_num = MMU_PDE_NUM(addr);
 	end_pde_num = MMU_PDE_NUM((char *)addr + size - 1);

@@ -94,7 +95,7 @@ int _arch_buffer_validate(void *addr, size_t size, int write)
 
 		/* Ensure page directory pointer table entry is present */
 		if (X86_MMU_GET_PDPTE_INDEX(&USER_PDPT, pdpte)->p == 0) {
-			return -EPERM;
+			goto out;
 		}
 
 		struct x86_mmu_pd *pd_address =

@@ -111,7 +112,7 @@ int _arch_buffer_validate(void *addr, size_t size, int write)
 		if (!pde_value.p ||
 		    !pde_value.us ||
 		    (write && !pde_value.rw)) {
-			return -EPERM;
+			goto out;
 		}
 
 		pte_address = (struct x86_mmu_pt *)

@@ -155,12 +156,17 @@ int _arch_buffer_validate(void *addr, size_t size, int write)
 			if (!pte_value.p ||
 			    !pte_value.us ||
 			    (write && !pte_value.rw)) {
-				return -EPERM;
+				goto out;
 			}
 		}
 	}
+	ret = 0;
+out:
+#ifdef CONFIG_BOUNDS_CHECK_BYPASS_MITIGATION
+	__asm__ volatile ("lfence" : : : "memory");
+#endif
 
-	return 0;
+	return ret;
 }
 
 static inline void tlb_flush_page(void *addr)
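Converting each `return -EPERM` into `goto out` gives the function a
single exit, so the lfence runs on failure paths as well as success:
a caller that branches on the -EPERM result cannot speculate past its
own check with an unvalidated buffer. The resulting control flow,
condensed into a sketch (the page table walk is elided):

    int _arch_buffer_validate(void *addr, size_t size, int write)
    {
            int ret = -EPERM;

            /* ... page table walk; any failed permission or presence
             * check does 'goto out' instead of returning directly ...
             */

            ret = 0;
    out:
    #ifdef CONFIG_BOUNDS_CHECK_BYPASS_MITIGATION
            /* Every path, pass or fail, is serialized before the
             * verdict is handed back to the caller.
             */
            __asm__ volatile ("lfence" : : : "memory");
    #endif
            return ret;
    }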