x86: mitigate L1 Terminal Fault vulnerability

During speculative execution, the CPU may treat non-present page
table entries as valid and dereference the physical address they
contain, which can expose the contents of that memory through
cache side channels.

Any non-present PTE will now have its address bits zeroed, so
that any speculative read through it can only reach the NULL
page.

The expected performance impact is minor enough that this
mitigation is enabled at all times.
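
To make the mechanism concrete, here is a minimal standalone sketch of
the idea, using illustrative mask values (the kernel's real
MMU_PTE_P_MASK and MMU_PTE_PAGE_MASK come from its MMU headers; the
constants below are assumptions for demonstration only):

#include <stdint.h>
#include <stdio.h>

/* Illustrative values; the actual masks are defined by the MMU headers */
#define PTE_P_MASK     0x1ULL         /* Present bit */
#define PTE_PAGE_MASK  0xFFFFF000ULL  /* physical address bits of a 4KB page */

int main(void)
{
	/* PTE mapping physical page 0xabc000, currently present */
	uint64_t pte = 0xabc000ULL | PTE_P_MASK;

	/* Unmap: clear the present bit and the address field together, so
	 * a speculative load through this PTE can only reach physical
	 * page 0 (the NULL page).
	 */
	pte &= ~(PTE_P_MASK | PTE_PAGE_MASK);
	printf("non-present PTE: %#llx\n", (unsigned long long)pte); /* 0 */

	return 0;
}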

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
commit 6c8825fc96
Andrew Boie, 2019-03-01 11:42:03 -08:00
2 files changed, 21 insertions(+), 2 deletions(-)


@@ -186,12 +186,29 @@ void _x86_mmu_set_flags(struct x86_mmu_pdpt *pdpt, void *ptr,
 	__ASSERT(!(addr & MMU_PAGE_MASK), "unaligned address provided");
 	__ASSERT(!(size & MMU_PAGE_MASK), "unaligned size provided");
 
+	/* L1TF mitigation: non-present PTEs will have address fields
+	 * zeroed. Expand the mask to include address bits if we are changing
+	 * the present bit.
+	 */
+	if ((mask & MMU_PTE_P_MASK) != 0) {
+		mask |= MMU_PTE_PAGE_MASK;
+	}
+
 	while (size != 0) {
 		/* TODO we're not generating 2MB entries at the moment */
 		__ASSERT(X86_MMU_GET_PDE(pdpt, addr)->ps != 1, "2MB PDE found");
 		pte = X86_MMU_GET_PTE(pdpt, addr);
+
+		/* If we're setting the present bit, restore the address
+		 * field. If we're clearing it, then the address field
+		 * will be zeroed instead, mapping the PTE to the NULL page.
+		 */
+		if (((mask & MMU_PTE_P_MASK) != 0) &&
+		    ((flags & MMU_ENTRY_PRESENT) != 0)) {
+			flags |= addr;
+		}
+
 		pte->value = (pte->value & ~mask) | flags;
 		tlb_flush_page((void *)addr);
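
Callers need no changes to benefit: any flag update whose mask touches
the present bit now picks up the address-zeroing automatically. A
hypothetical caller might look like the sketch below (the kernel_pdpt
symbol and the unmap_region helper are invented for illustration;
MMU_ENTRY_NOT_PRESENT is assumed to be the counterpart of the
MMU_ENTRY_PRESENT flag seen in the diff):

#include <mmustructs.h>  /* struct x86_mmu_pdpt and MMU_* definitions */

extern struct x86_mmu_pdpt kernel_pdpt;  /* illustrative page table symbol */

/* Hypothetical helper: revoke access to a page-aligned region. Because
 * the mask includes MMU_PTE_P_MASK, _x86_mmu_set_flags() now also
 * zeroes the address bits of every affected PTE.
 */
static void unmap_region(void *ptr, size_t size)
{
	_x86_mmu_set_flags(&kernel_pdpt, ptr, size,
			   MMU_ENTRY_NOT_PRESENT,
			   MMU_PTE_P_MASK);
}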


@@ -195,10 +195,12 @@ class PageMode_PAE:
         else:
             present = PAGE_ENTRY_PRESENT
 
-        binary_value = (present | read_write | user_mode | page_table | xd)
+        binary_value = (present | read_write | user_mode | xd)
+        # L1TF mitigation: map non-present pages to the NULL page
+        if present:
+            binary_value |= page_table
 
         return binary_value
 
     def clean_up_unused_pdpte(self):
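
The generator thus applies the same rule at build time that the kernel
applies at run time. A C rendering of the script's logic, with invented
flag values (none of these constants are Zephyr's actual definitions),
might look like:

#include <stdint.h>

/* Illustrative flag values only */
#define PAGE_ENTRY_PRESENT     0x1ULL
#define PAGE_ENTRY_READ_WRITE  0x2ULL
#define PAGE_ENTRY_USER        0x4ULL
#define PAGE_ENTRY_XD          (1ULL << 63)

/* page_table carries the physical address bits of the mapped page */
static uint64_t pte_value(uint64_t page_table, uint64_t present,
			  uint64_t read_write, uint64_t user_mode,
			  uint64_t xd)
{
	uint64_t binary_value = present | read_write | user_mode | xd;

	/* L1TF mitigation: only a present entry gets an address; a
	 * non-present entry keeps its address field zero, pointing
	 * at the NULL page.
	 */
	if (present) {
		binary_value |= page_table;
	}

	return binary_value;
}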