x86: smp: add TLB shootdown logic

This will be needed when we support memory un-mapping, or sharing
the same user mode page tables across multiple CPUs. Neither is
implemented yet.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
commit d31f62a955
Andrew Boie, 2020-10-15 14:43:29 -07:00 (committed by Anas Nashif)
5 changed files with 67 additions and 15 deletions

@@ -43,6 +43,12 @@ config SCHED_IPI_VECTOR
 	range 33 255
 	depends on SMP
 
+config TLB_IPI_VECTOR
+	int "IDT vector to use for TLB shootdown IPI"
+	default 35
+	range 33 255
+	depends on SMP
+
 # We should really only have to provide one of the following two values,
 # but a bug in the Zephyr SDK for x86 precludes the use of division in
 # the assembler. For now, we require that these values be specified manually,

@@ -11,6 +11,7 @@
 #include <drivers/interrupt_controller/loapic.h>
 #include <irq.h>
 #include <logging/log.h>
+#include <x86_mmu.h>
 
 LOG_MODULE_DECLARE(os);
@@ -130,6 +131,9 @@ void z_x86_ipi_setup(void)
 	x86_irq_funcs[CONFIG_SCHED_IPI_VECTOR - IV_IRQS] =
 		(void *) z_sched_ipi;
+
+	/* TLB shootdown handling */
+	x86_irq_funcs[CONFIG_TLB_IPI_VECTOR - IV_IRQS] = z_x86_tlb_ipi;
 }
 
 /*
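For context on how the vector registered above reaches its handler: on intel64, dynamically connected vectors are dispatched through the x86_irq_funcs[] table indexed by (vector - IV_IRQS). The sketch below is illustrative only; the dispatch routine itself and the use of x86_irq_args[] are assumptions, not part of this diff. Since nothing registers an argument for the TLB vector, z_x86_tlb_ipi() receives an unused argument, which is why it does ARG_UNUSED(arg).

/* Illustrative sketch only: how a dynamically registered vector such as
 * CONFIG_TLB_IPI_VECTOR could be routed to its handler. The real common
 * interrupt code in the tree may differ.
 */
static void irq_dispatch_sketch(unsigned int vector)
{
	unsigned int i = vector - IV_IRQS;

	if (x86_irq_funcs[i] != NULL) {
		/* For CONFIG_TLB_IPI_VECTOR this calls z_x86_tlb_ipi() with
		 * whatever argument was registered (none here, hence NULL).
		 */
		x86_irq_funcs[i](x86_irq_args[i]);
	}
}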

@@ -12,20 +12,6 @@
 #include <x86_mmu.h>
 
 #ifndef CONFIG_X86_KPTI
-/* Set CR3 to a physical address. There must be a valid top-level paging
- * structure here or the CPU will triple fault. The incoming page tables must
- * have the same kernel mappings wrt supervisor mode. Don't use this function
- * unless you know exactly what you are doing.
- */
-static inline void cr3_set(uintptr_t phys)
-{
-#ifdef CONFIG_X86_64
-	__asm__ volatile("movq %0, %%cr3\n\t" : : "r" (phys) : "memory");
-#else
-	__asm__ volatile("movl %0, %%cr3\n\t" : : "r" (phys) : "memory");
-#endif
-}
-
 /* Update the to the incoming thread's page table, and update the location of
  * the privilege elevation stack.
  *
@@ -61,7 +47,7 @@ void z_x86_swap_update_page_tables(struct k_thread *incoming)
 	ptables_phys = incoming->arch.ptables;
 
 	if (ptables_phys != z_x86_cr3_get()) {
-		cr3_set(ptables_phys);
+		z_x86_cr3_set(ptables_phys);
 	}
 }
 
 #endif /* CONFIG_X86_KPTI */

@@ -17,6 +17,7 @@
 #include <x86_mmu.h>
 #include <init.h>
 #include <kernel_internal.h>
+#include <drivers/interrupt_controller/loapic.h>
 
 LOG_MODULE_DECLARE(os);
@@ -219,6 +220,42 @@ static inline void tlb_flush_page(void *addr)
 	/* TODO: Need to implement TLB shootdown for SMP */
 }
 
+#if defined(CONFIG_SMP)
+void z_x86_tlb_ipi(const void *arg)
+{
+	uintptr_t ptables;
+
+	ARG_UNUSED(arg);
+
+#ifdef CONFIG_X86_KPTI
+	/* We're always on the kernel's set of page tables in this context
+	 * if KPTI is turned on
+	 */
+	ptables = z_x86_cr3_get();
+	__ASSERT(ptables == (uintptr_t)&z_x86_kernel_ptables, "");
+#else
+	/* We might have been moved to another memory domain, so always invoke
+	 * z_x86_thread_page_tables_get() instead of using current CR3 value.
+	 */
+	ptables = (uintptr_t)z_x86_thread_page_tables_get(_current);
+#endif
+	/*
+	 * In the future, we can consider making this smarter, such as
+	 * propagating which page tables were modified (in case they are
+	 * not active on this CPU) or an address range to call
+	 * tlb_flush_page() on.
+	 */
+	LOG_DBG("%s on CPU %d\n", __func__, arch_curr_cpu()->id);
+
+	z_x86_cr3_set(ptables);
+}
+
+static inline void tlb_shootdown(void)
+{
+	z_loapic_ipi(0, LOAPIC_ICR_IPI_OTHERS, CONFIG_TLB_IPI_VECTOR);
+}
+#endif /* CONFIG_SMP */
+
 static inline void assert_addr_aligned(uintptr_t addr)
 {
 #if __ASSERT_ON
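To illustrate how tlb_shootdown() is meant to be used once un-mapping exists: the CPU that modifies the page tables flushes its own TLB entry and then broadcasts the IPI so the other CPUs reload CR3 and drop stale translations. A minimal sketch under that assumption; unmap_page_entry() is a made-up placeholder for the future code that actually clears the PTE, while tlb_flush_page() and tlb_shootdown() are the functions in this patch:

/* Hypothetical future unmap path (sketch only). unmap_page_entry() does
 * not exist; it stands in for whatever clears the mapping for 'addr'.
 */
static void unmap_one_page(void *addr)
{
	unmap_page_entry(addr);   /* hypothetical: zero the PTE for addr */
	tlb_flush_page(addr);     /* invalidate the local TLB entry */
#ifdef CONFIG_SMP
	tlb_shootdown();          /* make other CPUs reload their page tables */
#endif
}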

@@ -127,6 +127,20 @@ void z_x86_apply_mem_domain(struct k_thread *thread,
 			    struct k_mem_domain *mem_domain);
 #endif /* CONFIG_USERSPACE */
 
+/* Set CR3 to a physical address. There must be a valid top-level paging
+ * structure here or the CPU will triple fault. The incoming page tables must
+ * have the same kernel mappings wrt supervisor mode. Don't use this function
+ * unless you know exactly what you are doing.
+ */
+static inline void z_x86_cr3_set(uintptr_t phys)
+{
+#ifdef CONFIG_X86_64
+	__asm__ volatile("movq %0, %%cr3\n\t" : : "r" (phys) : "memory");
+#else
+	__asm__ volatile("movl %0, %%cr3\n\t" : : "r" (phys) : "memory");
+#endif
+}
+
 /* Return cr3 value, which is the physical (not virtual) address of the
  * current set of page tables
  */
@@ -161,4 +175,9 @@ static inline pentry_t *z_x86_thread_page_tables_get(struct k_thread *thread)
 	return z_x86_kernel_ptables;
 #endif
 }
+
+#ifdef CONFIG_SMP
+/* Handling function for TLB shootdown inter-processor interrupts. */
+void z_x86_tlb_ipi(const void *arg);
+#endif
+
 #endif /* ZEPHYR_ARCH_X86_INCLUDE_X86_MMU_H */
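For reference, z_x86_cr3_get(), which both z_x86_swap_update_page_tables() and z_x86_tlb_ipi() rely on, is already defined in this header; its body simply falls outside the hunk context above. A sketch of what such a CR3 read looks like, assuming the same inline-assembly style as z_x86_cr3_set() (the in-tree definition may differ in detail):

/* Sketch only: read CR3, mirroring z_x86_cr3_set() above. */
static inline uintptr_t z_x86_cr3_get_sketch(void)
{
	uintptr_t cr3;

#ifdef CONFIG_X86_64
	__asm__ volatile("movq %%cr3, %0\n\t" : "=r" (cr3));
#else
	__asm__ volatile("movl %%cr3, %0\n\t" : "=r" (cr3));
#endif
	return cr3;
}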