xtensa: mmu: invalidate mem domain TLBs during page table swap

This adds a Kconfig option to enable invalidating the TLBs
associated with the incoming thread's memory domain during page
table swaps. It provides a workaround, if needed, to clear out
stale TLB entries left over from the thread being swapped out,
since those stale entries may carry incorrect permissions and
ring levels.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
Commit fa25c0b0b8, authored by Daniel Leung on 2023-12-01 16:51:46 -08:00,
committed by Fabio Baltieri.
2 changed files with 19 additions and 0 deletions.
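In essence, the change walks every partition of the incoming thread's memory
domain one page at a time and drops the matching data-TLB entry, so no
translation cached while the outgoing thread ran survives the page table swap.
A minimal standalone sketch of that loop follows; the helper name
invalidate_domain_dtlb() is made up here, while xtensa_dtlb_vaddr_invalidate()
and the k_mem_domain/k_mem_partition fields are the ones used in the hunks
below (the arch-private MMU header that declares the invalidate helper is
assumed).

#include <zephyr/kernel.h>
/* xtensa_dtlb_vaddr_invalidate() comes from the Xtensa arch MMU internals;
 * the exact header path is assumed and not shown here.
 */

/* Sketch only, not the committed code: drop the DTLB entries covering
 * every partition of a memory domain, page by page.
 */
static void invalidate_domain_dtlb(struct k_mem_domain *domain)
{
	for (int idx = 0; idx < domain->num_partitions; idx++) {
		struct k_mem_partition *part = &domain->partitions[idx];
		uintptr_t end = part->start + part->size;

		/* Invalidate the translation for each page in the partition so
		 * the next access refills from the freshly installed page tables.
		 */
		for (uintptr_t addr = part->start; addr < end;
		     addr += CONFIG_MMU_PAGE_SIZE) {
			xtensa_dtlb_vaddr_invalidate((void *)addr);
		}
	}
}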

@@ -153,6 +153,12 @@ config XTENSA_MMU_DOUBLE_MAP
	  This option specifies that the memory is mapped in two
	  distinct region, cached and uncached.

config XTENSA_INVALIDATE_MEM_DOMAIN_TLB_ON_SWAP
	bool
	help
	  This invalidates all TLBs referred by the incoming thread's
	  memory domain when swapping page tables.

endif # XTENSA_MMU

config XTENSA_SYSCALL_USE_HELPER

@@ -1095,6 +1095,19 @@ void xtensa_swap_update_page_tables(struct k_thread *incoming)
		&(incoming->mem_domain_info.mem_domain->arch);

	xtensa_set_paging(domain->asid, ptables);

#ifdef CONFIG_XTENSA_INVALIDATE_MEM_DOMAIN_TLB_ON_SWAP
	struct k_mem_domain *mem_domain = incoming->mem_domain_info.mem_domain;

	for (int idx = 0; idx < mem_domain->num_partitions; idx++) {
		struct k_mem_partition *part = &mem_domain->partitions[idx];
		uintptr_t end = part->start + part->size;

		for (uintptr_t addr = part->start; addr < end;
		     addr += CONFIG_MMU_PAGE_SIZE) {
			xtensa_dtlb_vaddr_invalidate((void *)addr);
		}
	}
#endif
}
#endif /* CONFIG_USERSPACE */
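For context, the partitions walked above come from the application's memory
domain setup. A hedged example using the standard Zephyr userspace API is
shown below; the buffer name, the 4 KiB page-size assumption, the function
name setup_app_domain(), and the thread id parameter are illustrative only.

#include <zephyr/kernel.h>

/* One page of data shared with a user thread; with the Xtensa MMU the
 * partition must be page-aligned and page-sized (4 KiB assumed here).
 */
static uint8_t app_buf[4096] __aligned(4096);

K_MEM_PARTITION_DEFINE(app_part, app_buf, sizeof(app_buf),
		       K_MEM_PARTITION_P_RW_U_RW);

static struct k_mem_domain app_domain;

void setup_app_domain(k_tid_t user_thread)
{
	struct k_mem_partition *parts[] = { &app_part };

	k_mem_domain_init(&app_domain, ARRAY_SIZE(parts), parts);
	k_mem_domain_add_thread(&app_domain, user_thread);

	/* Once this thread is switched in with the new option enabled,
	 * xtensa_swap_update_page_tables() also invalidates the DTLB
	 * entries covering app_part's page range.
	 */
}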