xtensa: allow flushing auto-refill DTLBs on page table swap

This adds a new kconfig and corresponding code to allow flushing
auto-refill data TLBs when page tables are swapped (e.g. during
context switching). This is mainly used to avoid multi-hit TLB
exceptions raised by certain memory access patterns. If memory is
only marked for user mode access but not inside a memory domain,
accessing that page in kernel mode would result in a TLB being
filled with kernel ASID. When going back into user mode, access
to the memory would result in another TLB being filled with
the user mode ASID. Now there are two entries on the same memory
page, and the multi-hit TLB exception will be raised if that
memory page is accessed. This type of access is better served
using memory partition and memory domain to share data. However,
this type of access is not prohibited but highly discouraged.
The code is guarded by a kconfig option because of the execution
penalty: unnecessary TLB refills will be performed after every
flush. So only enable this if necessary.

Fixes #88772

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
This commit is contained in:
Daniel Leung 2025-05-01 14:28:05 -07:00 committed by Benjamin Cabé
commit d31ee53b60
6 changed files with 58 additions and 0 deletions

View file

@ -226,6 +226,13 @@ config PRIVILEGED_STACK_SIZE
# Must be multiple of CONFIG_MMU_PAGE_SIZE
default 4096
# Enabling this incurs an execution penalty: the flushed entries must be
# refilled on subsequent accesses, so only turn it on when needed (e.g. to
# avoid multi-hit TLB exceptions when the same page is mapped under both
# kernel and user ASIDs).
config XTENSA_MMU_FLUSH_AUTOREFILL_DTLBS_ON_SWAP
bool "Flush all auto-refill data TLBs when swapping page tables"
depends on USERSPACE
help
This flushes (invalidates) all auto-refill data TLBs when page
tables are swapped.
endif # XTENSA_MMU
endif # CPU_HAS_MMU

View file

@ -1122,4 +1122,20 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
return mem_buffer_validate(addr, size, write, XTENSA_MMU_USER_RING);
}
#ifdef CONFIG_XTENSA_MMU_FLUSH_AUTOREFILL_DTLBS_ON_SWAP
/* Only used when swapping page tables while the auto-refill DTLBs
 * also need invalidating. In every other configuration the
 * SWAP_PAGE_TABLE assembly macro is used instead, avoiding the
 * cost of a function call.
 */
void xtensa_swap_update_page_tables(struct k_thread *incoming)
{
	struct arch_mem_domain *arch_domain;

	/* Arch-specific paging state lives inside the incoming
	 * thread's memory domain.
	 */
	arch_domain = &(incoming->mem_domain_info.mem_domain->arch);

	xtensa_mmu_set_paging(arch_domain);
	xtensa_dtlb_autorefill_invalidate();
}
#endif
#endif /* CONFIG_USERSPACE */

View file

@ -270,8 +270,12 @@ xtensa_userspace_enter:
l32i a6, a1, 24
#ifdef CONFIG_XTENSA_MMU
#ifdef CONFIG_XTENSA_MMU_FLUSH_AUTOREFILL_DTLBS_ON_SWAP
call4 xtensa_swap_update_page_tables
#else
SWAP_PAGE_TABLE a6, a3, a7
#endif
#endif
#ifdef CONFIG_XTENSA_MPU
call4 xtensa_mpu_map_write
#endif

View file

@ -251,8 +251,12 @@ xtensa_switch:
rsr a6, ZSR_CPU
l32i a6, a6, ___cpu_t_current_OFFSET
#ifdef CONFIG_XTENSA_MMU
#ifdef CONFIG_XTENSA_MMU_FLUSH_AUTOREFILL_DTLBS_ON_SWAP
call4 xtensa_swap_update_page_tables
#else
SWAP_PAGE_TABLE a6, a4, a7
#endif
#endif
#ifdef CONFIG_XTENSA_MPU
call4 xtensa_mpu_map_write
#endif

View file

@ -395,8 +395,12 @@ _xstack_call0_\@:
l32i a6, a6, ___cpu_t_current_OFFSET
#ifdef CONFIG_XTENSA_MMU
#ifdef CONFIG_XTENSA_MMU_FLUSH_AUTOREFILL_DTLBS_ON_SWAP
call4 xtensa_swap_update_page_tables
#else
SWAP_PAGE_TABLE a6, a3, a7
#endif
#endif
#ifdef CONFIG_XTENSA_MPU
call4 xtensa_mpu_map_write
#endif

View file

@ -364,6 +364,29 @@ static inline void xtensa_tlb_autorefill_invalidate(void)
__asm__ volatile("isync");
}
/**
 * @brief Invalidate every auto-refill DTLB entry.
 *
 * Use with care: all refill entries in the data TLBs are affected.
 * The entry covering the current stack page will be repopulated as
 * this code returns.
 */
static inline void xtensa_dtlb_autorefill_invalidate(void)
{
	const uint8_t num_entries = BIT(XCHAL_DTLB_ARF_ENTRIES_LOG2);
	uint8_t w, idx;

	/* Walk every entry of every auto-refill way and drop it. */
	for (w = 0; w < XTENSA_MMU_NUM_TLB_AUTOREFILL_WAYS; w++) {
		for (idx = 0; idx < num_entries; idx++) {
			xtensa_dtlb_entry_invalidate(w + (idx << XTENSA_MMU_PTE_PPN_SHIFT));
		}
	}

	/* Ensure the invalidations take effect before continuing. */
	__asm__ volatile("isync");
}
/**
* @brief Set the page tables.
*