diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 19ce69b011b..143607a2295 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -226,6 +226,13 @@ config PRIVILEGED_STACK_SIZE
 	# Must be multiple of CONFIG_MMU_PAGE_SIZE
 	default 4096
 
+config XTENSA_MMU_FLUSH_AUTOREFILL_DTLBS_ON_SWAP
+	bool "Flush all auto-refill data TLBs when swapping page tables"
+	depends on USERSPACE
+	help
+	  This flushes (invalidates) all auto-refill data TLBs when page
+	  tables are swapped.
+
 endif # XTENSA_MMU
 
 endif # CPU_HAS_MMU
diff --git a/arch/xtensa/core/ptables.c b/arch/xtensa/core/ptables.c
index fa7d4fb0f70..b66a9bc30f1 100644
--- a/arch/xtensa/core/ptables.c
+++ b/arch/xtensa/core/ptables.c
@@ -1122,4 +1122,20 @@ int arch_buffer_validate(const void *addr, size_t size, int write)
 	return mem_buffer_validate(addr, size, write, XTENSA_MMU_USER_RING);
 }
 
+#ifdef CONFIG_XTENSA_MMU_FLUSH_AUTOREFILL_DTLBS_ON_SWAP
+/* This is only used when swapping page tables and auto-refill DTLBs
+ * need to be invalidated. Otherwise, SWAP_PAGE_TABLE assembly
+ * is used to avoid a function call.
+ */
+void xtensa_swap_update_page_tables(struct k_thread *incoming)
+{
+	struct arch_mem_domain *domain =
+		&(incoming->mem_domain_info.mem_domain->arch);
+
+	xtensa_mmu_set_paging(domain);
+
+	xtensa_dtlb_autorefill_invalidate();
+}
+#endif
+
 #endif /* CONFIG_USERSPACE */
diff --git a/arch/xtensa/core/userspace.S b/arch/xtensa/core/userspace.S
index be143ed1f3a..56d53d59b0b 100644
--- a/arch/xtensa/core/userspace.S
+++ b/arch/xtensa/core/userspace.S
@@ -270,8 +270,12 @@ xtensa_userspace_enter:
 	l32i a6, a1, 24
 
 #ifdef CONFIG_XTENSA_MMU
+#ifdef CONFIG_XTENSA_MMU_FLUSH_AUTOREFILL_DTLBS_ON_SWAP
+	call4 xtensa_swap_update_page_tables
+#else
 	SWAP_PAGE_TABLE a6, a3, a7
 #endif
+#endif
 #ifdef CONFIG_XTENSA_MPU
 	call4 xtensa_mpu_map_write
 #endif
diff --git a/arch/xtensa/core/xtensa_asm2_util.S b/arch/xtensa/core/xtensa_asm2_util.S
index 6892831f52a..c488aeee1ce 100644
--- a/arch/xtensa/core/xtensa_asm2_util.S
+++ b/arch/xtensa/core/xtensa_asm2_util.S
@@ -251,8 +251,12 @@ xtensa_switch:
 	rsr a6, ZSR_CPU
 	l32i a6, a6, ___cpu_t_current_OFFSET
 #ifdef CONFIG_XTENSA_MMU
+#ifdef CONFIG_XTENSA_MMU_FLUSH_AUTOREFILL_DTLBS_ON_SWAP
+	call4 xtensa_swap_update_page_tables
+#else
 	SWAP_PAGE_TABLE a6, a4, a7
 #endif
+#endif
 #ifdef CONFIG_XTENSA_MPU
 	call4 xtensa_mpu_map_write
 #endif
diff --git a/arch/xtensa/include/xtensa_asm2_s.h b/arch/xtensa/include/xtensa_asm2_s.h
index 198f04f80e8..d7799ac1125 100644
--- a/arch/xtensa/include/xtensa_asm2_s.h
+++ b/arch/xtensa/include/xtensa_asm2_s.h
@@ -395,8 +395,12 @@ _xstack_call0_\@:
 	l32i a6, a6, ___cpu_t_current_OFFSET
 
 #ifdef CONFIG_XTENSA_MMU
+#ifdef CONFIG_XTENSA_MMU_FLUSH_AUTOREFILL_DTLBS_ON_SWAP
+	call4 xtensa_swap_update_page_tables
+#else
 	SWAP_PAGE_TABLE a6, a3, a7
 #endif
+#endif
 #ifdef CONFIG_XTENSA_MPU
 	call4 xtensa_mpu_map_write
 #endif
diff --git a/arch/xtensa/include/xtensa_mmu_priv.h b/arch/xtensa/include/xtensa_mmu_priv.h
index b4491ff0bc3..7cd51f13293 100644
--- a/arch/xtensa/include/xtensa_mmu_priv.h
+++ b/arch/xtensa/include/xtensa_mmu_priv.h
@@ -364,6 +364,29 @@ static inline void xtensa_tlb_autorefill_invalidate(void)
 	__asm__ volatile("isync");
 }
 
+/**
+ * @brief Invalidate all autorefill DTLB entries.
+ *
+ * This should be used carefully since all refill entries in the data
+ * TLBs are affected. The current stack page will be repopulated by
+ * this code as it returns.
+ */
+static inline void xtensa_dtlb_autorefill_invalidate(void)
+{
+	uint8_t way, i, entries;
+
+	entries = BIT(XCHAL_DTLB_ARF_ENTRIES_LOG2);
+
+	for (way = 0; way < XTENSA_MMU_NUM_TLB_AUTOREFILL_WAYS; way++) {
+		for (i = 0; i < entries; i++) {
+			uint32_t entry = way + (i << XTENSA_MMU_PTE_PPN_SHIFT);
+
+			xtensa_dtlb_entry_invalidate(entry);
+		}
+	}
+	__asm__ volatile("isync");
+}
+
 /**
  * @brief Set the page tables.
  *