xtensa: mmu: Fix possible race condition on tlb shootdown
We need to hold the MMU spin lock while invalidating the cache during TLB shootdown; otherwise the invalidation can run while another thread is updating the page tables.

Signed-off-by: Flavio Ceolin <flavio.ceolin@intel.com>
Signed-off-by: Anas Nashif <anas.nashif@intel.com>
parent 156f1d4436
commit 9a33c400a1
1 changed file with 7 additions and 5 deletions
@@ -828,11 +828,13 @@ void z_xtensa_mmu_tlb_shootdown(void)
 	 */
 	key = arch_irq_lock();
 
-	/* We don't have information on which page tables have changed,
-	 * so we just invalidate the cache for all L1 page tables.
-	 */
-	sys_cache_data_invd_range((void *)l1_page_table, sizeof(l1_page_table));
-	sys_cache_data_invd_range((void *)l2_page_tables, sizeof(l2_page_tables));
+	K_SPINLOCK(&xtensa_mmu_lock) {
+		/* We don't have information on which page tables have changed,
+		 * so we just invalidate the cache for all L1 page tables.
+		 */
+		sys_cache_data_invd_range((void *)l1_page_table, sizeof(l1_page_table));
+		sys_cache_data_invd_range((void *)l2_page_tables, sizeof(l2_page_tables));
+	}
 
 #ifdef CONFIG_USERSPACE
 	struct k_thread *thread = _current_cpu->current;
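For context, here is a minimal sketch of the locking pattern this change relies on, assuming (as the diff suggests) that the code paths which modify the page tables serialize on the same xtensa_mmu_lock. The names mmu_lock, l1_table and pt_update below are illustrative, not taken from the Zephyr sources; only K_SPINLOCK(), k_spin_lock()/k_spin_unlock() and the sys_cache_data_*_range() calls are real Zephyr APIs.

/* Minimal sketch of the locking pattern; not the actual Zephyr MMU code. */
#include <zephyr/kernel.h>
#include <zephyr/cache.h>

static struct k_spinlock mmu_lock;      /* stands in for xtensa_mmu_lock */
static uint32_t l1_table[1024];         /* illustrative page-table storage */

/* Writer side (hypothetical): update an entry and flush it to memory
 * while holding the lock, so readers never observe a half-written table.
 */
void pt_update(size_t idx, uint32_t entry)
{
	k_spinlock_key_t key = k_spin_lock(&mmu_lock);

	l1_table[idx] = entry;
	sys_cache_data_flush_range(&l1_table[idx], sizeof(l1_table[idx]));

	k_spin_unlock(&mmu_lock, key);
}

/* Shootdown side: invalidate under the same lock, mirroring the
 * K_SPINLOCK(&xtensa_mmu_lock) { ... } block added by this commit.
 * K_SPINLOCK() acquires the lock on entry to the block and releases
 * it when the block is exited.
 */
void shootdown_invalidate(void)
{
	K_SPINLOCK(&mmu_lock) {
		sys_cache_data_invd_range(l1_table, sizeof(l1_table));
	}
}

With both sides taking the lock, the invalidation can no longer interleave with a partially completed table update; before this commit the shootdown path invalidated the cache with only interrupts locked on the local CPU, which does not stop another CPU from rewriting the tables at the same time.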