diff --git a/arch/x86/core/userspace.c b/arch/x86/core/userspace.c
index 4f82d6e466d..3df7087afd4 100644
--- a/arch/x86/core/userspace.c
+++ b/arch/x86/core/userspace.c
@@ -110,8 +110,11 @@ void *z_x86_userspace_prepare_thread(struct k_thread *thread)
 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
 {
+	k_spinlock_key_t key;
+
 	z_x86_thread_pt_init(_current);
 
+	key = k_spin_lock(&z_mem_domain_lock);
 	/* Apply memory domain configuration, if assigned. Threads that
	 * started in user mode already had this done via z_setup_new_thread()
	 */
@@ -119,6 +122,7 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
 		z_x86_apply_mem_domain(_current,
				       _current->mem_domain_info.mem_domain);
 	}
+	k_spin_unlock(&z_mem_domain_lock, key);
 
 #ifndef CONFIG_X86_KPTI
 	/* We're synchronously dropping into user mode from a thread that
diff --git a/arch/x86/include/x86_mmu.h b/arch/x86/include/x86_mmu.h
index 158c765ba51..6a81d0c5be4 100644
--- a/arch/x86/include/x86_mmu.h
+++ b/arch/x86/include/x86_mmu.h
@@ -119,7 +119,10 @@ extern uint8_t z_shared_kernel_page_start;
 /* Set up per-thread page tables just prior to entering user mode */
 void z_x86_thread_pt_init(struct k_thread *thread);
 
-/* Apply a memory domain policy to a set of thread page tables */
+/* Apply a memory domain policy to a set of thread page tables.
+ *
+ * Must be called with z_mem_domain_lock held.
+ */
 void z_x86_apply_mem_domain(struct k_thread *thread,
			    struct k_mem_domain *mem_domain);
 #endif /* CONFIG_USERSPACE */