x86: fix arch_user_mode_enter locking
This function iterates over the thread's memory domain and updates page tables based on it. We need to be holding z_mem_domain_lock while this happens.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
This commit is contained in:
parent
348a0fda62
commit
f6c64e92ce
2 changed files with 8 additions and 1 deletions
|
@@ -110,8 +110,11 @@ void *z_x86_userspace_prepare_thread(struct k_thread *thread)
|
|||
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
|
||||
void *p1, void *p2, void *p3)
|
||||
{
|
||||
k_spinlock_key_t key;
|
||||
|
||||
z_x86_thread_pt_init(_current);
|
||||
|
||||
key = k_spin_lock(&z_mem_domain_lock);
|
||||
/* Apply memory domain configuration, if assigned. Threads that
|
||||
* started in user mode already had this done via z_setup_new_thread()
|
||||
*/
|
||||
|
@@ -119,6 +122,7 @@ FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
|
|||
z_x86_apply_mem_domain(_current,
|
||||
_current->mem_domain_info.mem_domain);
|
||||
}
|
||||
k_spin_unlock(&z_mem_domain_lock, key);
|
||||
|
||||
#ifndef CONFIG_X86_KPTI
|
||||
/* We're synchronously dropping into user mode from a thread that
|
||||
|
|
|
@@ -119,7 +119,10 @@ extern uint8_t z_shared_kernel_page_start;
|
|||
/* Set up per-thread page tables just prior to entering user mode */
|
||||
void z_x86_thread_pt_init(struct k_thread *thread);
|
||||
|
||||
/* Apply a memory domain policy to a set of thread page tables */
|
||||
/* Apply a memory domain policy to a set of thread page tables.
|
||||
*
|
||||
* Must be called with z_mem_domain_lock held.
|
||||
*/
|
||||
void z_x86_apply_mem_domain(struct k_thread *thread,
|
||||
struct k_mem_domain *mem_domain);
|
||||
#endif /* CONFIG_USERSPACE */
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue