userspace: make mem domain lock non-static

Strictly speaking, any access to a mem domain or the partitions
it contains should be serialized on this lock.

Architecture code may need to grab this lock if it uses this
data during, for example, context switches, especially on SMP
systems, where locking interrupts alone is not enough.
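
For example, a hypothetical arch-side helper that reprograms the
MPU/MMU on a context switch could hold the lock like this (the
helper name is illustrative and not part of this patch; the lock
and the mem_domain_info/partitions fields are the ones this patch
exposes):

    static void arch_apply_mem_domain(struct k_thread *thread)
    {
            /* Take the now-global lock so the domain and its
             * partitions cannot change underneath us, even from
             * another CPU on SMP.
             */
            k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
            struct k_mem_domain *domain =
                    thread->mem_domain_info.mem_domain;

            /* ... walk domain->partitions[] and program the MPU/MMU;
             * zero-sized entries are free slots and can be skipped ...
             */
            ARG_UNUSED(domain);

            k_spin_unlock(&z_mem_domain_lock, key);
    }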

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
commit 348a0fda62
Author: Andrew Boie <andrew.p.boie@intel.com>
Date:   2020-10-06 15:53:43 -07:00
2 changed files with 27 additions and 15 deletions

@@ -132,6 +132,18 @@ void z_mem_domain_init_thread(struct k_thread *thread);
/* Memory domain teardown hook, called from z_thread_single_abort() */
void z_mem_domain_exit_thread(struct k_thread *thread);
+/* This spinlock:
+ *
+ * - Protects the full set of active k_mem_domain objects and their contents
+ * - Serializes calls to arch_mem_domain_* APIs
+ *
+ * If architecture code needs to access k_mem_domain structures or the
+ * partitions they contain at any other point, this spinlock should be held.
+ * Uniprocessor systems can get away with just locking interrupts but this is
+ * not recommended.
+ */
+extern struct k_spinlock z_mem_domain_lock;
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_GDBSTUB

@@ -17,7 +17,7 @@
#include <logging/log.h>
LOG_MODULE_DECLARE(os);
-static struct k_spinlock lock;
+struct k_spinlock z_mem_domain_lock;
static uint8_t max_partitions;
struct k_mem_domain k_mem_domain_default;
@@ -100,7 +100,7 @@ void k_mem_domain_init(struct k_mem_domain *domain, uint8_t num_parts,
"num_parts of %d exceeds maximum allowable partitions (%d)",
num_parts, max_partitions);
-key = k_spin_lock(&lock);
+key = k_spin_lock(&z_mem_domain_lock);
domain->num_partitions = 0U;
(void)memset(domain->partitions, 0, sizeof(domain->partitions));
@@ -134,7 +134,7 @@ void k_mem_domain_init(struct k_mem_domain *domain, uint8_t num_parts,
}
#endif
-k_spin_unlock(&lock, key);
+k_spin_unlock(&z_mem_domain_lock, key);
}
void k_mem_domain_add_partition(struct k_mem_domain *domain,
@@ -147,7 +147,7 @@ void k_mem_domain_add_partition(struct k_mem_domain *domain,
__ASSERT(check_add_partition(domain, part),
"invalid partition %p", part);
-key = k_spin_lock(&lock);
+key = k_spin_lock(&z_mem_domain_lock);
for (p_idx = 0; p_idx < max_partitions; p_idx++) {
/* A zero-sized partition denotes it's a free partition */
@@ -171,7 +171,7 @@ void k_mem_domain_add_partition(struct k_mem_domain *domain,
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
arch_mem_domain_partition_add(domain, p_idx);
#endif
-k_spin_unlock(&lock, key);
+k_spin_unlock(&z_mem_domain_lock, key);
}
void k_mem_domain_remove_partition(struct k_mem_domain *domain,
@@ -183,7 +183,7 @@ void k_mem_domain_remove_partition(struct k_mem_domain *domain,
__ASSERT_NO_MSG(domain != NULL);
__ASSERT_NO_MSG(part != NULL);
-key = k_spin_lock(&lock);
+key = k_spin_lock(&z_mem_domain_lock);
/* find a partition that matches the given start and size */
for (p_idx = 0; p_idx < max_partitions; p_idx++) {
@@ -207,7 +207,7 @@ void k_mem_domain_remove_partition(struct k_mem_domain *domain,
domain->num_partitions--;
-k_spin_unlock(&lock, key);
+k_spin_unlock(&z_mem_domain_lock, key);
}
static void add_thread_locked(struct k_mem_domain *domain,
@@ -241,29 +241,29 @@ static void remove_thread_locked(struct k_thread *thread)
/* Called from thread object initialization */
void z_mem_domain_init_thread(struct k_thread *thread)
{
-k_spinlock_key_t key = k_spin_lock(&lock);
+k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
/* New threads inherit memory domain configuration from parent */
add_thread_locked(_current->mem_domain_info.mem_domain, thread);
-k_spin_unlock(&lock, key);
+k_spin_unlock(&z_mem_domain_lock, key);
}
/* Called when thread aborts during teardown tasks. sched_spinlock is held */
void z_mem_domain_exit_thread(struct k_thread *thread)
{
-k_spinlock_key_t key = k_spin_lock(&lock);
+k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
remove_thread_locked(thread);
-k_spin_unlock(&lock, key);
+k_spin_unlock(&z_mem_domain_lock, key);
}
void k_mem_domain_add_thread(struct k_mem_domain *domain, k_tid_t thread)
{
k_spinlock_key_t key;
-key = k_spin_lock(&lock);
+key = k_spin_lock(&z_mem_domain_lock);
remove_thread_locked(thread);
add_thread_locked(domain, thread);
-k_spin_unlock(&lock, key);
+k_spin_unlock(&z_mem_domain_lock, key);
}
void k_mem_domain_remove_thread(k_tid_t thread)
@@ -280,7 +280,7 @@ void k_mem_domain_destroy(struct k_mem_domain *domain)
__ASSERT(domain != &k_mem_domain_default,
"cannot destroy default domain");
-key = k_spin_lock(&lock);
+key = k_spin_lock(&z_mem_domain_lock);
#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
arch_mem_domain_destroy(domain);
@@ -294,7 +294,7 @@ void k_mem_domain_destroy(struct k_mem_domain *domain)
add_thread_locked(&k_mem_domain_default, thread);
}
-k_spin_unlock(&lock, key);
+k_spin_unlock(&z_mem_domain_lock, key);
}
static int init_mem_domain_module(const struct device *arg)
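
Not part of the patch, but for reference, every public API in this
file now follows the same pattern around the renamed lock (sketch;
'domain' and 'p_idx' as used in the hunks above):

    k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

    /* ... mutate domain->partitions[] / domain->num_partitions ... */

    #ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
    /* Arch hooks such as arch_mem_domain_partition_add() run with
     * the lock still held; spinlocks are not recursive, so an arch
     * implementation must not try to take z_mem_domain_lock here.
     */
    arch_mem_domain_partition_add(domain, p_idx);
    #endif

    k_spin_unlock(&z_mem_domain_lock, key);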