x86: MMU: Memory domain implementation for x86

Added the x86 MMU implementation of the kernel memory domain interface:
a thread's partitions are applied to the page tables when its domain is
loaded, and reset to supervisor-only access when a domain is destroyed
or a partition is removed.

GH-3852

Signed-off-by: Adithya Baglody <adithya.nagaraj.baglody@intel.com>
Adithya Baglody 2017-10-09 11:16:53 +05:30 committed by Andrew Boie
commit f7b0731ce4
3 changed files with 174 additions and 0 deletions

@@ -204,3 +204,107 @@ void _x86_mmu_set_flags(void *ptr,
		addr += MMU_PAGE_SIZE;
	}
}
#ifdef CONFIG_X86_USERSPACE
/* Helper macros passed as page_conf to _x86_mem_domain_pages_update() */
#define X86_MEM_DOMAIN_SET_PAGES	(0U)
#define X86_MEM_DOMAIN_RESET_PAGES	(1U)

/* Pass X86_MEM_DOMAIN_RESET_PAGES as page_conf to reset the memory domain
 * pages to supervisor-only access, or X86_MEM_DOMAIN_SET_PAGES to apply
 * each partition's attributes.
 */
static inline void _x86_mem_domain_pages_update(struct k_mem_domain *mem_domain,
						u32_t page_conf)
{
	u32_t partition_index;
	u32_t total_partitions;
	struct k_mem_partition partition;
	u32_t partitions_count;

	/* If mem_domain doesn't point to a valid location, return. */
	if (mem_domain == NULL) {
		goto out;
	}

	/* Get the total number of partitions */
	total_partitions = mem_domain->num_partitions;

	/* Iterate over all the partitions of the given mem_domain.
	 * For x86: iterate over all the partitions and set the
	 * required flags in the correct MMU page tables.
	 */
	partitions_count = 0;
	for (partition_index = 0;
	     partitions_count < total_partitions;
	     partition_index++) {

		/* Get the partition info */
		partition = mem_domain->partitions[partition_index];
		if (partition.size == 0) {
			continue;
		}
		partitions_count++;
		if (page_conf == X86_MEM_DOMAIN_SET_PAGES) {
			/* Set the partition attributes */
			_x86_mmu_set_flags((void *)partition.start,
					   partition.size,
					   partition.attr,
					   K_MEM_PARTITION_PERM_MASK);
		} else {
			/* Reset the pages to supervisor RW only */
			_x86_mmu_set_flags((void *)partition.start,
					   partition.size,
					   K_MEM_PARTITION_P_RW_U_NA,
					   K_MEM_PARTITION_PERM_MASK);
		}
	}
out:
	return;
}

/* Load the required partitions of the new incoming thread */
void _x86_mmu_mem_domain_load(struct k_thread *thread)
{
	_x86_mem_domain_pages_update(thread->mem_domain_info.mem_domain,
				     X86_MEM_DOMAIN_SET_PAGES);
}

/* Destroy or reset the MMU page tables when necessary.
 * Needed when either swap takes place or k_mem_domain_destroy is called.
 */
void _arch_mem_domain_destroy(struct k_mem_domain *domain)
{
	_x86_mem_domain_pages_update(domain, X86_MEM_DOMAIN_RESET_PAGES);
}

/* Reset/destroy one partition specified in the argument of the API. */
void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				       u32_t partition_id)
{
	u32_t total_partitions;
	struct k_mem_partition partition;

	if (domain == NULL) {
		goto out;
	}

	total_partitions = domain->num_partitions;
	__ASSERT(partition_id <= total_partitions,
		 "invalid partitions");

	partition = domain->partitions[partition_id];

	_x86_mmu_set_flags((void *)partition.start,
			   partition.size,
			   K_MEM_PARTITION_P_RW_U_NA,
			   K_MEM_PARTITION_PERM_MASK);

out:
	return;
}

u8_t _arch_mem_domain_max_partitions_get(void)
{
	return CONFIG_MAX_DOMAIN_PARTITIONS;
}

#endif	/* CONFIG_X86_USERSPACE */
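
For context, a rough sketch of how application code might exercise these hooks
through the generic k_mem_domain API (illustrative only, not part of this
commit; the names app_buf, app_part, app_domain, and grant_buf_to_thread are
hypothetical, and the exact k_mem_domain_init()/k_mem_domain_add_thread()
signatures should be checked against the tree in use):

/* Hypothetical example: grant a user thread read/write access to a
 * page-aligned buffer. Not part of this commit.
 */
static u8_t __aligned(4096) app_buf[4096];

K_MEM_PARTITION_DEFINE(app_part, app_buf, sizeof(app_buf),
		       K_MEM_PARTITION_P_RW_U_RW);

static struct k_mem_partition *app_parts[] = { &app_part };
static struct k_mem_domain app_domain;

void grant_buf_to_thread(k_tid_t tid)
{
	k_mem_domain_init(&app_domain, ARRAY_SIZE(app_parts), app_parts);
	k_mem_domain_add_thread(&app_domain, tid);

	/* When tid is next swapped in, _x86_mmu_mem_domain_load() applies
	 * app_part's attributes to the page tables; destroying the domain
	 * later resets those pages to supervisor RW only via
	 * _arch_mem_domain_destroy().
	 */
}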