x86: MMU: Memory domain implementation for x86

Added the x86 MMU-based implementation of the kernel memory domain APIs.

GH-3852

Signed-off-by: Adithya Baglody <adithya.nagaraj.baglody@intel.com>
Adithya Baglody 2017-10-09 11:16:53 +05:30 committed by Andrew Boie
commit f7b0731ce4
3 changed files with 174 additions and 0 deletions
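
For context, here is a minimal sketch of the kernel-side memory domain API that
these arch hooks back. The names app_buf/app_part/app_domain are hypothetical,
and it assumes the k_mem_domain_* calls and the K_MEM_PARTITION_DEFINE() helper
of this Zephyr era:

#include <kernel.h>

/* Hypothetical page-aligned buffer placed in its own partition. */
static u8_t app_buf[MMU_PAGE_SIZE] __aligned(MMU_PAGE_SIZE);

K_MEM_PARTITION_DEFINE(app_part, app_buf, sizeof(app_buf),
		       K_MEM_PARTITION_P_RW_U_RW);

static struct k_mem_partition *app_parts[] = { &app_part };
static struct k_mem_domain app_domain;

static void app_domain_setup(void)
{
	/* Build the domain and attach the current thread; on x86 the
	 * partition attributes reach the page tables through
	 * _x86_mmu_mem_domain_load() on context switch.
	 */
	k_mem_domain_init(&app_domain, ARRAY_SIZE(app_parts), app_parts);
	k_mem_domain_add_thread(&app_domain, k_current_get());
}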


@@ -204,3 +204,107 @@ void _x86_mmu_set_flags(void *ptr,
		addr += MMU_PAGE_SIZE;
	}
}

#ifdef CONFIG_X86_USERSPACE
/* Helper macros passed as page_conf to _x86_mem_domain_pages_update() */
#define X86_MEM_DOMAIN_SET_PAGES	(0U)
#define X86_MEM_DOMAIN_RESET_PAGES	(1U)

/* Pass X86_MEM_DOMAIN_RESET_PAGES as page_conf to reset the memory domain
 * pages to supervisor-only, or X86_MEM_DOMAIN_SET_PAGES to apply the
 * partition attributes.
 */
static inline void _x86_mem_domain_pages_update(struct k_mem_domain *mem_domain,
						u32_t page_conf)
{
	u32_t partition_index;
	u32_t total_partitions;
	struct k_mem_partition partition;
	u32_t partitions_count;

	/* If mem_domain doesn't point to a valid domain, return. */
	if (mem_domain == NULL) {
		goto out;
	}

	/* Get the total number of partitions */
	total_partitions = mem_domain->num_partitions;

	/* Iterate over all the partitions of the given mem_domain and set
	 * the required flags in the correct MMU page tables. Empty slots
	 * (size == 0) are skipped, so count the partitions actually
	 * processed separately from the slot index.
	 */
	partitions_count = 0;
	for (partition_index = 0;
	     partitions_count < total_partitions;
	     partition_index++) {

		/* Get the partition info */
		partition = mem_domain->partitions[partition_index];
		if (partition.size == 0) {
			continue;
		}
		partitions_count++;

		if (page_conf == X86_MEM_DOMAIN_SET_PAGES) {
			/* Apply the partition attributes */
			_x86_mmu_set_flags((void *)partition.start,
					   partition.size,
					   partition.attr,
					   K_MEM_PARTITION_PERM_MASK);
		} else {
			/* Reset the pages to supervisor RW only */
			_x86_mmu_set_flags((void *)partition.start,
					   partition.size,
					   K_MEM_PARTITION_P_RW_U_NA,
					   K_MEM_PARTITION_PERM_MASK);
		}
	}
out:
	return;
}

/* Load the required partitions of the new incoming thread */
void _x86_mmu_mem_domain_load(struct k_thread *thread)
{
	_x86_mem_domain_pages_update(thread->mem_domain_info.mem_domain,
				     X86_MEM_DOMAIN_SET_PAGES);
}

/* Reset the MMU page tables of all the domain's partitions.
 * Needed when a swap takes place or when k_mem_domain_destroy() is called.
 */
void _arch_mem_domain_destroy(struct k_mem_domain *domain)
{
	_x86_mem_domain_pages_update(domain, X86_MEM_DOMAIN_RESET_PAGES);
}

/* Reset/destroy the one partition specified in the argument of the API. */
void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				       u32_t partition_id)
{
	u32_t total_partitions;
	struct k_mem_partition partition;

	if (domain == NULL) {
		goto out;
	}

	total_partitions = domain->num_partitions;
	__ASSERT(partition_id < total_partitions, "invalid partition id");

	partition = domain->partitions[partition_id];

	_x86_mmu_set_flags((void *)partition.start,
			   partition.size,
			   K_MEM_PARTITION_P_RW_U_NA,
			   K_MEM_PARTITION_PERM_MASK);
out:
	return;
}

u8_t _arch_mem_domain_max_partitions_get(void)
{
	return CONFIG_MAX_DOMAIN_PARTITIONS;
}
#endif /* CONFIG_X86_USERSPACE */
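
To trace the flow the other way, a hedged sketch of the kernel entry points
that land in the hooks above, continuing the hypothetical app_domain/app_part
from the earlier sketch:

static void app_domain_teardown(void)
{
	/* Removing one partition: the kernel resolves its slot and calls
	 * _arch_mem_domain_partition_remove(), resetting those pages to
	 * supervisor RW.
	 */
	k_mem_domain_remove_partition(&app_domain, &app_part);

	/* Destroying the domain resets every remaining partition via
	 * _arch_mem_domain_destroy().
	 */
	k_mem_domain_destroy(&app_domain);
}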


@@ -243,6 +243,61 @@
#endif /* CONFIG_X86_PAE_MODE */
#ifdef CONFIG_X86_USERSPACE
/* Flags which are only available for PAE mode page tables */
#ifdef CONFIG_X86_PAE_MODE
/* memory partition arch/soc independent attribute */
#define K_MEM_PARTITION_P_RW_U_RW	(MMU_ENTRY_WRITE | \
					 MMU_ENTRY_USER | \
					 MMU_ENTRY_EXECUTE_DISABLE)
#define K_MEM_PARTITION_P_RW_U_NA	(MMU_ENTRY_WRITE | \
					 MMU_ENTRY_SUPERVISOR | \
					 MMU_ENTRY_EXECUTE_DISABLE)
#define K_MEM_PARTITION_P_RO_U_RO	(MMU_ENTRY_READ | \
					 MMU_ENTRY_USER | \
					 MMU_ENTRY_EXECUTE_DISABLE)
#define K_MEM_PARTITION_P_RO_U_NA	(MMU_ENTRY_READ | \
					 MMU_ENTRY_SUPERVISOR | \
					 MMU_ENTRY_EXECUTE_DISABLE)

/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX	(MMU_ENTRY_WRITE | MMU_ENTRY_USER)
#define K_MEM_PARTITION_P_RWX_U_NA	(MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR)
#define K_MEM_PARTITION_P_RX_U_RX	(MMU_ENTRY_READ | MMU_ENTRY_USER)
#define K_MEM_PARTITION_P_RX_U_NA	(MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR)

/* memory partition access permission mask */
#define K_MEM_PARTITION_PERM_MASK	(MMU_PTE_RW_MASK | \
					 MMU_PTE_US_MASK | \
					 MMU_PTE_XD_MASK)
#else /* 32-bit paging mode enabled */
/* memory partition arch/soc independent attribute */
#define K_MEM_PARTITION_P_RW_U_RW	(MMU_ENTRY_WRITE | MMU_ENTRY_USER)
#define K_MEM_PARTITION_P_RW_U_NA	(MMU_ENTRY_WRITE | MMU_ENTRY_SUPERVISOR)
#define K_MEM_PARTITION_P_RO_U_RO	(MMU_ENTRY_READ | MMU_ENTRY_USER)
#define K_MEM_PARTITION_P_RO_U_NA	(MMU_ENTRY_READ | MMU_ENTRY_SUPERVISOR)

/* memory partition access permission mask */
#define K_MEM_PARTITION_PERM_MASK	(MMU_PTE_RW_MASK | MMU_PTE_US_MASK)
#endif /* CONFIG_X86_PAE_MODE */
#endif /* CONFIG_X86_USERSPACE */
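
To illustrate how these attributes are consumed, a short sketch with a
hypothetical buffer; the same K_MEM_PARTITION_P_RO_U_RO symbol resolves to the
PAE or 32-bit flag set above depending on CONFIG_X86_PAE_MODE, so application
code is identical either way:

/* Hypothetical read-only shared area: supervisor and user may read, nobody
 * may write, and in PAE mode execution is disabled as well.
 */
static u8_t ro_buf[MMU_PAGE_SIZE] __aligned(MMU_PAGE_SIZE);

K_MEM_PARTITION_DEFINE(ro_part, ro_buf, sizeof(ro_buf),
		       K_MEM_PARTITION_P_RO_U_RO);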
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
@@ -685,6 +740,8 @@ typedef u64_t x86_page_entry_data_t;
typedef u32_t x86_page_entry_data_t;
#endif
typedef x86_page_entry_data_t k_mem_partition_attr_t;
#ifdef CONFIG_X86_PAE_MODE
struct x86_mmu_page_directory_pointer {
	union x86_mmu_pae_pdpte entry[512];


@@ -807,11 +807,24 @@ void _x86_mmu_get_flags(void *addr,
 * @param mask Mask indicating which particular bits in the page table
 * entries to modify
 */
void _x86_mmu_set_flags(void *ptr,
			size_t size,
			x86_page_entry_data_t flags,
			x86_page_entry_data_t mask);
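
As used throughout the first file above, a one-line sketch of a call (the
buffer name page_buf is hypothetical): set the write bit on a page-aligned
region while leaving the user/supervisor and XD bits untouched.

_x86_mmu_set_flags(page_buf, MMU_PAGE_SIZE,
		   MMU_ENTRY_WRITE, MMU_PTE_RW_MASK);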
#ifdef CONFIG_USERSPACE
/**
 * @brief Load the memory domain for the thread.
 *
 * If a memory domain is assigned, this API configures the page tables
 * according to the memory domain's partition attributes.
 *
 * @param thread k_thread structure for the thread which is to be configured.
 */
void _x86_mmu_mem_domain_load(struct k_thread *thread);
#endif
#endif /* CONFIG_X86_MMU */
#endif /* !_ASMLANGUAGE */
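
Finally, a hedged sketch of the intended call site for
_x86_mmu_mem_domain_load(); the wrapper name and exact hook point are
illustrative, not part of this commit, but the idea is that it runs on every
context switch so the incoming thread's partitions become user-accessible:

static inline void example_swap_page_tables(struct k_thread *incoming)
{
	if (incoming->mem_domain_info.mem_domain != NULL) {
		_x86_mmu_mem_domain_load(incoming);
	}
}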