/*
 * Copyright (c) 2011-2014 Wind River Systems, Inc.
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <mmustructs.h>
#include <linker/linker-defs.h>
2017-07-17 12:07:56 +05:30
|
|
|
|
2017-08-01 15:22:06 -07:00
|
|
|
/* Common regions for all x86 processors.
|
|
|
|
* Peripheral I/O ranges configured at the SOC level
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Mark text and rodata as read-only.
|
|
|
|
* Userspace may read all text and rodata.
|
|
|
|
*/
|
|
|
|
MMU_BOOT_REGION((u32_t)&_image_rom_start, (u32_t)&_image_rom_size,
|
|
|
|
MMU_ENTRY_READ | MMU_ENTRY_USER);
|
|
|
|
|
|
|
|
#ifdef CONFIG_APPLICATION_MEMORY
|
|
|
|
/* User threads by default can read/write app-level memory. */
|
|
|
|
MMU_BOOT_REGION((u32_t)&__app_ram_start, (u32_t)&__app_ram_size,
|
|
|
|
MMU_ENTRY_WRITE | MMU_ENTRY_USER);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* __kernel_ram_size includes all unused memory, which is used for heaps.
|
|
|
|
* User threads cannot access this unless granted at runtime. This is done
|
|
|
|
* automatically for stacks.
|
|
|
|
*/
|
|
|
|
MMU_BOOT_REGION((u32_t)&__kernel_ram_start, (u32_t)&__kernel_ram_size,
|
|
|
|
MMU_ENTRY_WRITE | MMU_ENTRY_RUNTIME_USER);
|
|
|
|
|
2017-07-17 12:07:56 +05:30
|
|
|
|
2017-08-01 13:04:43 -07:00
|
|
|
void _x86_mmu_get_flags(void *addr, u32_t *pde_flags, u32_t *pte_flags)
|
|
|
|
{
|
|
|
|
|
2017-08-23 11:56:20 +05:30
|
|
|
*pde_flags = X86_MMU_GET_PDE(addr)->value & ~MMU_PDE_PAGE_TABLE_MASK;
|
2017-10-16 16:01:33 -07:00
|
|
|
if (*pde_flags & MMU_ENTRY_PRESENT) {
|
|
|
|
*pte_flags = X86_MMU_GET_PTE(addr)->value & ~MMU_PTE_PAGE_MASK;
|
|
|
|
} else {
|
|
|
|
*pte_flags = 0;
|
|
|
|
}
|
2017-08-01 13:04:43 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-09-11 13:40:21 -07:00
|
|
|
int _arch_buffer_validate(void *addr, size_t size, int write)
|
2017-07-17 12:07:56 +05:30
|
|
|
{
|
|
|
|
u32_t start_pde_num;
|
|
|
|
u32_t end_pde_num;
|
|
|
|
u32_t starting_pte_num;
|
|
|
|
u32_t ending_pte_num;
|
|
|
|
struct x86_mmu_page_table *pte_address;
|
|
|
|
u32_t pde;
|
|
|
|
u32_t pte;
|
|
|
|
union x86_mmu_pte pte_value;
|
|
|
|
|
|
|
|
start_pde_num = MMU_PDE_NUM(addr);
|
|
|
|
end_pde_num = MMU_PDE_NUM((char *)addr + size - 1);
|
2017-08-09 12:23:00 +05:30
|
|
|
starting_pte_num = MMU_PAGE_NUM((char *)addr);
|
2017-07-17 12:07:56 +05:30
|
|
|
|
|
|
|
/* Iterate for all the pde's the buffer might take up.
|
|
|
|
* (depends on the size of the buffer and start address of the buff)
|
|
|
|
*/
|
|
|
|
for (pde = start_pde_num; pde <= end_pde_num; pde++) {
|
2017-09-11 13:40:21 -07:00
|
|
|
union x86_mmu_pde_pt pde_value = X86_MMU_PD->entry[pde].pt;
|
2017-07-17 12:07:56 +05:30
|
|
|
|
2017-09-11 13:40:21 -07:00
|
|
|
if (!pde_value.p || !pde_value.us || (write && !pde_value.rw)) {
|
|
|
|
return -EPERM;
|
2017-07-17 12:07:56 +05:30
|
|
|
}
|
|
|
|
|
2017-09-11 13:40:21 -07:00
|
|
|
pte_address = X86_MMU_GET_PT_ADDR(addr);
|
2017-07-17 12:07:56 +05:30
|
|
|
|
|
|
|
/* loop over all the possible page tables for the required
|
|
|
|
* size. If the pde is not the last one then the last pte
|
|
|
|
* would be 1023. So each pde will be using all the
|
|
|
|
* page table entires except for the last pde.
|
|
|
|
* For the last pde, pte is calculated using the last
|
|
|
|
* memory address of the buffer.
|
|
|
|
*/
|
|
|
|
if (pde != end_pde_num) {
|
|
|
|
ending_pte_num = 1023;
|
|
|
|
} else {
|
|
|
|
ending_pte_num = MMU_PAGE_NUM((char *)addr + size - 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* For all the pde's appart from the starting pde, will have
|
|
|
|
* the start pte number as zero.
|
|
|
|
*/
|
|
|
|
if (pde != start_pde_num) {
|
|
|
|
starting_pte_num = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
pte_value.value = 0xFFFFFFFF;
|
|
|
|
|
|
|
|
/* Bitwise AND all the pte values. */
|
|
|
|
for (pte = starting_pte_num; pte <= ending_pte_num; pte++) {
|
|
|
|
pte_value.value &= pte_address->entry[pte].value;
|
|
|
|
}
|
|
|
|
|
2017-09-11 13:40:21 -07:00
|
|
|
if (!pte_value.p || !pte_value.us || (write && !pte_value.rw)) {
|
|
|
|
return -EPERM;
|
|
|
|
}
|
2017-07-17 12:07:56 +05:30
|
|
|
}
|
|
|
|
|
2017-09-11 13:40:21 -07:00
|
|
|
return 0;
|
2017-07-17 12:07:56 +05:30
|
|
|
}
|

static inline void tlb_flush_page(void *addr)
{
	/* invlpg drops any TLB entry caching the translation of the
	 * 4K page that contains the given linear address.
	 */
	__asm__ ("invlpg %0" :: "m" (*(char *)addr));
}
|
|
|
|
|
|
|
|
|
|
|
|
void _x86_mmu_set_flags(void *ptr, size_t size, u32_t flags, u32_t mask)
|
|
|
|
{
|
|
|
|
union x86_mmu_pte *pte;
|
|
|
|
|
|
|
|
u32_t addr = (u32_t)ptr;
|
|
|
|
|
|
|
|
__ASSERT(!(addr & MMU_PAGE_MASK), "unaligned address provided");
|
|
|
|
__ASSERT(!(size & MMU_PAGE_MASK), "unaligned size provided");
|
|
|
|
|
|
|
|
while (size) {
|
|
|
|
|
|
|
|
/* TODO we're not generating 4MB entries at the moment */
|
2017-08-23 11:56:20 +05:30
|
|
|
__ASSERT(X86_MMU_GET_4MB_PDE(addr)->ps != 1, "4MB PDE found");
|
2017-07-14 17:39:47 -07:00
|
|
|
|
2017-08-23 11:56:20 +05:30
|
|
|
pte = X86_MMU_GET_PTE(addr);
|
2017-07-14 17:39:47 -07:00
|
|
|
|
|
|
|
pte->value = (pte->value & ~mask) | flags;
|
|
|
|
tlb_flush_page((void *)addr);
|
|
|
|
|
|
|
|
size -= MMU_PAGE_SIZE;
|
|
|
|
addr += MMU_PAGE_SIZE;
|
|
|
|
}
|
|
|
|
}
|