x86: add pagetables test suite
For the moment, we validate the flags on all RAM pages, ensure that NULL is never mapped, and show that dumping page tables doesn't crash.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
parent 55914da0e5
commit 030456c24e

4 changed files with 175 additions and 0 deletions
tests/arch/x86/pagetables/CMakeLists.txt (new file, 13 lines)
@@ -0,0 +1,13 @@
# SPDX-License-Identifier: Apache-2.0

cmake_minimum_required(VERSION 3.13.1)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(pagetables)

FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})

target_include_directories(app PRIVATE
  ${ZEPHYR_BASE}/kernel/include
  ${ZEPHYR_BASE}/arch/${ARCH}/include
  )
tests/arch/x86/pagetables/prj.conf (new file, 4 lines)
@@ -0,0 +1,4 @@
CONFIG_ZTEST=y
CONFIG_TEST_HW_STACK_PROTECTION=n
CONFIG_TEST_USERSPACE=y
CONFIG_EXCEPTION_DEBUG=y
tests/arch/x86/pagetables/src/main.c (new file, 153 lines)
@@ -0,0 +1,153 @@
/*
 * Copyright (c) 2020 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * x86-specific tests for MMU features and page tables
 */

#include <zephyr.h>
#include <ztest.h>
#include <tc_util.h>
#include <arch/x86/mmustructs.h>
#include <x86_mmu.h>
#include <linker/linker-defs.h>

#define VM_BASE		((uint8_t *)CONFIG_KERNEL_VM_BASE)
#define VM_LIMIT	(VM_BASE + KB((size_t)CONFIG_SRAM_SIZE))

#ifdef CONFIG_X86_64
#define PT_LEVEL	3
#elif CONFIG_X86_PAE
#define PT_LEVEL	2
#else
#define PT_LEVEL	1
#endif

/* Set of flags whose state we will check. Ignore Accessed/Dirty.
 * At leaf level the PS bit indicates PAT, but regardless we don't set it.
 */
#define FLAGS_MASK	(MMU_P | MMU_RW | MMU_US | MMU_PWT | MMU_PCD | \
			 MMU_G | MMU_PS | MMU_XD)

#define LPTR(name, suffix)	((uint8_t *)&_CONCAT(name, suffix))
#define LSIZE(name, suffix)	((size_t)&_CONCAT(name, suffix))
#define IN_REGION(name, virt) \
	(virt >= LPTR(name, _start) && \
	 virt < (LPTR(name, _start) + LSIZE(name, _size)))

#ifdef CONFIG_X86_64
extern char _locore_start[];
extern char _locore_size[];
extern char _lorodata_start[];
extern char _lorodata_size[];
#endif

#ifdef CONFIG_COVERAGE_GCOV
extern char __gcov_bss_start[];
extern char __gcov_bss_size[];
#endif

/**
 * Test that MMU flags on RAM virtual address range are set properly
 *
 * @ingroup kernel_memprotect_tests
 */
void test_ram_perms(void)
{
	uint8_t *pos;

	for (pos = VM_BASE; pos < VM_LIMIT; pos += CONFIG_MMU_PAGE_SIZE) {
		int level;
		pentry_t entry, flags, expected;

		if (pos == NULL) {
			/* We have another test specifically for NULL page */
			continue;
		}

		z_x86_pentry_get(&level, &entry, z_x86_page_tables_get(), pos);

		zassert_true((entry & MMU_P) != 0,
			     "non-present RAM entry");
		zassert_equal(level, PT_LEVEL, "bigpage found");
		flags = entry & FLAGS_MASK;

		if (!IS_ENABLED(CONFIG_SRAM_REGION_PERMISSIONS)) {
			expected = MMU_P | MMU_RW;
		} else if (IN_REGION(_image_text, pos)) {
			expected = MMU_P | MMU_US;
		} else if (IN_REGION(_image_rodata, pos)) {
			expected = MMU_P | MMU_US | MMU_XD;
#ifdef CONFIG_COVERAGE_GCOV
		} else if (IN_REGION(__gcov_bss, pos)) {
			expected = MMU_P | MMU_RW | MMU_US | MMU_XD;
#endif
#ifdef CONFIG_X86_64
		} else if (IN_REGION(_locore, pos)) {
			if (IS_ENABLED(CONFIG_X86_KPTI)) {
				expected = MMU_P | MMU_US;
			} else {
				expected = MMU_P;
			}
		} else if (IN_REGION(_lorodata, pos)) {
			if (IS_ENABLED(CONFIG_X86_KPTI)) {
				expected = MMU_P | MMU_US | MMU_XD;
			} else {
				expected = MMU_P | MMU_XD;
			}
#endif /* CONFIG_X86_64 */
		} else {
			/* We forced CONFIG_HW_STACK_PROTECTION off, otherwise
			 * guard pages will have RW cleared. We can relax this
			 * once we start memory-mapping stacks.
			 */
			expected = MMU_P | MMU_RW | MMU_XD;
		}

		zassert_equal(flags, expected,
			      "bad flags " PRI_ENTRY " at %p",
			      flags, pos);
	}
}

/**
 * Test that the NULL virtual page is always non-present
 *
 * @ingroup kernel_memprotect_tests
 */
void test_null_map(void)
{
	int level;
	pentry_t entry;

	/* The NULL page must always be non-present */
	z_x86_pentry_get(&level, &entry, z_x86_page_tables_get(), NULL);
	zassert_true((entry & MMU_P) == 0, "present NULL entry");
}

/**
 * Dump kernel's page tables to console
 *
 * We don't verify any specific output, but this shouldn't crash
 *
 * @ingroup kernel_memprotect_tests
 */
void test_dump_ptables(void)
{
	z_x86_dump_page_tables(z_x86_page_tables_get());
}

void test_main(void)
{
	ztest_test_suite(x86_pagetables,
			 ztest_unit_test(test_ram_perms),
			 ztest_unit_test(test_null_map),
			 ztest_unit_test(test_dump_ptables)
			 );
	ztest_run_test_suite(x86_pagetables);
}
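For context (not part of the diff): a minimal sketch of how the same internal helpers could probe a single page instead of walking all of RAM. The z_x86_pentry_get()/z_x86_page_tables_get() calls and MMU_* flag bits come from the file above and assume the same includes; probe_buf and check_probe_buf_mapping() are hypothetical names used only for illustration.

/* Hypothetical sketch: fetch the leaf entry mapping one page-aligned
 * kernel buffer and check it, reusing the x86_mmu.h internals above.
 */
static uint8_t __aligned(CONFIG_MMU_PAGE_SIZE) probe_buf[CONFIG_MMU_PAGE_SIZE];

void check_probe_buf_mapping(void)
{
	int level;
	pentry_t entry;

	/* Look up the page table entry that maps probe_buf */
	z_x86_pentry_get(&level, &entry, z_x86_page_tables_get(), probe_buf);

	/* Kernel data should be mapped present and writable */
	zassert_true((entry & MMU_P) != 0, "probe_buf is not mapped");
	zassert_true((entry & MMU_RW) != 0, "probe_buf is not writable");
}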
tests/arch/x86/pagetables/testcase.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
tests:
  arch.x86.pagetables:
    arch_whitelist: x86
    tags: userspace mmu
    filter: CONFIG_MMU