tests: mem_protect/mem_map: add data cache manipulations
This adds data cache manipulations, flushing and invalidation, to the tests where buffer contents are written and compared. These tests map different virtual pages to the same physical pages and write through one of the mapped virtual addresses. Some SoCs may cache each virtual address separately, so a write through one virtual address is not reflected at the other, thus failing the comparison. We therefore need to manually flush the cache after writing to the buffer and invalidate it before reading. Note that not all reads and writes need this treatment, as some of them only test for access permissions, not memory content.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>
parent 352fed6b21
commit f786ecc075
1 changed file with 31 additions and 0 deletions
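Before the diff itself, the failure mode the message describes can be reduced to a minimal sketch (illustration only, not part of the patch; `phys` is assumed to be a page-aligned physical address, and the write-back cache attribute is an assumption). Two virtual aliases of one physical page are mapped, a value is written through one alias, and the data cache is cleaned before reading it back through the other:

#include <zephyr/kernel.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/cache.h>

/* On SoCs whose data caches index/tag by virtual address, each alias
 * of the same physical page may occupy its own cache line, so a write
 * through one alias is invisible through the other until the dirty
 * line is flushed and the stale line invalidated.
 */
static void alias_sketch(uintptr_t phys)
{
	uint8_t *va_w, *va_r;

	k_mem_map_phys_bare(&va_w, phys, CONFIG_MMU_PAGE_SIZE,
			    K_MEM_CACHE_WB | K_MEM_PERM_RW);
	k_mem_map_phys_bare(&va_r, phys, CONFIG_MMU_PAGE_SIZE,
			    K_MEM_CACHE_WB);

	va_w[0] = 0xA5;

	if (IS_ENABLED(CONFIG_DCACHE)) {
		/* Write back the dirty line for the writer alias... */
		sys_cache_data_flush_and_invd_range(va_w, CONFIG_MMU_PAGE_SIZE);
		/* ...and drop any stale line held for the reader alias. */
		sys_cache_data_flush_and_invd_range(va_r, CONFIG_MMU_PAGE_SIZE);
	}

	__ASSERT(va_r[0] == 0xA5, "aliased read saw stale data");
}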
@@ -8,6 +8,7 @@
 #include <zephyr/toolchain.h>
 #include <mmu.h>
 #include <zephyr/linker/sections.h>
+#include <zephyr/cache.h>
 
 #ifdef CONFIG_DEMAND_PAGING
 #include <zephyr/kernel/mm/demand_paging.h>
@@ -56,9 +57,19 @@ ZTEST(mem_map, test_k_mem_map_phys_bare_rw)
 {
 	uint8_t *mapped_rw, *mapped_ro;
 	uint8_t *buf = test_page + BUF_OFFSET;
+	uintptr_t aligned_addr;
+	size_t aligned_size;
+	size_t aligned_offset;
 
 	expect_fault = false;
 
+	if (IS_ENABLED(CONFIG_DCACHE)) {
+		/* Flush and invalidate all cached data to prepare
+		 * for the comparison tests below.
+		 */
+		sys_cache_data_flush_and_invd_all();
+	}
+
 	/* Map in a page that allows writes */
 	k_mem_map_phys_bare(&mapped_rw, k_mem_phys_addr(buf),
 			    BUF_SIZE, BASE_FLAGS | K_MEM_PERM_RW);
@@ -72,6 +83,17 @@ ZTEST(mem_map, test_k_mem_map_phys_bare_rw)
 		mapped_rw[i] = (uint8_t)(i % 256);
 	}
 
+	if (IS_ENABLED(CONFIG_DCACHE)) {
+		/* Flush the data to memory after write. */
+		aligned_offset =
+			k_mem_region_align(&aligned_addr, &aligned_size, (uintptr_t)mapped_rw,
+					   BUF_SIZE, CONFIG_MMU_PAGE_SIZE);
+		zassert_equal(aligned_offset, BUF_OFFSET,
+			      "unexpected mapped_rw aligned offset: %u != %u", aligned_offset,
+			      BUF_OFFSET);
+		sys_cache_data_flush_and_invd_range((void *)aligned_addr, aligned_size);
+	}
+
 	/* Check that the backing buffer contains the expected data. */
 	for (int i = 0; i < BUF_SIZE; i++) {
 		uint8_t expected_val = (uint8_t)(i % 256);
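The hunk above first widens the buffer to a region the cache API can operate on: `k_mem_region_align()` rounds the start address down to the requested alignment, grows the size so the aligned region still covers the original one, and returns how far the original address sits inside it. A worked example with made-up numbers (a mapping 0x10 bytes into a page at 0x80001000, a 64-byte buffer, 4 KiB pages; the real constants live in the test sources):

	uintptr_t aligned_addr;
	size_t aligned_size;

	/* mapped_rw == 0x80001010, i.e. 0x10 bytes into its page */
	size_t off = k_mem_region_align(&aligned_addr, &aligned_size,
					(uintptr_t)0x80001010, 64, 4096);

	/* aligned_addr == 0x80001000 (rounded down to the page boundary)
	 * aligned_size == 4096       (covers offset 0x10 plus 64 bytes)
	 * off          == 0x10       (what the zassert compares to BUF_OFFSET)
	 */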
@@ -288,6 +310,10 @@ ZTEST(mem_map_api, test_k_mem_map_unmap)
 	}
 	last_mapped = mapped;
 
+	if (IS_ENABLED(CONFIG_DCACHE)) {
+		sys_cache_data_flush_and_invd_range((void *)mapped, CONFIG_MMU_PAGE_SIZE);
+	}
+
 	/* Page should be zeroed */
 	for (i = 0; i < CONFIG_MMU_PAGE_SIZE; i++) {
 		zassert_equal(mapped[i], '\x00', "page not zeroed");
@@ -300,6 +326,11 @@ ZTEST(mem_map_api, test_k_mem_map_unmap)
 
 	/* Show we can write to page without exploding */
 	(void)memset(mapped, '\xFF', CONFIG_MMU_PAGE_SIZE);
+
+	if (IS_ENABLED(CONFIG_DCACHE)) {
+		sys_cache_data_flush_and_invd_range((void *)mapped, CONFIG_MMU_PAGE_SIZE);
+	}
+
 	for (i = 0; i < CONFIG_MMU_PAGE_SIZE; i++) {
 		zassert_true(mapped[i] == '\xFF',
 			     "incorrect value 0x%hhx read at index %d",
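The same guarded full-page flush-and-invalidate now appears twice in test_k_mem_map_unmap; if it spreads further, it could be factored into a small helper. A sketch (the helper name is invented here, not something the patch adds):

	#include <zephyr/cache.h>
	#include <zephyr/sys/util.h>

	/* Hypothetical helper: make a mapped page coherent between the
	 * data cache and RAM so a subsequent read sees the last write.
	 * The IS_ENABLED() guard lets the compiler drop the body entirely
	 * when the SoC has no data cache.
	 */
	static inline void page_cache_sync(uint8_t *mapped)
	{
		if (IS_ENABLED(CONFIG_DCACHE)) {
			sys_cache_data_flush_and_invd_range((void *)mapped,
							    CONFIG_MMU_PAGE_SIZE);
		}
	}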