commit df350c7469 (parent da584af9aa)

xtensa: add MPU support for kernel mode

This enables support for MPU on Xtensa. Currently this is for kernel mode only.

Signed-off-by: Daniel Leung <daniel.leung@intel.com>

7 changed files with 1403 additions and 0 deletions
@@ -214,6 +214,34 @@ endif # XTENSA_MMU

endif # CPU_HAS_MMU

if CPU_HAS_MPU

menuconfig XTENSA_MPU
	bool "Xtensa MPU Support"
	select MPU
	select SRAM_REGION_PERMISSIONS
	select XTENSA_SMALL_VECTOR_TABLE_ENTRY
	select EXPERIMENTAL
	# TODO: the target the MPU code was developed on (basically sample_controller
	# plus MPU minus s32c1i) does not have cache or SMP capability.
	# Need to verify functionality with targets supporting these.
	depends on !CACHE && !SMP
	help
	  Enable support for the Xtensa Memory Protection Unit.

if XTENSA_MPU

config XTENSA_MPU_DEFAULT_MEM_TYPE
	hex "Default Memory Type"
	default 0x18
	help
	  Default memory type for memory regions: non-cacheable memory,
	  non-shareable, non-bufferable and interruptible.

endif # XTENSA_MPU

endif # CPU_HAS_MPU

config XTENSA_SYSCALL_USE_HELPER
	bool "Use userspace syscall helper"
	default y if "$(ZEPHYR_TOOLCHAIN_VARIANT)" = "xt-clang"
@@ -23,6 +23,7 @@ zephyr_library_sources_ifdef(CONFIG_DEBUG_COREDUMP coredump.c)
zephyr_library_sources_ifdef(CONFIG_TIMING_FUNCTIONS timing.c)
zephyr_library_sources_ifdef(CONFIG_GDBSTUB gdbstub.c)
zephyr_library_sources_ifdef(CONFIG_XTENSA_MMU ptables.c mmu.c)
zephyr_library_sources_ifdef(CONFIG_XTENSA_MPU mpu.c)
zephyr_library_sources_ifdef(CONFIG_USERSPACE userspace.S syscall_helper.c)
zephyr_library_sources_ifdef(CONFIG_LLEXT elf.c)
zephyr_library_sources_ifdef(CONFIG_SMP smp.c)
arch/xtensa/core/mpu.c (new file, 702 additions)

@@ -0,0 +1,702 @@
/*
 * Copyright (c) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <stdlib.h>

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <zephyr/toolchain.h>
#include <zephyr/arch/xtensa/arch_inlines.h>
#include <zephyr/arch/xtensa/mpu.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util_macro.h>

#include <xtensa/corebits.h>
#include <xtensa/config/core-matmap.h>
#include <xtensa/config/core-isa.h>
#include <xtensa_mpu_priv.h>

extern char _heap_end[];
extern char _heap_start[];

/** MPU foreground map for kernel mode. */
static struct xtensa_mpu_map xtensa_mpu_map_fg_kernel;

/*
 * Additional information about the MPU maps: foreground and background maps.
 *
 * Some things to keep in mind:
 * - Each MPU region is described by TWO entries:
 *   [entry_a_address, entry_b_address). For contiguous memory regions,
 *   this should not be much of an issue. However, disjoint memory regions
 *   "waste" another entry to describe the end of those regions.
 *   We might run out of available entries in the MPU map because of this.
 * - The last entry is a special case as there is no more "next" entry in
 *   the map. In this case, the end of memory is the implicit boundary.
 *   In other words, the last entry describes the region between the start
 *   address of this entry and the end of memory.
 * - The current implementation has the following limitations:
 *   - All enabled entries are grouped towards the end of the map.
 *     - Except the last entry, which can be disabled. This is the end of
 *       the last foreground region. With a disabled entry, memory after
 *       this will use the background map for access control.
 *   - No disabled MPU entries are allowed in between.
 *
 * For a foreground map to be valid, its entries must follow these rules:
 * - The start addresses must always be in non-descending order.
 * - The access rights and memory type fields must contain valid values.
 * - The segment field needs to be correct for each entry.
 * - MBZ fields must contain only zeroes.
 * - Although the start address occupies 27 bits of the register, it does
 *   not mean all 27 bits are usable. The macro XCHAL_MPU_ALIGN_BITS
 *   provided by the toolchain indicates that only the bit at this position
 *   and those to its left are valid. This corresponds to the minimum
 *   segment size (MINSEGMENTSIZE) defined in the processor configuration.
 */
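To make the two-entries-per-region encoding above concrete, here is a small editorial illustration (hypothetical addresses; not part of this diff):

/*
 * Illustrative example: a single foreground region
 * [0x60000000, 0x60010000) with kernel read/write access is described
 * by two consecutive entries:
 *
 *   entry N:   start = 0x60000000, enabled,  access = P_RW_U_NA
 *   entry N+1: start = 0x60010000, disabled, access = (don't care)
 *
 * The enabled entry starts the region; the start address of the next
 * entry terminates it. Because entry N+1 is disabled, memory at and
 * above 0x60010000 falls back to the background map for access control.
 */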
/**
 * Static definition of all code and data memory regions of the
 * current Zephyr image. This information must be available and
 * needs to be processed upon MPU initialization.
 */
static const struct xtensa_mpu_range mpu_zephyr_ranges[] = {
	/* Region for vector handlers. */
	{
		.start = (uintptr_t)XCHAL_VECBASE_RESET_VADDR,
		/*
		 * There is nothing from the Xtensa overlay about how big
		 * the vector handler region is. So we make an assumption
		 * that vecbase and .text are contiguous.
		 *
		 * SoC can override as needed if this is not the case,
		 * especially if the SoC reset/startup code relocates
		 * vecbase.
		 */
		.end = (uintptr_t)__text_region_start,
		.access_rights = XTENSA_MPU_ACCESS_P_RX_U_RX,
		.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
	},
	/*
	 * Mark the Zephyr execution regions (data, bss, noinit, etc.)
	 * cacheable, read / write and non-executable.
	 */
	{
		/* This includes .data, .bss and various kobject sections. */
		.start = (uintptr_t)_image_ram_start,
		.end = (uintptr_t)_image_ram_end,
		.access_rights = XTENSA_MPU_ACCESS_P_RW_U_NA,
		.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
	},
#if K_HEAP_MEM_POOL_SIZE > 0
	/* System heap memory */
	{
		.start = (uintptr_t)_heap_start,
		.end = (uintptr_t)_heap_end,
		.access_rights = XTENSA_MPU_ACCESS_P_RW_U_NA,
		.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
	},
#endif
	/* Mark text segment cacheable, read only and executable. */
	{
		.start = (uintptr_t)__text_region_start,
		.end = (uintptr_t)__text_region_end,
		.access_rights = XTENSA_MPU_ACCESS_P_RX_U_RX,
		.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
	},
	/* Mark rodata segment cacheable, read only and non-executable. */
	{
		.start = (uintptr_t)__rodata_region_start,
		.end = (uintptr_t)__rodata_region_end,
		.access_rights = XTENSA_MPU_ACCESS_P_RO_U_RO,
		.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
	},
};

/**
 * Return the pointer to the entry encompassing @a addr out of an array of MPU entries.
 *
 * Returns the entry where @a addr is greater than or equal to the entry's start
 * address, and where @a addr is less than the starting address of the next entry.
 *
 * @param[in] entries Array of MPU entries.
 * @param[in] addr Address to be matched to one entry.
 * @param[in] first_enabled_idx The index of the first enabled entry.
 *                              Use 0 if not sure.
 * @param[out] exact Set to true if address matches exactly.
 *                   NULL if do not care.
 * @param[out] entry_idx Set to the index of the entry array if entry is found.
 *                       NULL if do not care.
 *
 * @return Pointer to the map entry encompassing @a addr, or NULL if no such entry found.
 */
static const
struct xtensa_mpu_entry *check_addr_in_mpu_entries(const struct xtensa_mpu_entry *entries,
						   uintptr_t addr, uint8_t first_enabled_idx,
						   bool *exact, uint8_t *entry_idx)
{
	const struct xtensa_mpu_entry *ret = NULL;
	uintptr_t s_addr, e_addr;
	uint8_t idx;

	if (first_enabled_idx >= XTENSA_MPU_NUM_ENTRIES) {
		goto out_null;
	}

	if (addr < xtensa_mpu_entry_start_address_get(&entries[first_enabled_idx])) {
		/* Before the start address of the very first entry. So no match. */
		goto out_null;
	}

	/* Loop through the map except the last entry (which is a special case). */
	for (idx = first_enabled_idx; idx < (XTENSA_MPU_NUM_ENTRIES - 1); idx++) {
		s_addr = xtensa_mpu_entry_start_address_get(&entries[idx]);
		e_addr = xtensa_mpu_entry_start_address_get(&entries[idx + 1]);

		if ((addr >= s_addr) && (addr < e_addr)) {
			ret = &entries[idx];
			goto out;
		}
	}

	idx = XTENSA_MPU_NUM_ENTRIES - 1;
	s_addr = xtensa_mpu_entry_start_address_get(&entries[idx]);
	if (addr >= s_addr) {
		/* Last entry encompasses the start address to end of memory. */
		ret = &entries[idx];
	}

out:
	if (ret != NULL) {
		if (exact != NULL) {
			if (addr == s_addr) {
				*exact = true;
			} else {
				*exact = false;
			}
		}

		if (entry_idx != NULL) {
			*entry_idx = idx;
		}
	}

out_null:
	return ret;
}
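A short worked example of the lookup semantics (editorial illustration; the addresses are hypothetical and not part of this diff):

/*
 * Illustrative example: assume the enabled entries start at
 * 0x00000000, 0x60000000 and 0xE0000000 (the last entry of the map).
 *
 *   check_addr_in_mpu_entries(entries, 0x60001000, first, &exact, &idx)
 *     returns the 0x60000000 entry with exact == false, because
 *     0x60001000 lies inside [0x60000000, 0xE0000000).
 *
 *   check_addr_in_mpu_entries(entries, 0xE0000000, first, &exact, &idx)
 *     returns the last entry with exact == true; the last entry covers
 *     everything from its start address to the end of memory.
 */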
/**
 * Find the first enabled MPU entry.
 *
 * @param entries Array of MPU entries with XTENSA_MPU_NUM_ENTRIES elements.
 *
 * @return Index of the first enabled entry.
 * @retval XTENSA_MPU_NUM_ENTRIES if no entry is enabled.
 */
static inline uint8_t find_first_enabled_entry(const struct xtensa_mpu_entry *entries)
{
	int first_enabled_idx;

	for (first_enabled_idx = 0; first_enabled_idx < XTENSA_MPU_NUM_ENTRIES;
	     first_enabled_idx++) {
		if (entries[first_enabled_idx].as.p.enable) {
			break;
		}
	}

	return first_enabled_idx;
}

/**
 * Compare two MPU entries.
 *
 * This is used by qsort to compare two MPU entries on their ordering
 * based on starting address.
 *
 * @param a First MPU entry.
 * @param b Second MPU entry.
 *
 * @retval -1 First address is less than second address.
 * @retval 0 First address is equal to second address.
 * @retval 1 First address is greater than second address.
 */
static int compare_entries(const void *a, const void *b)
{
	struct xtensa_mpu_entry *e_a = (struct xtensa_mpu_entry *)a;
	struct xtensa_mpu_entry *e_b = (struct xtensa_mpu_entry *)b;

	uintptr_t addr_a = xtensa_mpu_entry_start_address_get(e_a);
	uintptr_t addr_b = xtensa_mpu_entry_start_address_get(e_b);

	if (addr_a < addr_b) {
		return -1;
	} else if (addr_a == addr_b) {
		return 0;
	} else {
		return 1;
	}
}

/**
 * Sort the MPU entries based on starting address.
 *
 * This sorts the MPU entries in ascending order of starting address.
 * After sorting, it rewrites the segment numbers of all entries.
 */
static void sort_entries(struct xtensa_mpu_entry *entries)
{
	qsort(entries, XTENSA_MPU_NUM_ENTRIES, sizeof(entries[0]), compare_entries);

	for (uint32_t idx = 0; idx < XTENSA_MPU_NUM_ENTRIES; idx++) {
		/* Segment value must correspond to the index. */
		entries[idx].at.p.segment = idx;
	}
}

/**
 * Consolidate the MPU entries.
 *
 * This removes consecutive entries where the attributes are the same.
 *
 * @param entries Array of MPU entries with XTENSA_MPU_NUM_ENTRIES elements.
 * @param first_enabled_idx Index of first enabled entry.
 *
 * @return Index of the first enabled entry after consolidation.
 */
static uint8_t consolidate_entries(struct xtensa_mpu_entry *entries,
				   uint8_t first_enabled_idx)
{
	uint8_t new_first;
	uint8_t idx_0 = first_enabled_idx;
	uint8_t idx_1 = first_enabled_idx + 1;
	bool to_consolidate = false;

	/* For each pair of entries... */
	while (idx_1 < XTENSA_MPU_NUM_ENTRIES) {
		struct xtensa_mpu_entry *entry_0 = &entries[idx_0];
		struct xtensa_mpu_entry *entry_1 = &entries[idx_1];
		bool mark_disable_0 = false;
		bool mark_disable_1 = false;

		if (xtensa_mpu_entries_has_same_attributes(entry_0, entry_1)) {
			/*
			 * If both entries have the same attributes (access_rights
			 * and memory type), they can be consolidated into one by
			 * removing the higher indexed one.
			 */
			mark_disable_1 = true;
		} else if (xtensa_mpu_entries_has_same_address(entry_0, entry_1)) {
			/*
			 * If both entries have the same address, the higher indexed
			 * one always overrides the lower one. So remove the lower
			 * indexed one.
			 */
			mark_disable_0 = true;
		}

		/*
		 * Mark an entry as disabled here so it can be removed later.
		 *
		 * The MBZ field of the AS register is re-purposed to indicate that
		 * this is an entry to be removed.
		 */
		if (mark_disable_1) {
			/* Remove the higher indexed entry. */
			to_consolidate = true;

			entry_1->as.p.mbz = 1U;

			/* Skip ahead for next comparison. */
			idx_1++;
			continue;
		} else if (mark_disable_0) {
			/* Remove the lower indexed entry. */
			to_consolidate = true;

			entry_0->as.p.mbz = 1U;
		}

		idx_0 = idx_1;
		idx_1++;
	}

	if (to_consolidate) {
		uint8_t read_idx = XTENSA_MPU_NUM_ENTRIES - 1;
		uint8_t write_idx = XTENSA_MPU_NUM_ENTRIES;

		/* Go through the map from the end and copy enabled entries in place. */
		while (read_idx >= first_enabled_idx) {
			struct xtensa_mpu_entry *entry_rd = &entries[read_idx];

			if (entry_rd->as.p.mbz != 1U) {
				struct xtensa_mpu_entry *entry_wr;

				write_idx--;
				entry_wr = &entries[write_idx];

				*entry_wr = *entry_rd;
				entry_wr->at.p.segment = write_idx;
			}

			read_idx--;
		}

		/* New first enabled entry is where the last written entry is. */
		new_first = write_idx;

		for (idx_0 = 0; idx_0 < new_first; idx_0++) {
			struct xtensa_mpu_entry *e = &entries[idx_0];

			/* Shortcut to zero out address and enabled bit. */
			e->as.raw = 0U;

			/* Segment value must correspond to the index. */
			e->at.p.segment = idx_0;

			/* No access at all for both kernel and user modes. */
			e->at.p.access_rights = XTENSA_MPU_ACCESS_P_NA_U_NA;

			/* Use default memory type for disabled entries. */
			e->at.p.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE;
		}
	} else {
		/* No need to consolidate entries. Map is same as before. */
		new_first = first_enabled_idx;
	}

	return new_first;
}
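A brief illustration of what consolidation does (editorial sketch with hypothetical entries; not part of this diff):

/*
 * Illustrative example:
 *
 *   ... [start 0x60000000, P_RW_U_NA] [start 0x60010000, P_RW_U_NA] ...
 *     Both entries carry the same attributes, so the higher-indexed one
 *     is a redundant boundary and is marked for removal.
 *
 *   ... [start 0x60000000, P_RO_U_RO] [start 0x60000000, P_RW_U_NA] ...
 *     Both entries share a start address; the higher-indexed one wins,
 *     so the lower-indexed one is marked for removal.
 *
 * Surviving entries are then packed towards the end of the map, their
 * segment fields are rewritten to match their new indices, and the new
 * index of the first enabled entry is returned.
 */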
/**
 * Add a memory region to the MPU map.
 *
 * This adds a memory region to the MPU map, by setting the appropriate
 * start and end entries. This may re-use existing entries or add new
 * entries to the map.
 *
 * @param[in,out] map Pointer to MPU map.
 * @param[in] start_addr Start address of the region.
 * @param[in] end_addr End address of the region.
 * @param[in] access_rights Access rights of this region.
 * @param[in] memory_type Memory type of this region.
 * @param[out] first_idx Return index of first enabled entry if not NULL.
 *
 * @retval 0 Successful in adding the region.
 * @retval -EINVAL Invalid values in function arguments.
 */
static int mpu_map_region_add(struct xtensa_mpu_map *map,
			      uintptr_t start_addr, uintptr_t end_addr,
			      uint32_t access_rights, uint32_t memory_type,
			      uint8_t *first_idx)
{
	int ret;
	bool exact_s, exact_e;
	uint8_t idx_s, idx_e, first_enabled_idx;
	struct xtensa_mpu_entry *entry_slot_s, *entry_slot_e, prev_entry;

	struct xtensa_mpu_entry *entries = map->entries;

	if (start_addr >= end_addr) {
		ret = -EINVAL;
		goto out;
	}

	first_enabled_idx = find_first_enabled_entry(entries);
	if (first_enabled_idx >= XTENSA_MPU_NUM_ENTRIES) {

		/*
		 * If the last entry in the map is not enabled and the start
		 * address is NULL, we can assume the map has not been populated
		 * at all. This is because we group all enabled entries at
		 * the end of map.
		 */
		struct xtensa_mpu_entry *last_entry = &entries[XTENSA_MPU_NUM_ENTRIES - 1];

		if (!xtensa_mpu_entry_enable_get(last_entry) &&
		    (xtensa_mpu_entry_start_address_get(last_entry) == 0U)) {
			/* Empty table, so populate the entries as-is. */
			if (end_addr == 0xFFFFFFFFU) {
				/*
				 * Region goes to end of memory, so only need to
				 * program one entry.
				 */
				entry_slot_s = &entries[XTENSA_MPU_NUM_ENTRIES - 1];

				xtensa_mpu_entry_set(entry_slot_s, start_addr, true,
						     access_rights, memory_type);
			} else {
				/*
				 * Populate the last two entries to indicate
				 * a memory region. Notice that the second entry
				 * is not enabled as it is merely marking the end of
				 * a region and is not the starting of another
				 * enabled MPU region.
				 */
				entry_slot_s = &entries[XTENSA_MPU_NUM_ENTRIES - 2];
				entry_slot_e = &entries[XTENSA_MPU_NUM_ENTRIES - 1];

				xtensa_mpu_entry_set(entry_slot_s, start_addr, true,
						     access_rights, memory_type);
				xtensa_mpu_entry_set(entry_slot_e, end_addr, false,
						     XTENSA_MPU_ACCESS_P_NA_U_NA,
						     CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE);
			}

			ret = 0;
			goto out;
		}

		first_enabled_idx = consolidate_entries(entries, first_enabled_idx);

		if (first_enabled_idx >= XTENSA_MPU_NUM_ENTRIES) {
			ret = -EINVAL;
			goto out;
		}
	}

	entry_slot_s = (struct xtensa_mpu_entry *)
		       check_addr_in_mpu_entries(entries, start_addr, first_enabled_idx,
						 &exact_s, &idx_s);
	entry_slot_e = (struct xtensa_mpu_entry *)
		       check_addr_in_mpu_entries(entries, end_addr, first_enabled_idx,
						 &exact_e, &idx_e);

	__ASSERT_NO_MSG(entry_slot_s != NULL);
	__ASSERT_NO_MSG(entry_slot_e != NULL);
	__ASSERT_NO_MSG(start_addr < end_addr);

	if ((entry_slot_s == NULL) || (entry_slot_e == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Figure out if we need to add new slots for either address.
	 * If the addresses match exactly the addresses currently in the map,
	 * we can reuse those entries without adding new ones.
	 */
	if (!exact_s || !exact_e) {
		uint8_t needed = (exact_s ? 0 : 1) + (exact_e ? 0 : 1);

		/* Check if there are enough empty slots. */
		if (first_enabled_idx < needed) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Need to keep track of the attributes of the memory region before
	 * we start adding entries, as we will need to apply the same
	 * attributes to the "ending address" entry to preserve the attributes
	 * of the existing map.
	 */
	prev_entry = *entry_slot_e;

	/*
	 * Entry for beginning of new region.
	 *
	 * - Use existing entry if start addresses are the same for existing
	 *   and incoming region. We can simply reuse the entry.
	 * - Add an entry if incoming region is within existing region.
	 */
	if (!exact_s) {
		/*
		 * Put a new entry before the first enabled entry.
		 * We will sort the entries later.
		 */
		first_enabled_idx--;

		entry_slot_s = &entries[first_enabled_idx];
	}

	xtensa_mpu_entry_set(entry_slot_s, start_addr, true, access_rights, memory_type);

	/*
	 * Entry for ending of region.
	 *
	 * - Add an entry if incoming region is within existing region.
	 * - If the end address matches an existing entry exactly, there is
	 *   no need to do anything.
	 */
	if (!exact_e) {
		/*
		 * Put a new entry before the first enabled entry.
		 * We will sort the entries later.
		 */
		first_enabled_idx--;

		entry_slot_e = &entries[first_enabled_idx];

		/*
		 * Since we are going to punch a hole in the map,
		 * we need to preserve the attribute of existing region
		 * between the end address and next entry.
		 */
		*entry_slot_e = prev_entry;
		xtensa_mpu_entry_start_address_set(entry_slot_e, end_addr);
	}

	/* Sort the entries in ascending order of starting address. */
	sort_entries(entries);

	/*
	 * Need to figure out where the start and end entries are as sorting
	 * may change their positions.
	 */
	entry_slot_s = (struct xtensa_mpu_entry *)
		       check_addr_in_mpu_entries(entries, start_addr, first_enabled_idx,
						 &exact_s, &idx_s);
	entry_slot_e = (struct xtensa_mpu_entry *)
		       check_addr_in_mpu_entries(entries, end_addr, first_enabled_idx,
						 &exact_e, &idx_e);

	/* Both must be exact match. */
	__ASSERT_NO_MSG(exact_s);
	__ASSERT_NO_MSG(exact_e);

	if (end_addr == 0xFFFFFFFFU) {
		/*
		 * If end_addr = 0xFFFFFFFFU, entry_slot_e and idx_e both
		 * point to the last slot. Because the incoming region goes
		 * to the end of memory, we simply cheat by including
		 * the last entry by incrementing idx_e so the loop to
		 * update entries will change the attribute of the last entry
		 * in the map.
		 */
		idx_e++;
	}

	/*
	 * Any existing entries between the newly populated start and
	 * end entries must bear the same attributes. So modify them
	 * here.
	 */
	for (int idx = idx_s + 1; idx < idx_e; idx++) {
		xtensa_mpu_entry_attributes_set(&entries[idx], access_rights, memory_type);
	}

	if (first_idx != NULL) {
		*first_idx = first_enabled_idx;
	}

	ret = 0;

out:
	return ret;
}
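A worked example of adding a region that punches a hole into an existing one (editorial sketch with hypothetical addresses; not part of this diff):

/*
 * Illustrative example: the map currently describes one region
 * [0x60000000, end of memory) with attributes A. Calling
 *
 *   mpu_map_region_add(&map, 0x60002000, 0x60003000,
 *                      rights_B, type_B, &first);
 *
 * consumes two spare entries: a new enabled entry at 0x60002000 with
 * attributes B, and a second entry at 0x60003000 that is a copy of the
 * entry previously covering that address, so attributes A are restored
 * for the remainder. The entries are then re-sorted, and any existing
 * entries strictly between the two new boundaries are rewritten with
 * attributes B.
 */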
/**
 * Write the MPU map to hardware.
 *
 * @param map Pointer to foreground MPU map.
 */
void xtensa_mpu_map_write(struct xtensa_mpu_map *map)
{
	int entry;

	/*
	 * Clear MPU entries first, then write MPU entries in reverse order.
	 *
	 * Remember that the boundary of each memory region is marked by
	 * two consecutive entries, and that the addresses of all entries
	 * must be non-descending (i.e. equal or increasing).
	 * To ensure this, we clear out the entries first then write them
	 * in reverse order. This avoids any intermediate invalid
	 * configuration with regard to ordering.
	 */
	for (entry = 0; entry < XTENSA_MPU_NUM_ENTRIES; entry++) {
		__asm__ volatile("wptlb %0, %1\n\t" : : "a"(entry), "a"(0));
	}

	for (entry = XTENSA_MPU_NUM_ENTRIES - 1; entry >= 0; entry--) {
		__asm__ volatile("wptlb %0, %1\n\t"
				 : : "a"(map->entries[entry].at), "a"(map->entries[entry].as));
	}
}

/**
 * Perform necessary steps to enable MPU.
 */
void xtensa_mpu_init(void)
{
	unsigned int entry;
	uint8_t first_enabled_idx;

	/* Disable all foreground segments before we start configuration. */
	xtensa_mpu_mpuenb_write(0);

	/*
	 * Clear the foreground MPU map so we can populate it later with valid entries.
	 * Note that we still need to make sure the map is valid, and cannot be totally
	 * zeroed.
	 */
	for (entry = 0; entry < XTENSA_MPU_NUM_ENTRIES; entry++) {
		/* Make sure to zero out everything as a start, especially the MBZ fields. */
		struct xtensa_mpu_entry ent = {0};

		/* Segment value must correspond to the index. */
		ent.at.p.segment = entry;

		/* No access at all for both kernel and user modes. */
		ent.at.p.access_rights = XTENSA_MPU_ACCESS_P_NA_U_NA;

		/* Use default memory type for disabled entries. */
		ent.at.p.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE;

		xtensa_mpu_map_fg_kernel.entries[entry] = ent;
	}

	/*
	 * Add necessary MPU entries for the memory regions of base Zephyr image.
	 */
	for (entry = 0; entry < ARRAY_SIZE(mpu_zephyr_ranges); entry++) {
		const struct xtensa_mpu_range *range = &mpu_zephyr_ranges[entry];

		int ret = mpu_map_region_add(&xtensa_mpu_map_fg_kernel,
					     range->start, range->end,
					     range->access_rights, range->memory_type,
					     &first_enabled_idx);

		ARG_UNUSED(ret);
		__ASSERT(ret == 0, "Unable to add region [0x%08x, 0x%08x): %d",
			 (unsigned int)range->start,
			 (unsigned int)range->end,
			 ret);
	}

	/*
	 * Now for the entries for memory regions needed by SoC.
	 */
	for (entry = 0; entry < xtensa_soc_mpu_ranges_num; entry++) {
		const struct xtensa_mpu_range *range = &xtensa_soc_mpu_ranges[entry];

		int ret = mpu_map_region_add(&xtensa_mpu_map_fg_kernel,
					     range->start, range->end,
					     range->access_rights, range->memory_type,
					     &first_enabled_idx);

		ARG_UNUSED(ret);
		__ASSERT(ret == 0, "Unable to add region [0x%08x, 0x%08x): %d",
			 (unsigned int)range->start,
			 (unsigned int)range->end,
			 ret);
	}

	/* Consolidate entries so we have a compact map at boot. */
	consolidate_entries(xtensa_mpu_map_fg_kernel.entries, first_enabled_idx);

	/* Write the map into hardware. There is no turning back now. */
	xtensa_mpu_map_write(&xtensa_mpu_map_fg_kernel);
}
@@ -72,6 +72,10 @@ static ALWAYS_INLINE void arch_kernel_init(void)
#ifdef CONFIG_XTENSA_MMU
	xtensa_mmu_init();
#endif

#ifdef CONFIG_XTENSA_MPU
	xtensa_mpu_init();
#endif
}

void xtensa_switch(void *switch_to, void **switched_from);
arch/xtensa/include/xtensa_mpu_priv.h (new file, 415 additions)

@@ -0,0 +1,415 @@
/*
 * Copyright (c) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_ARCH_XTENSA_XTENSA_MPU_PRIV_H_
#define ZEPHYR_ARCH_XTENSA_XTENSA_MPU_PRIV_H_

#include <stdint.h>

#include <zephyr/toolchain.h>
#include <zephyr/arch/xtensa/mpu.h>
#include <zephyr/sys/util_macro.h>

#include <xtensa/config/core-isa.h>

/**
 * @defgroup xtensa_mpu_internal_apis Xtensa Memory Protection Unit (MPU) Internal APIs
 * @ingroup xtensa_mpu_apis
 * @{
 */

/**
 * @name Bit shifts and masks for MPU entry registers.
 *
 * @{
 */

/**
 * Number of bits to shift for start address in MPU entry register.
 *
 * This is only used for aligning the value to the MPU entry register,
 * and is different from the hardware alignment requirement.
 */
#define XTENSA_MPU_ENTRY_REG_START_ADDR_SHIFT		5U

/**
 * Bit mask of start address in MPU entry register.
 *
 * This is only used for aligning the value to the MPU entry register,
 * and is different from the hardware alignment requirement.
 */
#define XTENSA_MPU_ENTRY_REG_START_ADDR_MASK		0xFFFFFFE0U

/** Number of bits to shift for enable bit in MPU entry register. */
#define XTENSA_MPU_ENTRY_REG_ENABLE_SHIFT		0U

/** Bit mask of enable bit in MPU entry register. */
#define XTENSA_MPU_ENTRY_REG_ENABLE_MASK		BIT(XTENSA_MPU_ENTRY_REG_ENABLE_SHIFT)

/** Number of bits to shift for lock bit in MPU entry register. */
#define XTENSA_MPU_ENTRY_REG_LOCK_SHIFT			1U

/** Bit mask of lock bit in MPU entry register. */
#define XTENSA_MPU_ENTRY_REG_LOCK_MASK			BIT(XTENSA_MPU_ENTRY_REG_LOCK_SHIFT)

/** Number of bits to shift for access rights in MPU entry register. */
#define XTENSA_MPU_ENTRY_REG_ACCESS_RIGHTS_SHIFT	8U

/** Bit mask of access rights in MPU entry register. */
#define XTENSA_MPU_ENTRY_REG_ACCESS_RIGHTS_MASK \
	(0xFU << XTENSA_MPU_ENTRY_REG_ACCESS_RIGHTS_SHIFT)

/** Number of bits to shift for memory type in MPU entry register. */
#define XTENSA_MPU_ENTRY_REG_MEMORY_TYPE_SHIFT		12U

/** Bit mask of memory type in MPU entry register. */
#define XTENSA_MPU_ENTRY_REG_MEMORY_TYPE_MASK \
	(0x1FFU << XTENSA_MPU_ENTRY_REG_MEMORY_TYPE_SHIFT)

/**
 * @}
 */

/**
 * Define one MPU entry of type struct xtensa_mpu_entry.
 *
 * @note This needs a comma at the end if used in array declaration.
 *
 * @param saddr Start address.
 * @param en Enable bit.
 * @param rights Access rights.
 * @param memtype Memory type.
 */
#define XTENSA_MPU_ENTRY(saddr, en, rights, memtype) \
	{ \
		.as.p.enable = en, \
		.as.p.lock = 0, \
		.as.p.mbz = 0, \
		.as.p.start_addr = (saddr >> XTENSA_MPU_ENTRY_REG_START_ADDR_SHIFT), \
		.at.p.segment = 0, \
		.at.p.mbz1 = 0, \
		.at.p.access_rights = rights, \
		.at.p.memory_type = memtype, \
		.at.p.mbz2 = 0, \
	}
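As a usage illustration for this initializer (editorial sketch; the array name, addresses and rights below are arbitrary and not part of this diff, and the segment fields would still need to be fixed up to match the entry indices, as required by the map rules in arch/xtensa/core/mpu.c):

/* Illustrative only: a statically initialized pair of entries. */
static const struct xtensa_mpu_entry illustrative_entries[] = {
	XTENSA_MPU_ENTRY(0x00000000U, 1,
			 XTENSA_MPU_ACCESS_P_RWX_U_NA,
			 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE),
	XTENSA_MPU_ENTRY(0x20000000U, 0,
			 XTENSA_MPU_ACCESS_P_NA_U_NA,
			 CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE),
};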
/**
 * @brief Read MPUCFG register.
 *
 * This returns the value of the MPUCFG register, which describes
 * the MPU configuration of the processor.
 *
 * @return Value of MPUCFG register.
 */
static ALWAYS_INLINE uint32_t xtensa_mpu_mpucfg_read(void)
{
	uint32_t mpucfg;

	__asm__ __volatile__("rsr.mpucfg %0" : "=a" (mpucfg));

	return mpucfg;
}

/**
 * @brief Read MPUENB register.
 *
 * This returns the enable bits for MPU entries.
 *
 * @return Value of MPUENB register.
 */
static ALWAYS_INLINE uint32_t xtensa_mpu_mpuenb_read(void)
{
	uint32_t mpuenb;

	__asm__ __volatile__("rsr.mpuenb %0" : "=a" (mpuenb));

	return mpuenb;
}

/**
 * @brief Write MPUENB register.
 *
 * This writes the enable bits for MPU entries.
 *
 * @param mpuenb Value to be written.
 */
static ALWAYS_INLINE void xtensa_mpu_mpuenb_write(uint32_t mpuenb)
{
	__asm__ __volatile__("wsr.mpuenb %0" : : "a"(mpuenb));
}
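A minimal usage sketch (editorial illustration, mirroring the first step of xtensa_mpu_init() in arch/xtensa/core/mpu.c; the wrapper function name is hypothetical):

/* Illustrative only: disable all foreground segments. */
static inline void illustrative_disable_all_foreground(void)
{
	xtensa_mpu_mpuenb_write(0);

	/* All enable bits should now read back as zero. */
	(void)xtensa_mpu_mpuenb_read();
}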
/**
 * @name MPU entry internal helper functions.
 *
 * @{
 */

/**
 * @brief Return the start address encoded in the MPU entry.
 *
 * @param entry Pointer to the MPU entry.
 *
 * @return Start address.
 */
static ALWAYS_INLINE
uintptr_t xtensa_mpu_entry_start_address_get(const struct xtensa_mpu_entry *entry)
{
	return (entry->as.p.start_addr << XTENSA_MPU_ENTRY_REG_START_ADDR_SHIFT);
}

/**
 * @brief Set the start address encoded in the MPU entry.
 *
 * @param entry Pointer to the MPU entry.
 * @param addr Start address.
 */
static ALWAYS_INLINE
void xtensa_mpu_entry_start_address_set(struct xtensa_mpu_entry *entry, uintptr_t addr)
{
	entry->as.p.start_addr = addr >> XTENSA_MPU_ENTRY_REG_START_ADDR_SHIFT;
}

/**
 * @brief Return the lock bit encoded in the MPU entry.
 *
 * @param entry Pointer to the MPU entry.
 *
 * @retval True Lock bit is set.
 * @retval False Lock bit is not set.
 */
static ALWAYS_INLINE
bool xtensa_mpu_entry_lock_get(const struct xtensa_mpu_entry *entry)
{
	return entry->as.p.lock != 0;
}

/**
 * @brief Set the lock bit encoded in the MPU entry.
 *
 * @param entry Pointer to the MPU entry.
 * @param lock True if to lock the MPU entry.
 */
static ALWAYS_INLINE
void xtensa_mpu_entry_lock_set(struct xtensa_mpu_entry *entry, bool lock)
{
	entry->as.p.lock = lock ? 1 : 0;
}

/**
 * @brief Return the enable bit encoded in the MPU entry.
 *
 * @param entry Pointer to the MPU entry.
 *
 * @retval True Enable bit is set.
 * @retval False Enable bit is not set.
 */
static ALWAYS_INLINE
bool xtensa_mpu_entry_enable_get(const struct xtensa_mpu_entry *entry)
{
	return entry->as.p.enable != 0;
}

/**
 * @brief Set the enable bit encoded in the MPU entry.
 *
 * @param entry Pointer to the MPU entry.
 * @param en True if to enable the MPU entry.
 */
static ALWAYS_INLINE
void xtensa_mpu_entry_enable_set(struct xtensa_mpu_entry *entry, bool en)
{
	entry->as.p.enable = en ? 1 : 0;
}

/**
 * @brief Return the access rights encoded in the MPU entry.
 *
 * @param entry Pointer to the MPU entry.
 *
 * @return Access rights value.
 */
static ALWAYS_INLINE
uint8_t xtensa_mpu_entry_access_rights_get(const struct xtensa_mpu_entry *entry)
{
	return entry->at.p.access_rights;
}

/**
 * @brief Set the access rights encoded in the MPU entry.
 *
 * @param entry Pointer to the MPU entry.
 * @param access_rights Access rights to be set.
 */
static ALWAYS_INLINE
void xtensa_mpu_entry_access_rights_set(struct xtensa_mpu_entry *entry, uint8_t access_rights)
{
	entry->at.p.access_rights = access_rights;
}

/**
 * @brief Return the memory type encoded in the MPU entry.
 *
 * @param entry Pointer to the MPU entry.
 *
 * @return Memory type value.
 */
static ALWAYS_INLINE
uint16_t xtensa_mpu_entry_memory_type_get(const struct xtensa_mpu_entry *entry)
{
	return entry->at.p.memory_type;
}

/**
 * @brief Set the memory type in the MPU entry.
 *
 * @param entry Pointer to the MPU entry.
 * @param memory_type Memory type to be set.
 */
static ALWAYS_INLINE
void xtensa_mpu_entry_memory_type_set(struct xtensa_mpu_entry *entry, uint16_t memory_type)
{
	entry->at.p.memory_type = memory_type;
}

/**
 * @brief Set both access rights and memory type of an MPU entry.
 *
 * @param entry Pointer to the MPU entry.
 * @param access_rights Access rights value.
 * @param memory_type Memory type value.
 */
static inline
void xtensa_mpu_entry_attributes_set(struct xtensa_mpu_entry *entry,
				     uint8_t access_rights, uint16_t memory_type)
{
	xtensa_mpu_entry_access_rights_set(entry, access_rights);
	xtensa_mpu_entry_memory_type_set(entry, memory_type);
}

/**
 * @brief Set fields in MPU entry so it will be functional.
 *
 * This sets the starting address, enable bit, access rights and memory type
 * of an entry.
 *
 * Note that this preserves the value of the segment field.
 *
 * @param entry Pointer to the entry to be manipulated.
 * @param start_address Start address to be set.
 * @param enable Whether this entry should be enabled.
 * @param access_rights Access rights for the entry.
 * @param memory_type Memory type for the entry.
 */
static inline
void xtensa_mpu_entry_set(struct xtensa_mpu_entry *entry, uintptr_t start_address,
			  bool enable, uint8_t access_rights, uint16_t memory_type)
{
	uint8_t segment = entry->at.p.segment;

	/* Clear out the fields, and make sure MBZ fields are zero. */
	entry->as.raw = 0;
	entry->at.raw = 0;

	xtensa_mpu_entry_start_address_set(entry, start_address);
	xtensa_mpu_entry_enable_set(entry, enable);
	xtensa_mpu_entry_access_rights_set(entry, access_rights);
	xtensa_mpu_entry_memory_type_set(entry, memory_type);

	entry->at.p.segment = segment;
}

/**
 * @brief Test if two MPU entries have the same access rights.
 *
 * @param entry1 MPU entry #1.
 * @param entry2 MPU entry #2.
 *
 * @return True if access rights are the same, false otherwise.
 */
static inline
bool xtensa_mpu_entries_has_same_access_rights(const struct xtensa_mpu_entry *entry1,
					       const struct xtensa_mpu_entry *entry2)
{
	return entry1->at.p.access_rights == entry2->at.p.access_rights;
}

/**
 * @brief Test if two MPU entries have the same memory type.
 *
 * @param entry1 MPU entry #1.
 * @param entry2 MPU entry #2.
 *
 * @return True if memory types are the same, false otherwise.
 */
static inline
bool xtensa_mpu_entries_has_same_memory_type(const struct xtensa_mpu_entry *entry1,
					     const struct xtensa_mpu_entry *entry2)
{
	return entry1->at.p.memory_type == entry2->at.p.memory_type;
}

/**
 * @brief Test if two MPU entries have the same access rights and memory type.
 *
 * @param entry1 MPU entry #1.
 * @param entry2 MPU entry #2.
 *
 * @return True if access rights and memory types are the same, false otherwise.
 */
static inline
bool xtensa_mpu_entries_has_same_attributes(const struct xtensa_mpu_entry *entry1,
					    const struct xtensa_mpu_entry *entry2)
{
	return xtensa_mpu_entries_has_same_access_rights(entry1, entry2) &&
	       xtensa_mpu_entries_has_same_memory_type(entry1, entry2);
}

/**
 * @brief Test if two entries have the same start address.
 *
 * @param entry1 MPU entry #1.
 * @param entry2 MPU entry #2.
 *
 * @return True if they have the same address, false otherwise.
 */
static inline
bool xtensa_mpu_entries_has_same_address(const struct xtensa_mpu_entry *entry1,
					 const struct xtensa_mpu_entry *entry2)
{
	return xtensa_mpu_entry_start_address_get(entry1)
	       == xtensa_mpu_entry_start_address_get(entry2);
}

/**
 * @}
 */

/**
 * @name MPU access rights helper functions.
 *
 * @{
 */

/**
 * @brief Test if the access rights value is valid.
 *
 * @param access_rights Access rights value.
 *
 * @return True if the access rights value is valid, false otherwise.
 */
static ALWAYS_INLINE bool xtensa_mpu_access_rights_is_valid(uint8_t access_rights)
{
	return (access_rights != 1) && (access_rights <= 15);
}

/**
 * @}
 */

/**
 * @}
 */

#endif /* ZEPHYR_ARCH_XTENSA_XTENSA_MPU_PRIV_H_ */
@@ -35,7 +35,13 @@

#include <zephyr/drivers/timer/system_timer.h>

#ifdef CONFIG_XTENSA_MMU
#include <zephyr/arch/xtensa/xtensa_mmu.h>
#endif

#ifdef CONFIG_XTENSA_MPU
#include <zephyr/arch/xtensa/mpu.h>
#endif

/**
 * @defgroup xtensa_apis Xtensa APIs
include/zephyr/arch/xtensa/mpu.h (new file, 247 additions)

@@ -0,0 +1,247 @@
/*
 * Copyright (c) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>

#include <zephyr/toolchain.h>
#include <zephyr/sys/util_macro.h>

#include <xtensa/config/core-isa.h>

#ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MPU_H
#define ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MPU_H

/**
 * @defgroup xtensa_mpu_apis Xtensa Memory Protection Unit (MPU) APIs
 * @ingroup xtensa_apis
 * @{
 */

/** Number of available entries in the MPU table. */
#define XTENSA_MPU_NUM_ENTRIES			XCHAL_MPU_ENTRIES

/**
 * @name MPU memory region access rights.
 *
 * @note These are NOT bit masks, and must be used as whole values.
 *
 * @{
 */

/** Kernel and user modes no access. */
#define XTENSA_MPU_ACCESS_P_NA_U_NA		(0)

/** Kernel mode execution only. */
#define XTENSA_MPU_ACCESS_P_X_U_NA		(2)

/** User mode execution only. */
#define XTENSA_MPU_ACCESS_P_NA_U_X		(3)

/** Kernel mode read only. */
#define XTENSA_MPU_ACCESS_P_RO_U_NA		(4)

/** Kernel mode read and execution. */
#define XTENSA_MPU_ACCESS_P_RX_U_NA		(5)

/** Kernel mode read and write. */
#define XTENSA_MPU_ACCESS_P_RW_U_NA		(6)

/** Kernel mode read, write and execution. */
#define XTENSA_MPU_ACCESS_P_RWX_U_NA		(7)

/** Kernel and user modes write only. */
#define XTENSA_MPU_ACCESS_P_WO_U_WO		(8)

/** Kernel mode read, write. User mode read, write and execution. */
#define XTENSA_MPU_ACCESS_P_RW_U_RWX		(9)

/** Kernel mode read and write. User mode read only. */
#define XTENSA_MPU_ACCESS_P_RW_U_RO		(10)

/** Kernel mode read, write and execution. User mode read and execution. */
#define XTENSA_MPU_ACCESS_P_RWX_U_RX		(11)

/** Kernel and user modes read only. */
#define XTENSA_MPU_ACCESS_P_RO_U_RO		(12)

/** Kernel and user modes read and execution. */
#define XTENSA_MPU_ACCESS_P_RX_U_RX		(13)

/** Kernel and user modes read and write. */
#define XTENSA_MPU_ACCESS_P_RW_U_RW		(14)

/** Kernel and user modes read, write and execution. */
#define XTENSA_MPU_ACCESS_P_RWX_U_RWX		(15)

/**
 * @}
 */

/**
 * @brief Foreground MPU Entry.
 *
 * This holds the as, at register values for one MPU entry which can be
 * used directly by WPTLB.
 */
struct xtensa_mpu_entry {
	/**
	 * Content of as register for WPTLB.
	 *
	 * This contains the start address, the enable bit, and the lock bit.
	 */
	union {
		/** Raw value. */
		uint32_t raw;

		/** Individual parts. */
		struct {
			/**
			 * Enable bit for this entry.
			 *
			 * Modifying this will also modify the corresponding bit of
			 * the MPUENB register.
			 */
			uint32_t enable:1;

			/**
			 * Lock bit for this entry.
			 *
			 * Usable only if the MPULOCKABLE parameter is enabled in
			 * the processor configuration.
			 *
			 * Once set:
			 * - This cannot be cleared until reset.
			 * - This entry can no longer be modified.
			 * - The start address of the next entry also
			 *   cannot be modified.
			 */
			uint32_t lock:1;

			/** Must be zero. */
			uint32_t mbz:3;

			/**
			 * Start address of this MPU entry.
			 *
			 * Effective bits in this portion are affected by the minimum
			 * segment size of each MPU entry, ranging from 32 bytes to 4GB.
			 */
			uint32_t start_addr:27;
		} p;
	} as;

	/**
	 * Content of at register for WPTLB.
	 *
	 * This contains the memory type, access rights, and the segment number.
	 */
	union {
		/** Raw value. */
		uint32_t raw;

		/** Individual parts. */
		struct {
			/** The segment number of this MPU entry. */
			uint32_t segment:5;

			/** Must be zero (part 1). */
			uint32_t mbz1:3;

			/**
			 * Access rights associated with this MPU entry.
			 *
			 * This dictates the access rights from the start address of
			 * this entry to the start address of the next entry.
			 *
			 * Refer to XTENSA_MPU_ACCESS_* macros for available rights.
			 */
			uint32_t access_rights:4;

			/**
			 * Memory type associated with this MPU entry.
			 *
			 * This dictates the memory type from the start address of
			 * this entry to the start address of the next entry.
			 *
			 * This affects how the hardware treats the memory, for example,
			 * cacheable vs non-cacheable, shareable vs non-shareable.
			 * Refer to the Xtensa Instruction Set Architecture (ISA) manual
			 * for general description, and the processor manual for processor
			 * specific information.
			 */
			uint32_t memory_type:9;

			/** Must be zero (part 2). */
			uint32_t mbz2:11;
		} p;
	} at;
};

/**
 * @brief Struct to hold foreground MPU map and its entries.
 */
struct xtensa_mpu_map {
	/**
	 * Array of MPU entries.
	 */
	struct xtensa_mpu_entry entries[XTENSA_MPU_NUM_ENTRIES];
};

/**
 * Struct to describe a memory region [start, end).
 */
struct xtensa_mpu_range {
	/** Start address (inclusive) of the memory region. */
	const uintptr_t start;

	/**
	 * End address (exclusive) of the memory region.
	 *
	 * Use 0xFFFFFFFF for the end of memory.
	 */
	const uintptr_t end;

	/** Access rights for the memory region. */
	const uint8_t access_rights:4;

	/**
	 * Memory type for the region.
	 *
	 * Refer to the Xtensa Instruction Set Architecture (ISA) manual
	 * for general description, and the processor manual for processor
	 * specific information.
	 */
	const uint16_t memory_type:9;
} __packed;

/**
 * @brief Additional memory regions required by SoC.
 *
 * These memory regions will be set up by the MPU initialization code at boot.
 *
 * Must be defined in the SoC layer.
 */
extern const struct xtensa_mpu_range xtensa_soc_mpu_ranges[];

/**
 * @brief Number of SoC additional memory regions.
 *
 * Must be defined in the SoC layer.
 */
extern const int xtensa_soc_mpu_ranges_num;

/**
 * @brief Initialize hardware MPU.
 *
 * This initializes the MPU hardware and sets up the memory regions at boot.
 */
void xtensa_mpu_init(void);

/**
 * @}
 */

#endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_XTENSA_MPU_H */
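For reference, a minimal editorial sketch of what an SoC layer could provide for the two symbols declared above (the peripheral region below is hypothetical, and ARRAY_SIZE() is assumed to come from <zephyr/sys/util.h>; this is not part of the diff):

#include <zephyr/sys/util.h>
#include <zephyr/arch/xtensa/mpu.h>

/* Illustrative SoC-provided ranges; addresses are made up. */
const struct xtensa_mpu_range xtensa_soc_mpu_ranges[] = {
	{
		/* Hypothetical peripheral space: kernel R/W, user no access. */
		.start = 0x40000000,
		.end   = 0x50000000,
		.access_rights = XTENSA_MPU_ACCESS_P_RW_U_NA,
		.memory_type = CONFIG_XTENSA_MPU_DEFAULT_MEM_TYPE,
	},
};

const int xtensa_soc_mpu_ranges_num = ARRAY_SIZE(xtensa_soc_mpu_ranges);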