arch: arc: refactor the ARC MPU driver

* separate the ARC MPU driver into 2 parts
  * arc_mpu_v2_internal.h for ARC MPUv2
  * arc_mpu_v3_internal.h for ARC MPUv3
* For ARC MPUv2, keep the main design, but update and optimize the code
* For ARC MPUv3, implement MPU region split to support MPU region overlap
* misc updates and bug fixes

Signed-off-by: Wayne Ren <wei.ren@synopsys.com>
Wayne Ren 2019-03-05 10:29:16 +08:00 committed by Andrew Boie
commit a1c2159acb
10 changed files with 1250 additions and 966 deletions
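
The heart of the refactor is the compile-time selection below: the common driver code stays in one source file, while the version-specific register interface moves into the two new internal headers. A minimal sketch of the pattern, using the header names added by this commit:

#if CONFIG_ARC_MPU_VER == 2
#include "arc_mpu_v2_internal.h"
#elif CONFIG_ARC_MPU_VER == 3
#include "arc_mpu_v3_internal.h"
#endif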


@@ -7,14 +7,9 @@
#include <device.h>
#include <init.h>
#include <kernel.h>
#include <kernel_structs.h>
#include <soc.h>
#include <arch/arc/v2/mpu/arc_core_mpu.h>
#define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_REGISTER(mpu);
/*
* @brief Configure MPU for the thread
*
@@ -25,84 +20,11 @@ LOG_MODULE_REGISTER(mpu);
void configure_mpu_thread(struct k_thread *thread)
{
arc_core_mpu_disable();
#if defined(CONFIG_MPU_STACK_GUARD)
configure_mpu_stack_guard(thread);
#endif
#if defined(CONFIG_USERSPACE)
configure_mpu_user_context(thread);
configure_mpu_mem_domain(thread);
#endif
arc_core_mpu_configure_thread(thread);
arc_core_mpu_enable();
}
#if defined(CONFIG_MPU_STACK_GUARD)
/*
* @brief Configure MPU stack guard
*
* This function configures per thread stack guards reprogramming the MPU.
* The functionality is meant to be used during context switch.
*
* @param thread thread info data structure.
*/
void configure_mpu_stack_guard(struct k_thread *thread)
{
#if defined(CONFIG_USERSPACE)
if ((thread->base.user_options & K_USER) != 0) {
/* the areas before and after the thread's user stack are
* kernel only. These areas can be used as the stack guard.
* -----------------------
* | kernel only area |
* |---------------------|
* | user stack |
* |---------------------|
* |privilege stack guard|
* |---------------------|
* | privilege stack |
* -----------------------
*/
arc_core_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->arch.priv_stack_start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE);
return;
}
#endif
arc_core_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->stack_info.start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE);
}
#endif
#if defined(CONFIG_USERSPACE)
/*
* @brief Configure MPU user context
*
* This function configures the thread's user context.
* The functionality is meant to be used during context switch.
*
* @param thread thread info data structure.
*/
void configure_mpu_user_context(struct k_thread *thread)
{
LOG_DBG("configure user thread %p's context", thread);
arc_core_mpu_configure_user_context(thread);
}
/*
* @brief Configure MPU memory domain
*
* This function configures per thread memory domain reprogramming the MPU.
* The functionality is meant to be used during context switch.
*
* @param thread thread info data structure.
*/
void configure_mpu_mem_domain(struct k_thread *thread)
{
LOG_DBG("configure thread %p's domain", thread);
arc_core_mpu_configure_mem_domain(thread);
}
int z_arch_mem_domain_max_partitions_get(void)
{
@@ -115,12 +37,9 @@ int z_arch_mem_domain_max_partitions_get(void)
void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
u32_t partition_id)
{
ARG_UNUSED(domain);
arc_core_mpu_disable();
arc_core_mpu_mem_partition_remove(partition_id);
arc_core_mpu_remove_mem_partition(domain, partition_id);
arc_core_mpu_enable();
}
/*
@@ -128,7 +47,9 @@ void z_arch_mem_domain_partition_remove(struct k_mem_domain *domain,
*/
void z_arch_mem_domain_configure(struct k_thread *thread)
{
configure_mpu_mem_domain(thread);
arc_core_mpu_disable();
arc_core_mpu_configure_mem_domain(thread);
arc_core_mpu_enable();
}
/*
@@ -136,10 +57,8 @@ void z_arch_mem_domain_configure(struct k_thread *thread)
*/
void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
{
ARG_UNUSED(domain);
arc_core_mpu_disable();
arc_core_mpu_configure_mem_domain(NULL);
arc_core_mpu_remove_mem_domain(domain);
arc_core_mpu_enable();
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017 Synopsys.
* Copyright (c) 2019 Synopsys.
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -15,39 +15,7 @@
#define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
#include <logging/log.h>
LOG_MODULE_DECLARE(mpu);
#define AUX_MPU_RDB_VALID_MASK (0x1)
#define AUX_MPU_EN_ENABLE (0x40000000)
#define AUX_MPU_EN_DISABLE (0xBFFFFFFF)
#define AUX_MPU_RDP_REGION_SIZE(bits) \
(((bits - 1) & 0x3) | (((bits - 1) & 0x1C) << 7))
#define AUX_MPU_RDP_ATTR_MASK (0xFFF)
#define _ARC_V2_MPU_EN (0x409)
#define _ARC_V2_MPU_RDB0 (0x422)
#define _ARC_V2_MPU_RDP0 (0x423)
/* aux regs added in MPU version 3 */
#define _ARC_V2_MPU_INDEX (0x448) /* MPU index */
#define _ARC_V2_MPU_RSTART (0x449) /* MPU region start address */
#define _ARC_V2_MPU_REND (0x44A) /* MPU region end address */
#define _ARC_V2_MPU_RPER (0x44B) /* MPU region permission register */
#define _ARC_V2_MPU_PROBE (0x44C) /* MPU probe register */
/* For MPU version 2, the minimum protection region size is 2048 bytes */
/* For MPU version 3, the minimum protection region size is 32 bytes */
#if CONFIG_ARC_MPU_VER == 2
#define ARC_FEATURE_MPU_ALIGNMENT_BITS 11
#elif CONFIG_ARC_MPU_VER == 3
#define ARC_FEATURE_MPU_ALIGNMENT_BITS 5
#endif
#define CALC_REGION_END_ADDR(start, size) \
(start + size - (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS))
LOG_MODULE_REGISTER(mpu);
/**
* @brief Get the number of supported MPU regions
@@ -72,658 +40,21 @@ static inline u32_t _get_region_attr_by_type(u32_t type)
case THREAD_STACK_USER_REGION:
return REGION_RAM_ATTR;
case THREAD_STACK_REGION:
return AUX_MPU_RDP_KW | AUX_MPU_RDP_KR;
return AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR;
case THREAD_APP_DATA_REGION:
return REGION_RAM_ATTR;
case THREAD_STACK_GUARD_REGION:
/* no Write and Execute to guard region */
return AUX_MPU_RDP_UR | AUX_MPU_RDP_KR;
return AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR;
default:
/* Size 0 region */
/* unknown type */
return 0;
}
}
static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
u32_t region_attr)
{
/* ARC MPU version 2 and version 3 have different aux reg interface */
#if CONFIG_ARC_MPU_VER == 2
u8_t bits = find_msb_set(size) - 1;
index = 2 * index;
if (bits < ARC_FEATURE_MPU_ALIGNMENT_BITS) {
bits = ARC_FEATURE_MPU_ALIGNMENT_BITS;
}
if ((1 << bits) < size) {
bits++;
}
if (size > 0) {
region_attr |= AUX_MPU_RDP_REGION_SIZE(bits);
region_addr |= AUX_MPU_RDB_VALID_MASK;
} else {
region_addr = 0U;
}
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDP0 + index, region_attr);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDB0 + index, region_addr);
#elif CONFIG_ARC_MPU_VER == 3
#define AUX_MPU_RPER_SID1 0x10000
if (size < (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS)) {
size = (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS);
}
/* all MPU regions SID are the same: 1, the default SID */
if (region_attr) {
region_attr |= (AUX_MPU_RDB_VALID_MASK | AUX_MPU_RDP_S |
AUX_MPU_RPER_SID1);
}
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, region_addr);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_REND,
CALC_REGION_END_ADDR(region_addr, size));
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, region_attr);
#endif
}
#if CONFIG_ARC_MPU_VER == 3
static inline s32_t _mpu_probe(u32_t addr)
{
u32_t val;
z_arc_v2_aux_reg_write(_ARC_V2_MPU_PROBE, addr);
val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_INDEX);
/* if no match or multiple regions match, return error */
if (val & 0xC0000000) {
return -1;
} else {
return val;
}
}
#endif
/**
* This internal function is utilized by the MPU driver to parse the intent
* type (e.g. THREAD_STACK_REGION) and return the correct region index.
*/
static inline u32_t _get_region_index_by_type(u32_t type)
{
/*
* The new MPU regions are allocated per type after the statically
* configured regions. The type is one-indexed rather than
* zero-indexed.
*
* For ARC MPU v2, the smaller index has higher priority, so the
* index is allocated in reverse order. Static regions start from
* the biggest index, then thread related regions.
*
* For ARC MPU v3, each index has the same priority, so the index is
* allocated from small to big. Static regions start from 0, then
* thread related regions.
*/
switch (type) {
#if CONFIG_ARC_MPU_VER == 2
case THREAD_STACK_USER_REGION:
return _get_num_regions() - mpu_config.num_regions
- THREAD_STACK_REGION;
case THREAD_STACK_REGION:
case THREAD_APP_DATA_REGION:
case THREAD_STACK_GUARD_REGION:
return _get_num_regions() - mpu_config.num_regions - type;
case THREAD_DOMAIN_PARTITION_REGION:
#if defined(CONFIG_MPU_STACK_GUARD)
return _get_num_regions() - mpu_config.num_regions - type;
#else
/*
* Start domain partition region from stack guard region
* since stack guard is not enabled.
*/
return _get_num_regions() - mpu_config.num_regions - type + 1;
#endif
#elif CONFIG_ARC_MPU_VER == 3
case THREAD_STACK_USER_REGION:
return mpu_config.num_regions + THREAD_STACK_REGION - 1;
case THREAD_STACK_REGION:
case THREAD_APP_DATA_REGION:
case THREAD_STACK_GUARD_REGION:
return mpu_config.num_regions + type - 1;
case THREAD_DOMAIN_PARTITION_REGION:
#if defined(CONFIG_MPU_STACK_GUARD)
return mpu_config.num_regions + type - 1;
#else
/*
* Start domain partition region from stack guard region
* since stack guard is not enabled.
*/
return mpu_config.num_regions + type - 2;
#endif
#endif
default:
__ASSERT(0, "Unsupported type");
return 0;
}
}
/**
* This internal function checks if region is enabled or not
*/
static inline int _is_enabled_region(u32_t r_index)
{
#if CONFIG_ARC_MPU_VER == 2
return ((z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
& AUX_MPU_RDB_VALID_MASK) == AUX_MPU_RDB_VALID_MASK);
#elif CONFIG_ARC_MPU_VER == 3
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
return ((z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER) &
AUX_MPU_RDB_VALID_MASK) == AUX_MPU_RDB_VALID_MASK);
#endif
}
/**
* This internal function checks if the given buffer is in the region
*/
static inline int _is_in_region(u32_t r_index, u32_t start, u32_t size)
{
#if CONFIG_ARC_MPU_VER == 2
u32_t r_addr_start;
u32_t r_addr_end;
u32_t r_size_lshift;
r_addr_start = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
& (~AUX_MPU_RDB_VALID_MASK);
r_size_lshift = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + 2 * r_index)
& AUX_MPU_RDP_ATTR_MASK;
r_size_lshift = (r_size_lshift & 0x3) | ((r_size_lshift >> 7) & 0x1C);
r_addr_end = r_addr_start + (1 << (r_size_lshift + 1));
if (start >= r_addr_start && (start + size) < r_addr_end) {
return 1;
}
#elif CONFIG_ARC_MPU_VER == 3
if ((r_index == _mpu_probe(start)) &&
(r_index == _mpu_probe(start + size))) {
return 1;
}
#endif
return 0;
}
/**
* This internal function checks whether the region is user accessible
*/
static inline int _is_user_accessible_region(u32_t r_index, int write)
{
u32_t r_ap;
#if CONFIG_ARC_MPU_VER == 2
r_ap = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + 2 * r_index);
#include "arc_mpu_v2_internal.h"
#elif CONFIG_ARC_MPU_VER == 3
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
r_ap = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
#include "arc_mpu_v3_internal.h"
#endif
r_ap &= AUX_MPU_RDP_ATTR_MASK;
if (write) {
return ((r_ap & (AUX_MPU_RDP_UW | AUX_MPU_RDP_KW)) ==
(AUX_MPU_RDP_UW | AUX_MPU_RDP_KW));
}
return ((r_ap & (AUX_MPU_RDP_UR | AUX_MPU_RDP_KR)) ==
(AUX_MPU_RDP_UR | AUX_MPU_RDP_KR));
}
/* ARC Core MPU Driver API Implementation for ARC MPU */
/**
* @brief enable the MPU
*/
void arc_core_mpu_enable(void)
{
#if CONFIG_ARC_MPU_VER == 2
/* Enable MPU */
z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) | AUX_MPU_EN_ENABLE);
/* MPU is always enabled, use default region to
* simulate MPU enable
*/
#elif CONFIG_ARC_MPU_VER == 3
#define MPU_ENABLE_ATTR 0
arc_core_mpu_default(MPU_ENABLE_ATTR);
#endif
}
/**
* @brief disable the MPU
*/
void arc_core_mpu_disable(void)
{
#if CONFIG_ARC_MPU_VER == 2
/* Disable MPU */
z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) & AUX_MPU_EN_DISABLE);
#elif CONFIG_ARC_MPU_VER == 3
/* MPU is always enabled, use default region to
* simulate MPU disable
*/
arc_core_mpu_default(REGION_ALL_ATTR);
#endif
}
/**
* @brief configure the base address and size for an MPU region
*
* @param type MPU region type
* @param base base address in RAM
* @param size size of the region
*/
void arc_core_mpu_configure(u8_t type, u32_t base, u32_t size)
{
u32_t region_index = _get_region_index_by_type(type);
u32_t region_attr = _get_region_attr_by_type(type);
LOG_DBG("Region info: 0x%x 0x%x", base, size);
if (region_attr == 0) {
return;
}
/*
* The new MPU regions are allocated per type before
* the statically configured regions.
*/
#if CONFIG_ARC_MPU_VER == 2
/*
* For ARC MPU v2, MPU regions can be overlapped, smaller
* region index has higher priority.
*/
_region_init(region_index, base, size, region_attr);
#elif CONFIG_ARC_MPU_VER == 3
static s32_t last_index;
s32_t index;
u32_t last_region = _get_num_regions() - 1;
/* use the hardware probe to find the region that may be split.
* Another way is to look up mpu_config.mpu_regions in software,
* which is time consuming.
*/
index = _mpu_probe(base);
/* ARC MPU version 3 doesn't support region overlap,
* so it cannot be used directly for stack/stack guard protection.
* One way to do this is to split the RAM region as follows:
*
* Take THREAD_STACK_GUARD_REGION as example:
* RAM region 0: the ram region before THREAD_STACK_GUARD_REGION, rw
* RAM THREAD_STACK_GUARD_REGION: RO
* RAM region 1: the region after THREAD_STACK_GUARD_REGION, same
* as region 0
* if region_index == index, it means the same thread comes back,
* no need to split
*/
if (index >= 0 && region_index != index) {
/* need to split, only 1 split is allowed */
/* find the correct region to mpu_config.mpu_regions */
if (index == last_region) {
/* already split */
index = last_index;
} else {
/* new split */
last_index = index;
}
_region_init(index,
mpu_config.mpu_regions[index].base,
base - mpu_config.mpu_regions[index].base,
mpu_config.mpu_regions[index].attr);
#if defined(CONFIG_MPU_STACK_GUARD)
if (type != THREAD_STACK_USER_REGION)
/*
* USER REGION is continuous with MPU_STACK_GUARD.
* In current implementation, THREAD_STACK_GUARD_REGION is
* configured before THREAD_STACK_USER_REGION
*/
#endif
_region_init(last_region, base + size,
(mpu_config.mpu_regions[index].base +
mpu_config.mpu_regions[index].size - base - size),
mpu_config.mpu_regions[index].attr);
}
_region_init(region_index, base, size, region_attr);
#endif
}
/**
* @brief configure the default region
*
* @param region_attr region attribute of default region
*/
void arc_core_mpu_default(u32_t region_attr)
{
u32_t val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) &
(~AUX_MPU_RDP_ATTR_MASK);
region_attr &= AUX_MPU_RDP_ATTR_MASK;
z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN, region_attr | val);
}
/**
* @brief configure the MPU region
*
* @param index MPU region index
* @param base base address
* @param region_attr region attribute
*/
void arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
u32_t region_attr)
{
if (index >= _get_num_regions()) {
return;
}
region_attr &= AUX_MPU_RDP_ATTR_MASK;
_region_init(index, base, size, region_attr);
}
#if defined(CONFIG_USERSPACE)
void arc_core_mpu_configure_user_context(struct k_thread *thread)
{
u32_t base = (u32_t)thread->stack_obj;
u32_t size = thread->stack_info.size;
/* for kernel threads, no need to configure user context */
if (!(thread->base.user_options & K_USER)) {
#if defined(CONFIG_USERSPACE) && CONFIG_ARC_MPU_VER == 3
/* the application shared memory is handled here; all
* privileged threads have the right to access it.
*/
base = (u32_t)&_app_smem_start;
size = (u32_t)&_app_smem_size;
_region_init(_get_region_index_by_type(THREAD_APP_DATA_REGION),
base, size,
_get_region_attr_by_type(THREAD_APP_DATA_REGION));
#endif
return;
}
arc_core_mpu_configure(THREAD_STACK_USER_REGION, base, size);
#if defined(CONFIG_USERSPACE) && CONFIG_ARC_MPU_VER == 3
/*
* here, THREAD_APP_DATA_REGION needs to be cleared for a user thread,
* as it is set by kernel threads to access app_shared mem. For a user thread
* the handling of app_shared mem is done by
* THREAD_DOMAIN_PARTITION_REGION
*/
_region_init(_get_region_index_by_type(THREAD_APP_DATA_REGION)
, 0, 0, 0);
#endif
}
/**
* @brief configure MPU regions for the memory partitions of the memory domain
*
* @param thread the thread which has memory domain
*/
void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
{
s32_t region_index =
_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
u32_t num_partitions;
struct k_mem_partition *pparts;
struct k_mem_domain *mem_domain = NULL;
if (thread) {
mem_domain = thread->mem_domain_info.mem_domain;
}
if (mem_domain) {
LOG_DBG("configure domain: %p", mem_domain);
num_partitions = mem_domain->num_partitions;
pparts = mem_domain->partitions;
} else {
LOG_DBG("disable domain partition regions");
num_partitions = 0U;
pparts = NULL;
}
#if CONFIG_ARC_MPU_VER == 2
for (; region_index >= 0; region_index--) {
#elif CONFIG_ARC_MPU_VER == 3
/*
* Note: For ARC MPU v3, overlapping is not allowed, so the following
* partitions/region may be overlapped with each other or regions in
* mpu_config. This will cause EV_MachineCheck exception (ECR = 0x030600).
* Although the split mechanism is used for the stack guard region to
* avoid this, it doesn't work for memory domains because the number
* of regions is dynamic. So be careful to avoid the overlap situation.
*/
u32_t num_regions = _get_num_regions() - 1;
for (; region_index < num_regions; region_index++) {
#endif
if (num_partitions && pparts->size) {
LOG_DBG("set region 0x%x 0x%x 0x%x",
region_index, pparts->start, pparts->size);
#if CONFIG_ARC_MPU_VER == 2
_region_init(region_index, pparts->start, pparts->size,
pparts->attr);
#elif CONFIG_ARC_MPU_VER == 3
if ((pparts->attr & (AUX_MPU_RDP_UW | AUX_MPU_RDP_UR))
&& !(thread->base.user_options & K_USER)) {
/*
* privileged thread has access to full application
* shared memory range through THREAD_APP_DATA_REGION.
* no need to set again here.
*/
_region_init(region_index, 0, 0, 0);
} else {
_region_init(region_index, pparts->start,
pparts->size, pparts->attr);
}
#endif
num_partitions--;
} else {
LOG_DBG("disable region 0x%x", region_index);
/* Disable region */
_region_init(region_index, 0, 0, 0);
}
pparts++;
}
}
/**
* @brief configure MPU region for a single memory partition
*
* @param part_index memory partition index
* @param part memory partition info
*/
void arc_core_mpu_configure_mem_partition(u32_t part_index,
struct k_mem_partition *part)
{
u32_t region_index =
_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
LOG_DBG("configure partition index: %u", part_index);
if (part) {
LOG_DBG("set region 0x%x 0x%x 0x%x",
region_index + part_index, part->start, part->size);
_region_init(region_index, part->start, part->size,
part->attr);
} else {
LOG_DBG("disable region 0x%x", region_index + part_index);
/* Disable region */
_region_init(region_index + part_index, 0, 0, 0);
}
}
/**
* @brief Reset MPU region for a single memory partition
*
* @param part_index memory partition index
*/
void arc_core_mpu_mem_partition_remove(u32_t part_index)
{
u32_t region_index =
_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
LOG_DBG("disable region 0x%x", region_index + part_index);
/* Disable region */
_region_init(region_index + part_index, 0, 0, 0);
}
/**
* @brief get the maximum number of free regions for memory domain partitions
*/
int arc_core_mpu_get_max_domain_partition_regions(void)
{
#if CONFIG_ARC_MPU_VER == 2
return _get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION) + 1;
#elif CONFIG_ARC_MPU_VER == 3
/*
* Subtract the start of domain partition regions and 1 reserved region
* from total regions will get the maximum number of free regions for
* memory domain partitions.
*/
return _get_num_regions() -
_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION) - 1;
#endif
}
/**
* @brief validate the given buffer is user accessible or not
*/
int arc_core_mpu_buffer_validate(void *addr, size_t size, int write)
{
s32_t r_index;
/*
* For ARC MPU v2, a smaller region number takes priority. We
* can stop the iteration immediately once we find the matching
* region that grants permission or denies access.
*
* For ARC MPU v3, overlapping is not supported. We can stop
* the iteration immediately once we find the matching region
* that grants permission or denies access.
*/
#if CONFIG_ARC_MPU_VER == 2
for (r_index = 0; r_index < _get_num_regions(); r_index++) {
if (!_is_enabled_region(r_index) ||
!_is_in_region(r_index, (u32_t)addr, size)) {
continue;
}
if (_is_user_accessible_region(r_index, write)) {
return 0;
} else {
return -EPERM;
}
}
#elif CONFIG_ARC_MPU_VER == 3
r_index = _mpu_probe((u32_t)addr);
/* match and the area is in one region */
if (r_index >= 0 && r_index == _mpu_probe((u32_t)addr + size)) {
if (_is_user_accessible_region(r_index, write)) {
return 0;
} else {
return -EPERM;
}
}
#endif
return -EPERM;
}
#endif /* CONFIG_USERSPACE */
/* ARC MPU Driver Initial Setup */
/*
* @brief MPU default configuration
*
* This function provides the default configuration mechanism for the Memory
* Protection Unit (MPU).
*/
static void _arc_mpu_config(void)
{
u32_t num_regions;
u32_t i;
num_regions = _get_num_regions();
/* ARC MPU supports up to 16 Regions */
if (mpu_config.num_regions > num_regions) {
return;
}
/* Disable MPU */
arc_core_mpu_disable();
#if CONFIG_ARC_MPU_VER == 2
u32_t r_index;
/*
* the MPU regions are filled in reverse order. According to
* the ARCv2 ISA, the MPU region with a smaller index has
* higher priority. The static background MPU regions in
* mpu_config sit at the bottom, with the special-type
* regions above them.
*/
r_index = num_regions - mpu_config.num_regions;
/* clear all the regions first */
for (i = 0U; i < r_index; i++) {
_region_init(i, 0, 0, 0);
}
/* configure the static regions */
for (i = 0U; i < mpu_config.num_regions; i++) {
_region_init(r_index,
mpu_config.mpu_regions[i].base,
mpu_config.mpu_regions[i].size,
mpu_config.mpu_regions[i].attr);
r_index++;
}
/* default region: no read, write and execute */
arc_core_mpu_default(0);
#elif CONFIG_ARC_MPU_VER == 3
for (i = 0U; i < mpu_config.num_regions; i++) {
_region_init(i,
mpu_config.mpu_regions[i].base,
mpu_config.mpu_regions[i].size,
mpu_config.mpu_regions[i].attr);
}
for (; i < num_regions; i++) {
_region_init(i, 0, 0, 0);
}
#endif
/* Enable MPU */
arc_core_mpu_enable();
}
static int arc_mpu_init(struct device *arg)
{
ARG_UNUSED(arg);
_arc_mpu_config();
return 0;
}
SYS_INIT(arc_mpu_init, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);


@@ -0,0 +1,479 @@
/*
* Copyright (c) 2019 Synopsys.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V2_INTERNAL_H_
#define ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V2_INTERNAL_H_
#define AUX_MPU_RDB_VALID_MASK (0x1)
#define AUX_MPU_EN_ENABLE (0x40000000)
#define AUX_MPU_EN_DISABLE (0xBFFFFFFF)
#define AUX_MPU_RDP_REGION_SIZE(bits) \
(((bits - 1) & 0x3) | (((bits - 1) & 0x1C) << 7))
#define AUX_MPU_RDP_ATTR_MASK (0x1FC)
#define AUX_MPU_RDP_SIZE_MASK (0xE03)
#define _ARC_V2_MPU_EN (0x409)
#define _ARC_V2_MPU_RDB0 (0x422)
#define _ARC_V2_MPU_RDP0 (0x423)
/* For MPU version 2, the minimum protection region size is 2048 bytes */
#define ARC_FEATURE_MPU_ALIGNMENT_BITS 11
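For orientation, here is the v2 size-field derivation from _region_init() below, pulled out into a standalone sketch (the helper name is hypothetical, and __builtin_clz stands in for Zephyr's find_msb_set() so the snippet is self-contained):

/* A v2 region spans 2^(S+1) bytes, where S = bits - 1 is scattered
 * across RDP bits [1:0] and [11:9] by AUX_MPU_RDP_REGION_SIZE()
 */
static inline u32_t mpu_v2_size_field(u32_t size)
{
	u8_t bits = 31 - __builtin_clz(size); /* find_msb_set(size) - 1 */

	if (bits < ARC_FEATURE_MPU_ALIGNMENT_BITS) {
		bits = ARC_FEATURE_MPU_ALIGNMENT_BITS; /* min 2048 bytes */
	}
	if ((1 << bits) < size) {
		bits++; /* round a non-power-of-two size up */
	}
	/* e.g. size = 2048 -> bits = 11 -> field = 0x402 */
	return AUX_MPU_RDP_REGION_SIZE(bits);
}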
/**
* This internal function initializes a MPU region
*/
static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
u32_t region_attr)
{
u8_t bits = find_msb_set(size) - 1;
index = 2 * index;
if (bits < ARC_FEATURE_MPU_ALIGNMENT_BITS) {
bits = ARC_FEATURE_MPU_ALIGNMENT_BITS;
}
if ((1 << bits) < size) {
bits++;
}
if (size > 0) {
region_attr &= ~(AUX_MPU_RDP_SIZE_MASK);
region_attr |= AUX_MPU_RDP_REGION_SIZE(bits);
region_addr |= AUX_MPU_RDB_VALID_MASK;
} else {
region_addr = 0U;
}
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDP0 + index, region_attr);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RDB0 + index, region_addr);
}
/**
* This internal function is utilized by the MPU driver to parse the intent
* type (e.g. THREAD_STACK_REGION) and return the correct region index.
*/
static inline int _get_region_index_by_type(u32_t type)
{
/*
* The new MPU regions are allocated per type after the statically
* configured regions. The type is one-indexed rather than
* zero-indexed.
*
* For ARC MPU v2, the smaller index has higher priority, so the
* index is allocated in reverse order. Static regions start from
* the biggest index, then thread related regions.
*
*/
switch (type) {
case THREAD_STACK_USER_REGION:
return _get_num_regions() - mpu_config.num_regions
- THREAD_STACK_REGION;
case THREAD_STACK_REGION:
case THREAD_APP_DATA_REGION:
case THREAD_STACK_GUARD_REGION:
return _get_num_regions() - mpu_config.num_regions - type;
case THREAD_DOMAIN_PARTITION_REGION:
#if defined(CONFIG_MPU_STACK_GUARD)
return _get_num_regions() - mpu_config.num_regions - type;
#else
/*
* Start domain partition region from stack guard region
* since stack guard is not enabled.
*/
return _get_num_regions() - mpu_config.num_regions - type + 1;
#endif
default:
__ASSERT(0, "Unsupported type");
return -EINVAL;
}
}
/**
* This internal function checks if region is enabled or not
*/
static inline bool _is_enabled_region(u32_t r_index)
{
return ((z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
& AUX_MPU_RDB_VALID_MASK) == AUX_MPU_RDB_VALID_MASK);
}
/**
* This internal function checks if the given buffer is in the region
*/
static inline bool _is_in_region(u32_t r_index, u32_t start, u32_t size)
{
u32_t r_addr_start;
u32_t r_addr_end;
u32_t r_size_lshift;
r_addr_start = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
& (~AUX_MPU_RDB_VALID_MASK);
r_size_lshift = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + 2 * r_index)
& AUX_MPU_RDP_SIZE_MASK;
r_size_lshift = (r_size_lshift & 0x3) | ((r_size_lshift >> 7) & 0x1C);
r_addr_end = r_addr_start + (1 << (r_size_lshift + 1));
if (start >= r_addr_start && (start + size) < r_addr_end) {
return 1;
}
return 0;
}
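Cross-checking against the size encoding above: for the 2 KiB example field 0x402, (0x402 & 0x3) | ((0x402 >> 7) & 0x1C) reassembles to 2 | 8 = 10, and the computed region end is start + (1 << (10 + 1)) = start + 2048, as expected.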
/**
* This internal function checks whether the region is user accessible
*/
static inline bool _is_user_accessible_region(u32_t r_index, int write)
{
u32_t r_ap;
r_ap = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + 2 * r_index);
r_ap &= AUX_MPU_RDP_ATTR_MASK;
if (write) {
return ((r_ap & (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW)) ==
(AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW));
}
return ((r_ap & (AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR)) ==
(AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR));
}
/**
* @brief configure the base address and size for an MPU region
*
* @param type MPU region type
* @param base base address in RAM
* @param size size of the region
*/
static inline int _mpu_configure(u8_t type, u32_t base, u32_t size)
{
s32_t region_index = _get_region_index_by_type(type);
u32_t region_attr = _get_region_attr_by_type(type);
LOG_DBG("Region info: 0x%x 0x%x", base, size);
if (region_attr == 0 || region_index < 0) {
return -EINVAL;
}
/*
* For ARC MPU v2, MPU regions can be overlapped, smaller
* region index has higher priority.
*/
_region_init(region_index, base, size, region_attr);
return 0;
}
/* ARC Core MPU Driver API Implementation for ARC MPUv2 */
/**
* @brief enable the MPU
*/
void arc_core_mpu_enable(void)
{
/* Enable MPU */
z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) | AUX_MPU_EN_ENABLE);
}
/**
* @brief disable the MPU
*/
void arc_core_mpu_disable(void)
{
/* Disable MPU */
z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN,
z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) & AUX_MPU_EN_DISABLE);
}
/**
* @brief configure the thread's MPU regions
*
* @param thread the target thread
*/
void arc_core_mpu_configure_thread(struct k_thread *thread)
{
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_USERSPACE)
if ((thread->base.user_options & K_USER) != 0) {
/* the areas before and after the thread's user stack are
* kernel only. These areas can be used as the stack guard.
* -----------------------
* | kernel only area |
* |---------------------|
* | user stack |
* |---------------------|
* |privilege stack guard|
* |---------------------|
* | privilege stack |
* -----------------------
*/
if (_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->arch.priv_stack_start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE) < 0) {
LOG_ERR("thread %p's stack guard failed", thread);
return;
}
} else {
if (_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->stack_info.start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE) < 0) {
LOG_ERR("thread %p's stack guard failed", thread);
return;
}
}
#else
if (_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->stack_info.start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE) < 0) {
LOG_ERR("thread %p's stack guard failed", thread);
return;
}
#endif
#endif
#if defined(CONFIG_USERSPACE)
/* configure stack region of user thread */
if (thread->base.user_options & K_USER) {
LOG_DBG("configure user thread %p's stack", thread);
if (_mpu_configure(THREAD_STACK_USER_REGION,
(u32_t)thread->stack_obj, thread->stack_info.size) < 0) {
LOG_ERR("user thread %p's stack failed", thread);
return;
}
}
LOG_DBG("configure thread %p's domain", thread);
arc_core_mpu_configure_mem_domain(thread);
#endif
}
/**
* @brief configure the default region
*
* @param region_attr region attribute of default region
*/
void arc_core_mpu_default(u32_t region_attr)
{
u32_t val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) &
(~AUX_MPU_RDP_ATTR_MASK);
region_attr &= AUX_MPU_RDP_ATTR_MASK;
z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN, region_attr | val);
}
/**
* @brief configure the MPU region
*
* @param index MPU region index
* @param base base address
* @param region_attr region attribute
*/
int arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
u32_t region_attr)
{
if (index >= _get_num_regions()) {
return -EINVAL;
}
region_attr &= AUX_MPU_RDP_ATTR_MASK;
_region_init(index, base, size, region_attr);
return 0;
}
#if defined(CONFIG_USERSPACE)
/**
* @brief configure MPU regions for the memory partitions of the memory domain
*
* @param thread the thread which has memory domain
*/
void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
{
int region_index =
_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
u32_t num_partitions;
struct k_mem_partition *pparts;
struct k_mem_domain *mem_domain = NULL;
if (thread) {
mem_domain = thread->mem_domain_info.mem_domain;
}
if (mem_domain) {
LOG_DBG("configure domain: %p", mem_domain);
num_partitions = mem_domain->num_partitions;
pparts = mem_domain->partitions;
} else {
LOG_DBG("disable domain partition regions");
num_partitions = 0U;
pparts = NULL;
}
for (; region_index >= 0; region_index--) {
if (num_partitions) {
LOG_DBG("set region 0x%x 0x%x 0x%x",
region_index, pparts->start, pparts->size);
_region_init(region_index, pparts->start,
pparts->size, pparts->attr);
num_partitions--;
} else {
/* clear the remaining MPU entries */
_region_init(region_index, 0, 0, 0);
}
pparts++;
}
}
/**
* @brief remove MPU regions for the memory partitions of the memory domain
*
* @param mem_domain the target memory domain
*/
void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain)
{
ARG_UNUSED(mem_domain);
int region_index =
_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
for (; region_index >= 0; region_index--) {
_region_init(region_index, 0, 0, 0);
}
}
/**
* @brief reset MPU region for a single memory partition
*
* @param domain the target memory domain
* @param partition_id memory partition id
*/
void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain,
u32_t part_id)
{
ARG_UNUSED(domain);
int region_index =
_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
LOG_DBG("disable region 0x%x", region_index + part_id);
/* Disable region */
_region_init(region_index + part_id, 0, 0, 0);
}
/**
* @brief get the maximum number of free regions for memory domain partitions
*/
int arc_core_mpu_get_max_domain_partition_regions(void)
{
return _get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION) + 1;
}
/**
* @brief validate the given buffer is user accessible or not
*/
int arc_core_mpu_buffer_validate(void *addr, size_t size, int write)
{
int r_index;
/*
* For ARC MPU v2, a smaller region number takes priority. We
* can stop the iteration immediately once we find the matching
* region that grants permission or denies access.
*
*/
for (r_index = 0; r_index < _get_num_regions(); r_index++) {
if (!_is_enabled_region(r_index) ||
!_is_in_region(r_index, (u32_t)addr, size)) {
continue;
}
if (_is_user_accessible_region(r_index, write)) {
return 0;
} else {
return -EPERM;
}
}
return -EPERM;
}
#endif /* CONFIG_USERSPACE */
/* ARC MPU Driver Initial Setup */
/*
* @brief MPU default initialization and configuration
*
* This function provides the default configuration mechanism for the Memory
* Protection Unit (MPU).
*/
static int arc_mpu_init(struct device *arg)
{
ARG_UNUSED(arg);
u32_t num_regions;
u32_t i;
num_regions = _get_num_regions();
/* ARC MPU supports up to 16 Regions */
if (mpu_config.num_regions > num_regions) {
__ASSERT(0,
"Request to configure: %u regions (supported: %u)\n",
mpu_config.num_regions, num_regions);
return -EINVAL;
}
/* Disable MPU */
arc_core_mpu_disable();
int r_index;
/*
* the MPU regions are filled in reverse order. According to
* the ARCv2 ISA, the MPU region with a smaller index has
* higher priority. The static background MPU regions in
* mpu_config sit at the bottom, with the special-type
* regions above them.
*/
r_index = num_regions - mpu_config.num_regions;
/* clear all the regions first */
for (i = 0U; i < r_index; i++) {
_region_init(i, 0, 0, 0);
}
/* configure the static regions */
for (i = 0U; i < mpu_config.num_regions; i++) {
_region_init(r_index,
mpu_config.mpu_regions[i].base,
mpu_config.mpu_regions[i].size,
mpu_config.mpu_regions[i].attr);
r_index++;
}
/* default region: no read, write and execute */
arc_core_mpu_default(0);
/* Enable MPU */
arc_core_mpu_enable();
return 0;
}
SYS_INIT(arc_mpu_init, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V2_INTERNAL_H_ */


@@ -0,0 +1,677 @@
/*
* Copyright (c) 2019 Synopsys.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V3_INTERNAL_H_
#define ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V3_INTERNAL_H_
#define AUX_MPU_RPER_SID1 0x10000
/* valid mask: SID1+secure+valid */
#define AUX_MPU_RPER_VALID_MASK ((0x1) | AUX_MPU_RPER_SID1 | AUX_MPU_ATTR_S)
#define AUX_MPU_RPER_ATTR_MASK (0x1FF)
#define _ARC_V2_MPU_EN (0x409)
/* aux regs added in MPU version 3 */
#define _ARC_V2_MPU_INDEX (0x448) /* MPU index */
#define _ARC_V2_MPU_RSTART (0x449) /* MPU region start address */
#define _ARC_V2_MPU_REND (0x44A) /* MPU region end address */
#define _ARC_V2_MPU_RPER (0x44B) /* MPU region permission register */
#define _ARC_V2_MPU_PROBE (0x44C) /* MPU probe register */
/* For MPU version 3, the minimum protection region size is 32 bytes */
#define ARC_FEATURE_MPU_ALIGNMENT_BITS 5
#define CALC_REGION_END_ADDR(start, size) \
(start + size - (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS))
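A quick sanity check of the end-address math, with a hypothetical address and assuming Zephyr's BUILD_ASSERT from misc/util.h is in scope: _ARC_V2_MPU_REND stores the start of the region's last 32-byte granule, not the first byte past the region.

/* a 4 KiB region at 0x80000000 ends at granule 0x80000fe0 */
BUILD_ASSERT(CALC_REGION_END_ADDR(0x80000000, 0x1000) == 0x80000fe0);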
#if defined(CONFIG_USERSPACE) && defined(CONFIG_MPU_STACK_GUARD)
/* 1 for stack guard, 1 for user thread, 1 for split */
#define MPU_REGION_NUM_FOR_THREAD 3
#elif defined(CONFIG_USERSPACE) || defined(CONFIG_MPU_STACK_GUARD)
/* 1 for stack guard or user thread stack, 1 for split */
#define MPU_REGION_NUM_FOR_THREAD 2
#else
#define MPU_REGION_NUM_FOR_THREAD 0
#endif
/**
* @brief internal structure holding information of
* memory areas where dynamic MPU programming is allowed.
*/
struct dynamic_region_info {
u8_t index;
u32_t base;
u32_t size;
u32_t attr;
};
#define MPU_DYNAMIC_REGION_AREAS_NUM 2
static u8_t static_regions_num;
static u8_t dynamic_regions_num;
static u8_t dynamic_region_index;
/**
* Global array, holding the MPU region index of
* the memory region inside which dynamic memory
* regions may be configured.
*/
static struct dynamic_region_info dyn_reg_info[MPU_DYNAMIC_REGION_AREAS_NUM];
static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
u32_t region_attr)
{
if (size < (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS)) {
size = (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS);
}
if (region_attr) {
region_attr &= AUX_MPU_RPER_ATTR_MASK;
region_attr |= AUX_MPU_RPER_VALID_MASK;
}
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, region_addr);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_REND,
CALC_REGION_END_ADDR(region_addr, size));
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, region_attr);
}
static inline void _region_set_attr(u32_t index, u32_t attr)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RPER, attr |
AUX_MPU_RPER_VALID_MASK);
}
static inline u32_t _region_get_attr(u32_t index)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
return z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
}
static inline u32_t _region_get_start(u32_t index)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
return z_arc_v2_aux_reg_read(_ARC_V2_MPU_RSTART);
}
static inline void _region_set_start(u32_t index, u32_t start)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, start);
}
static inline u32_t _region_get_end(u32_t index)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
return z_arc_v2_aux_reg_read(_ARC_V2_MPU_REND) +
(1 << ARC_FEATURE_MPU_ALIGNMENT_BITS);
}
static inline void _region_set_end(u32_t index, u32_t end)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
z_arc_v2_aux_reg_write(_ARC_V2_MPU_REND, end -
(1 << ARC_FEATURE_MPU_ALIGNMENT_BITS));
}
/**
* This internal function probes the MPU index of the given addr. If
* the addr is not covered by any MPU region, an error is returned.
*/
static inline int _mpu_probe(u32_t addr)
{
u32_t val;
z_arc_v2_aux_reg_write(_ARC_V2_MPU_PROBE, addr);
val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_INDEX);
/* if no match or multiple regions match, return error */
if (val & 0xC0000000) {
return -EINVAL;
} else {
return val;
}
}
/**
* This internal function allocates a dynamic MPU region and returns
* the index or error
*/
static inline int _dynamic_region_allocate_index(void)
{
if (dynamic_region_index >= _get_num_regions()) {
LOG_ERR("not enough MPU entries %d", dynamic_region_index);
return -EINVAL;
}
return dynamic_region_index++;
}
/**
* This internal function checks if MPU region is enabled or not
*/
static inline bool _is_enabled_region(u32_t r_index)
{
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
return ((z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER) &
AUX_MPU_RPER_VALID_MASK) == AUX_MPU_RPER_VALID_MASK);
}
/**
* This internal function checks the area given by (start, size)
* and returns the index if the area matches exactly one MPU entry
*/
static inline int _get_region_index(u32_t start, u32_t size)
{
int index = _mpu_probe(start);
if (index >= 0 && index == _mpu_probe(start + size - 1)) {
return index;
}
return -EINVAL;
}
/**
* This internal function checks whether the region is user accessible
*/
static inline bool _is_user_accessible_region(u32_t r_index, int write)
{
u32_t r_ap;
z_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
r_ap = z_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
r_ap &= AUX_MPU_RPER_ATTR_MASK;
if (write) {
return ((r_ap & (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW)) ==
(AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW));
}
return ((r_ap & (AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR)) ==
(AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR));
}
/* @brief allocate and init a dynamic MPU region
*
* This internal function performs the allocation and initialization of
* a dynamic MPU region
*
* @param base region base
* @param size region size
* @param attr region attribute
* @return <0 failure, >0 allocated dynamic region index
*/
static int _dynamic_region_allocate_and_init(u32_t base, u32_t size,
u32_t attr)
{
int u_region_index = _get_region_index(base, size);
int region_index;
LOG_DBG("Region info: base 0x%x size 0x%x attr 0x%x", base, size, attr);
if (u_region_index == -EINVAL) {
/* no underlying region */
region_index = _dynamic_region_allocate_index();
if (region_index > 0) {
/* a new region */
_region_init(region_index, base, size, attr);
}
return region_index;
}
/*
* The new memory region is to be placed inside the underlying
* region, possibly splitting the underlying region into two.
*/
u32_t u_region_start = _region_get_start(u_region_index);
u32_t u_region_end = _region_get_end(u_region_index);
u32_t u_region_attr = _region_get_attr(u_region_index);
u32_t end = base + size;
if ((base == u_region_start) && (end == u_region_end)) {
/* The new region overlaps entirely with the
* underlying region. In this case we simply
* update the partition attributes of the
* underlying region with those of the new
* region.
*/
_region_init(u_region_index, base, size, attr);
region_index = u_region_index;
} else if (base == u_region_start) {
/* The new region starts exactly at the start of the
* underlying region; the start of the underlying
* region needs to be set to the end of the new region.
*/
_region_set_start(u_region_index, base + size);
_region_set_attr(u_region_index, u_region_attr);
region_index = _dynamic_region_allocate_index();
if (region_index > 0) {
_region_init(region_index, base, size, attr);
}
} else if (end == u_region_end) {
/* The new region ends exactly at the end of the
* underlying region; the end of the underlying
* region needs to be set to the start of the
* new region.
*/
_region_set_end(u_region_index, base);
_region_set_attr(u_region_index, u_region_attr);
region_index = _dynamic_region_allocate_index();
if (region_index > 0) {
_region_init(region_index, base, size, attr);
}
} else {
/* The new region lies strictly inside the
* underlying region, which needs to split
* into two regions.
*/
_region_set_end(u_region_index, base);
_region_set_attr(u_region_index, u_region_attr);
region_index = _dynamic_region_allocate_index();
if (region_index > 0) {
_region_init(region_index, base, size, attr);
region_index = _dynamic_region_allocate_index();
if (region_index > 0) {
_region_init(region_index, base + size,
u_region_end - end, u_region_attr);
}
}
}
return region_index;
}
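A usage sketch with hypothetical addresses: carving a 32-byte stack guard out of a dynamic RAM entry spanning 0x80000000-0x80010000.

/* the guard lies strictly inside the RAM entry, so the entry splits */
int idx = _dynamic_region_allocate_and_init(0x80004000, 32,
	_get_region_attr_by_type(THREAD_STACK_GUARD_REGION));
/* afterwards the underlying entry's end drops to 0x80004000, one new
 * entry covers the 32-byte guard, and a second new entry (whose index
 * idx holds) covers 0x80004020..0x80010000 with the original RAM
 * attributes
 */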
/* @brief reset the dynamic MPU regions
*
* This internal function performs the reset of dynamic MPU regions
*/
static void _mpu_reset_dynamic_regions(void)
{
u32_t i;
u32_t num_regions = _get_num_regions();
for (i = static_regions_num; i < num_regions; i++) {
_region_init(i, 0, 0, 0);
}
for (i = 0; i < dynamic_regions_num; i++) {
_region_init(
dyn_reg_info[i].index,
dyn_reg_info[i].base,
dyn_reg_info[i].size,
dyn_reg_info[i].attr);
}
/* dynamic regions are after static regions */
dynamic_region_index = static_regions_num;
}
/**
* @brief configure the base address and size for an MPU region
*
* @param type MPU region type
* @param base base address in RAM
* @param size size of the region
*/
static inline int _mpu_configure(u8_t type, u32_t base, u32_t size)
{
u32_t region_attr = _get_region_attr_by_type(type);
return _dynamic_region_allocate_and_init(base, size, region_attr);
}
/* ARC Core MPU Driver API Implementation for ARC MPUv3 */
/**
* @brief enable the MPU
*/
void arc_core_mpu_enable(void)
{
#define MPU_ENABLE_ATTR 0
arc_core_mpu_default(MPU_ENABLE_ATTR);
}
/**
* @brief disable the MPU
*/
void arc_core_mpu_disable(void)
{
/* MPU is always enabled, use default region to
* simulate MPU disable
*/
arc_core_mpu_default(REGION_ALL_ATTR);
}
/**
* @brief configure the thread's mpu regions
*
* @param thread the target thread
*/
void arc_core_mpu_configure_thread(struct k_thread *thread)
{
/* the MPU entries of ARC MPUv3 are divided into 2 parts:
* static entries: global MPU entries, not changed on context switch
* dynamic entries: MPU entries changed on context switch and
* memory domain configuration, including:
* MPU entries for the user thread stack
* MPU entries for the stack guard
* MPU entries for memory domains
* MPU entries for other thread-specific regions
* Before configuring thread-specific MPU entries, the dynamic
* entries need to be reset.
*/
_mpu_reset_dynamic_regions();
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_USERSPACE)
if ((thread->base.user_options & K_USER) != 0) {
/* the areas before and after the thread's user stack are
* kernel only. These areas can be used as the stack guard.
* -----------------------
* | kernel only area |
* |---------------------|
* | user stack |
* |---------------------|
* |privilege stack guard|
* |---------------------|
* | privilege stack |
* -----------------------
*/
if (_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->arch.priv_stack_start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE) < 0) {
LOG_ERR("thread %p's stack guard failed", thread);
return;
}
} else {
if (_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->stack_info.start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE) < 0) {
LOG_ERR("thread %p's stack guard failed", thread);
return;
}
}
#else
if (_mpu_configure(THREAD_STACK_GUARD_REGION,
thread->stack_info.start - STACK_GUARD_SIZE,
STACK_GUARD_SIZE) < 0) {
LOG_ERR("thread %p's stack guard failed", thread);
return;
}
#endif
#endif
#if defined(CONFIG_USERSPACE)
u32_t num_partitions;
struct k_mem_partition *pparts;
struct k_mem_domain *mem_domain = thread->mem_domain_info.mem_domain;
/* configure stack region of user thread */
if (thread->base.user_options & K_USER) {
LOG_DBG("configure user thread %p's stack", thread);
if (_mpu_configure(THREAD_STACK_USER_REGION,
(u32_t)thread->stack_obj, thread->stack_info.size) < 0) {
LOG_ERR("thread %p's stack failed", thread);
return;
}
}
/* configure thread's memory domain */
if (mem_domain) {
LOG_DBG("configure thread %p's domain: %p",
thread, mem_domain);
num_partitions = mem_domain->num_partitions;
pparts = mem_domain->partitions;
} else {
num_partitions = 0;
pparts = NULL;
}
for (u32_t i = 0; i < num_partitions; i++) {
if (pparts->size) {
if (_dynamic_region_allocate_and_init(pparts->start,
pparts->size, pparts->attr) < 0) {
LOG_ERR(
"thread %p's mem region: %p failed",
thread, pparts);
return;
}
}
pparts++;
}
#endif
}
/**
* @brief configure the default region
*
* @param region_attr region attribute of default region
*/
void arc_core_mpu_default(u32_t region_attr)
{
u32_t val = z_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) &
(~AUX_MPU_RPER_ATTR_MASK);
region_attr &= AUX_MPU_RPER_ATTR_MASK;
z_arc_v2_aux_reg_write(_ARC_V2_MPU_EN, region_attr | val);
}
/**
* @brief configure the MPU region
*
* @param index MPU region index
* @param base base address
* @param size region size
* @param region_attr region attribute
*/
int arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
u32_t region_attr)
{
if (index >= _get_num_regions()) {
return -EINVAL;
}
region_attr &= AUX_MPU_RPER_ATTR_MASK;
_region_init(index, base, size, region_attr);
return 0;
}
#if defined(CONFIG_USERSPACE)
/**
* @brief configure MPU regions for the memory partitions of the memory domain
*
* @param thread the thread which has memory domain
*/
void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
{
arc_core_mpu_configure_thread(thread);
}
/**
* @brief remove MPU regions for the memory partitions of the memory domain
*
* @param mem_domain the target memory domain
*/
void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain)
{
u32_t num_partitions;
struct k_mem_partition *pparts;
int index;
if (mem_domain) {
LOG_DBG("configure domain: %p", mem_domain);
num_partitions = mem_domain->num_partitions;
pparts = mem_domain->partitions;
} else {
LOG_DBG("disable domain partition regions");
num_partitions = 0U;
pparts = NULL;
}
for (u32_t i = 0; i < num_partitions; i++) {
if (pparts->size) {
index = _get_region_index(pparts->start,
pparts->size);
if (index > 0) {
_region_set_attr(index,
REGION_KERNEL_RAM_ATTR);
}
}
pparts++;
}
}
/**
* @brief reset MPU region for a single memory partition
*
* @param partition_id memory partition id
*/
void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain,
u32_t partition_id)
{
struct k_mem_partition *partition = &domain->partitions[partition_id];
int region_index = _get_region_index(partition->start,
partition->size);
if (region_index < 0) {
return;
}
LOG_DBG("remove region 0x%x", region_index);
_region_set_attr(region_index, REGION_KERNEL_RAM_ATTR);
}
/**
* @brief get the maximum number of free regions for memory domain partitions
*/
int arc_core_mpu_get_max_domain_partition_regions(void)
{
/* consider the worst case: each partition requires a split */
return (_get_num_regions() - MPU_REGION_NUM_FOR_THREAD) / 2;
}
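Worked example: on a hypothetical 16-entry MPU with both CONFIG_USERSPACE and CONFIG_MPU_STACK_GUARD enabled, MPU_REGION_NUM_FOR_THREAD is 3, so at most (16 - 3) / 2 = 6 memory domain partitions can be backed, even in the worst case where every partition splits an underlying dynamic region in two.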
/**
* @brief validate the given buffer is user accessible or not
*/
int arc_core_mpu_buffer_validate(void *addr, size_t size, int write)
{
int r_index;
/*
* For ARC MPU v3, overlapping is not supported, so a single
* probe of the buffer's address range tells us whether a
* matching region grants permission or denies access.
*/
r_index = _mpu_probe((u32_t)addr);
/* match and the area is in one region */
if (r_index >= 0 && r_index == _mpu_probe((u32_t)addr + size - 1)) {
if (_is_user_accessible_region(r_index, write)) {
return 0;
} else {
return -EPERM;
}
}
return -EPERM;
}
#endif /* CONFIG_USERSPACE */
/* ARC MPU Driver Initial Setup */
/*
* @brief MPU default initialization and configuration
*
* This function provides the default configuration mechanism for the Memory
* Protection Unit (MPU).
*/
static int arc_mpu_init(struct device *arg)
{
ARG_UNUSED(arg);
u32_t num_regions;
u32_t i;
num_regions = _get_num_regions();
/* ARC MPU supports up to 16 Regions */
if (mpu_config.num_regions > num_regions) {
__ASSERT(0,
"Request to configure: %u regions (supported: %u)\n",
mpu_config.num_regions, num_regions);
return -EINVAL;
}
/* Disable MPU */
arc_core_mpu_disable();
for (i = 0U; i < mpu_config.num_regions; i++) {
_region_init(i,
mpu_config.mpu_regions[i].base,
mpu_config.mpu_regions[i].size,
mpu_config.mpu_regions[i].attr);
/* record the static region which can be split */
if (mpu_config.mpu_regions[i].attr & REGION_DYNAMIC) {
if (dynamic_regions_num >=
MPU_DYNAMIC_REGION_AREAS_NUM) {
LOG_ERR("not enough dynamic regions %d",
dynamic_regions_num);
return -EINVAL;
}
dyn_reg_info[dynamic_regions_num].index = i;
dyn_reg_info[dynamic_regions_num].base =
mpu_config.mpu_regions[i].base;
dyn_reg_info[dynamic_regions_num].size =
mpu_config.mpu_regions[i].size;
dyn_reg_info[dynamic_regions_num].attr =
mpu_config.mpu_regions[i].attr;
dynamic_regions_num++;
}
}
static_regions_num = mpu_config.num_regions;
for (; i < num_regions; i++) {
_region_init(i, 0, 0, 0);
}
/* Enable MPU */
arc_core_mpu_enable();
return 0;
}
SYS_INIT(arc_mpu_init, PRE_KERNEL_1,
CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* ZEPHYR_ARCH_ARC_CORE_MPU_ARC_MPU_V3_INTERNAL_H_ */


@@ -9,79 +9,35 @@
#include <arch/arc/v2/mpu/arc_mpu.h>
#include <linker/linker-defs.h>
#ifdef CONFIG_USERSPACE
static struct arc_mpu_region mpu_regions[] = {
#if CONFIG_ARC_MPU_VER == 3
/* Region ICCM */
MPU_REGION_ENTRY("IMAGE ROM",
(u32_t) _image_rom_start,
(u32_t) _image_rom_size,
REGION_FLASH_ATTR),
MPU_REGION_ENTRY("KERNEL MEMORY",
(u32_t) __kernel_ram_start,
(u32_t) __kernel_ram_size,
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR),
#else
#if DT_ICCM_SIZE > 0
/* Region ICCM */
MPU_REGION_ENTRY("ICCM",
DT_ICCM_BASE_ADDRESS,
DT_ICCM_SIZE * 1024,
REGION_FLASH_ATTR),
#endif
#if DT_DCCM_SIZE > 0
/* Region DCCM */
MPU_REGION_ENTRY("DCCM",
DT_DCCM_BASE_ADDRESS,
DT_DCCM_SIZE * 1024,
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR),
#endif
#if CONFIG_SRAM_SIZE > 0
/* Region DDR RAM */
MPU_REGION_ENTRY("DDR RAM",
CONFIG_SRAM_BASE_ADDRESS,
CONFIG_SRAM_SIZE * 1024,
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR |
AUX_MPU_RDP_KE | AUX_MPU_RDP_UE),
#endif
#endif /* ARC_MPU_VER == 3 */
/* Region Peripheral */
MPU_REGION_ENTRY("PERIPHERAL",
0xF0000000,
64 * 1024,
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR),
};
#else /* CONFIG_USERSPACE */
static struct arc_mpu_region mpu_regions[] = {
#if DT_ICCM_SIZE > 0
/* Region ICCM */
MPU_REGION_ENTRY("ICCM",
DT_ICCM_BASE_ADDRESS,
DT_ICCM_SIZE * 1024,
REGION_FLASH_ATTR),
REGION_ROM_ATTR),
#endif
#if DT_DCCM_SIZE > 0
/* Region DCCM */
MPU_REGION_ENTRY("DCCM",
DT_DCCM_BASE_ADDRESS,
DT_DCCM_SIZE * 1024,
REGION_RAM_ATTR),
REGION_KERNEL_RAM_ATTR | REGION_DYNAMIC),
#endif
#if CONFIG_SRAM_SIZE > 0
/* Region DDR RAM */
MPU_REGION_ENTRY("DDR RAM",
CONFIG_SRAM_BASE_ADDRESS,
CONFIG_SRAM_SIZE * 1024,
REGION_ALL_ATTR),
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR | AUX_MPU_ATTR_UR |
AUX_MPU_ATTR_KE | AUX_MPU_ATTR_UE | REGION_DYNAMIC),
#endif
/* Region Peripheral */
MPU_REGION_ENTRY("PERIPHERAL",
0xF0000000,
64 * 1024,
REGION_IO_ATTR),
REGION_KERNEL_RAM_ATTR),
};
#endif
struct arc_mpu_config mpu_config = {
.num_regions = ARRAY_SIZE(mpu_regions),


@@ -9,63 +9,33 @@
#include <arch/arc/v2/mpu/arc_mpu.h>
#include <linker/linker-defs.h>
#ifdef CONFIG_USERSPACE
static struct arc_mpu_region mpu_regions[] = {
/* Region ICCM */
MPU_REGION_ENTRY("ICCM",
DT_ICCM_BASE_ADDRESS,
DT_ICCM_SIZE * 1024,
REGION_FLASH_ATTR),
REGION_ROM_ATTR),
/* Region DCCM */
MPU_REGION_ENTRY("DCCM",
DT_DCCM_BASE_ADDRESS,
DT_DCCM_SIZE * 1024,
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR),
REGION_KERNEL_RAM_ATTR),
/* Region DDR RAM */
MPU_REGION_ENTRY("SRAM",
CONFIG_SRAM_BASE_ADDRESS,
CONFIG_SRAM_SIZE * 1024,
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR |
AUX_MPU_RDP_KE | AUX_MPU_RDP_UE),
REGION_KERNEL_RAM_ATTR |
AUX_MPU_ATTR_KE | AUX_MPU_ATTR_UE),
MPU_REGION_ENTRY("FLASH_0",
CONFIG_FLASH_BASE_ADDRESS,
CONFIG_FLASH_SIZE * 1024,
AUX_MPU_RDP_KR |
AUX_MPU_RDP_KE | AUX_MPU_RDP_UE),
REGION_ROM_ATTR),
/* Region Peripheral */
MPU_REGION_ENTRY("PERIPHERAL",
0xF0000000,
64 * 1024,
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR),
REGION_KERNEL_RAM_ATTR),
};
#else /* CONFIG_USERSPACE */
static struct arc_mpu_region mpu_regions[] = {
/* Region ICCM */
MPU_REGION_ENTRY("ICCM",
DT_ICCM_BASE_ADDRESS,
DT_ICCM_SIZE * 1024,
REGION_FLASH_ATTR),
/* Region DCCM */
MPU_REGION_ENTRY("DCCM",
DT_DCCM_BASE_ADDRESS,
DT_DCCM_SIZE * 1024,
REGION_RAM_ATTR),
MPU_REGION_ENTRY("FLASH_0",
CONFIG_FLASH_BASE_ADDRESS,
CONFIG_FLASH_SIZE * 1024,
REGION_FLASH_ATTR),
/* Region DDR RAM */
MPU_REGION_ENTRY("SRAM",
CONFIG_SRAM_BASE_ADDRESS,
CONFIG_SRAM_SIZE * 1024,
REGION_ALL_ATTR),
/* Region Peripheral */
MPU_REGION_ENTRY("PERIPHERAL",
0xF0000000,
64 * 1024,
REGION_IO_ATTR),
};
#endif
struct arc_mpu_config mpu_config = {
.num_regions = ARRAY_SIZE(mpu_regions),


@@ -9,64 +9,27 @@
#include <arch/arc/v2/mpu/arc_mpu.h>
#include <linker/linker-defs.h>
#ifdef CONFIG_USERSPACE
static struct arc_mpu_region mpu_regions[] = {
#if CONFIG_ARC_MPU_VER == 3
/* Region ICCM */
MPU_REGION_ENTRY("IMAGE ROM",
(u32_t) _image_rom_start,
(u32_t) _image_rom_size,
REGION_FLASH_ATTR),
MPU_REGION_ENTRY("KERNEL MEMORY",
(u32_t) __kernel_ram_start,
(u32_t) __kernel_ram_size,
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR),
#else
#if DT_ICCM_SIZE > 0
/* Region ICCM */
MPU_REGION_ENTRY("ICCM",
DT_ICCM_BASE_ADDRESS,
DT_ICCM_SIZE * 1024,
REGION_FLASH_ATTR),
#endif
#if DT_DCCM_SIZE > 0
/* Region DCCM */
MPU_REGION_ENTRY("DCCM",
DT_DCCM_BASE_ADDRESS,
DT_DCCM_SIZE * 1024,
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR),
#endif
#endif /* ARC_MPU_VER == 3 */
/* Region Peripheral */
MPU_REGION_ENTRY("PERIPHERAL",
0xF0000000,
64 * 1024,
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR),
};
#else /* CONFIG_USERSPACE */
static struct arc_mpu_region mpu_regions[] = {
#if DT_ICCM_SIZE > 0
/* Region ICCM */
MPU_REGION_ENTRY("ICCM",
DT_ICCM_BASE_ADDRESS,
DT_ICCM_SIZE * 1024,
REGION_FLASH_ATTR),
REGION_ROM_ATTR),
#endif
#if DT_DCCM_SIZE > 0
/* Region DCCM */
MPU_REGION_ENTRY("DCCM",
DT_DCCM_BASE_ADDRESS,
DT_DCCM_SIZE * 1024,
REGION_RAM_ATTR),
REGION_KERNEL_RAM_ATTR | REGION_DYNAMIC),
#endif
/* Region Peripheral */
MPU_REGION_ENTRY("PERIPHERAL",
0xF0000000,
64 * 1024,
REGION_IO_ATTR),
REGION_KERNEL_RAM_ATTR),
};
#endif
struct arc_mpu_config mpu_config = {
.num_regions = ARRAY_SIZE(mpu_regions),


@@ -159,34 +159,34 @@ extern "C" {
#ifndef _ASMLANGUAGE
#include <arch/arc/v2/mpu/arc_mpu.h>
#define K_MEM_PARTITION_P_NA_U_NA AUX_MPU_RDP_N
#define K_MEM_PARTITION_P_RW_U_RW (AUX_MPU_RDP_UW | AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR)
#define K_MEM_PARTITION_P_RW_U_RO (AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR)
#define K_MEM_PARTITION_P_RW_U_NA (AUX_MPU_RDP_KW | AUX_MPU_RDP_KR)
#define K_MEM_PARTITION_P_RO_U_RO (AUX_MPU_RDP_UR | AUX_MPU_RDP_KR)
#define K_MEM_PARTITION_P_RO_U_NA (AUX_MPU_RDP_KR)
#define K_MEM_PARTITION_P_NA_U_NA AUX_MPU_ATTR_N
#define K_MEM_PARTITION_P_RW_U_RW (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR)
#define K_MEM_PARTITION_P_RW_U_RO (AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR)
#define K_MEM_PARTITION_P_RW_U_NA (AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR)
#define K_MEM_PARTITION_P_RO_U_RO (AUX_MPU_ATTR_UR | AUX_MPU_ATTR_KR)
#define K_MEM_PARTITION_P_RO_U_NA (AUX_MPU_ATTR_KR)
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX (AUX_MPU_RDP_UW | AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR | \
AUX_MPU_RDP_KE | AUX_MPU_RDP_UE)
#define K_MEM_PARTITION_P_RWX_U_RX (AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR | \
AUX_MPU_RDP_KE | AUX_MPU_RDP_UE)
#define K_MEM_PARTITION_P_RX_U_RX (AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KR | \
AUX_MPU_RDP_KE | AUX_MPU_RDP_UE)
#define K_MEM_PARTITION_P_RWX_U_RWX (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR | \
AUX_MPU_ATTR_KE | AUX_MPU_ATTR_UE)
#define K_MEM_PARTITION_P_RWX_U_RX (AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR | \
AUX_MPU_ATTR_KE | AUX_MPU_ATTR_UE)
#define K_MEM_PARTITION_P_RX_U_RX (AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KR | \
AUX_MPU_ATTR_KE | AUX_MPU_ATTR_UE)
#define K_MEM_PARTITION_IS_WRITABLE(attr) \
({ \
int __is_writable__; \
attr &= (AUX_MPU_RDP_UW | AUX_MPU_RDP_KW); \
attr &= (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW); \
switch (attr) { \
case (AUX_MPU_RDP_UW | AUX_MPU_RDP_KW): \
case AUX_MPU_RDP_UW: \
case AUX_MPU_RDP_KW: \
case (AUX_MPU_ATTR_UW | AUX_MPU_ATTR_KW): \
case AUX_MPU_ATTR_UW: \
case AUX_MPU_ATTR_KW: \
__is_writable__ = 1; \
break; \
default: \
@@ -196,7 +196,7 @@ extern "C" {
__is_writable__; \
})
#define K_MEM_PARTITION_IS_EXECUTABLE(attr) \
((attr) & (AUX_MPU_RDP_KE | AUX_MPU_RDP_UE))
((attr) & (AUX_MPU_ATTR_KE | AUX_MPU_ATTR_UE))
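Nothing changes for users of these macros; a sketch of a partition definition with the renamed attributes (the buffer and partition names are hypothetical, K_MEM_PARTITION_DEFINE is the existing kernel helper):

static u8_t __aligned(2048) app_buf[2048];

K_MEM_PARTITION_DEFINE(app_part, app_buf, sizeof(app_buf),
	K_MEM_PARTITION_P_RW_U_RW);

The power-of-two size with matching alignment satisfies the MPU v2 region constraints; MPU v3 only needs 32-byte granularity.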
#endif /* _ASMLANGUAGE */


@@ -52,6 +52,13 @@ void arc_core_mpu_enable(void);
*/
void arc_core_mpu_disable(void);
/**
* @brief configure the thread's mpu regions
*
* @param thread the target thread
*/
void arc_core_mpu_configure_thread(struct k_thread *thread);
/*
* Before configuring the MPU regions, the MPU should be disabled
*/
@@ -70,52 +77,19 @@ void arc_core_mpu_default(u32_t region_attr);
* @param size size of region
* @param region_attr region attribute
*/
void arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
int arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
u32_t region_attr);
/**
* @brief configure the base address and size for an MPU region
*
* @param type MPU region type
* @param base base address in RAM
* @param size size of the region
*/
void arc_core_mpu_configure(u8_t type, u32_t base, u32_t size);
#endif /* CONFIG_ARC_CORE_MPU */
#if defined(CONFIG_MPU_STACK_GUARD)
/**
* @brief Configure MPU stack guard
*
* This function configures per thread stack guards reprogramming the MPU.
* The functionality is meant to be used during context switch.
*
* @param thread thread info data structure.
*/
void configure_mpu_stack_guard(struct k_thread *thread);
#endif
#if defined(CONFIG_USERSPACE)
void arc_core_mpu_configure_user_context(struct k_thread *thread);
void arc_core_mpu_configure_mem_domain(struct k_thread *thread);
void arc_core_mpu_mem_partition_remove(u32_t part_index);
void arc_core_mpu_configure_mem_partition(u32_t part_index,
struct k_mem_partition *part);
void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain);
void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain,
u32_t partition_id);
int arc_core_mpu_get_max_domain_partition_regions(void);
int arc_core_mpu_buffer_validate(void *addr, size_t size, int write);
/*
* @brief Configure MPU memory domain
*
* This function configures per thread memory domain reprogramming the MPU.
* The functionality is meant to be used during context switch.
*
* @param thread thread info data structure.
*/
void configure_mpu_mem_domain(struct k_thread *thread);
void configure_mpu_user_context(struct k_thread *thread);
#endif
void configure_mpu_thread(struct k_thread *thread);


@@ -8,34 +8,49 @@
#define AUX_MPU_RDP_UE 0x008 /* allow user execution */
#define AUX_MPU_RDP_UW 0x010 /* allow user write */
#define AUX_MPU_RDP_UR 0x020 /* allow user read */
#define AUX_MPU_RDP_KE 0x040 /* only allow kernel execution */
#define AUX_MPU_RDP_KW 0x080 /* only allow kernel write */
#define AUX_MPU_RDP_KR 0x100 /* only allow kernel read */
#define AUX_MPU_RDP_S 0x8000 /* secure */
#define AUX_MPU_RDP_N 0x0000 /* normal */
#define AUX_MPU_ATTR_UE 0x008 /* allow user execution */
#define AUX_MPU_ATTR_UW 0x010 /* allow user write */
#define AUX_MPU_ATTR_UR 0x020 /* allow user read */
#define AUX_MPU_ATTR_KE 0x040 /* only allow kernel execution */
#define AUX_MPU_ATTR_KW 0x080 /* only allow kernel write */
#define AUX_MPU_ATTR_KR 0x100 /* only allow kernel read */
#define AUX_MPU_ATTR_S 0x8000 /* secure */
#define AUX_MPU_ATTR_N 0x0000 /* normal */
/*
* A dynamic region is one that can be split into sub-regions. This
* attribute is meaningful for ARC MPUv3, which does not support MPU
* entry overlap. For ARC MPUv2 this attribute is ignored, as overlap
* is supported in hardware.
*/
#define REGION_DYNAMIC 0x800 /* dynamic flag */
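As used in the SoC region tables updated by this commit, for example:

MPU_REGION_ENTRY("DCCM",
	DT_DCCM_BASE_ADDRESS,
	DT_DCCM_SIZE * 1024,
	REGION_KERNEL_RAM_ATTR | REGION_DYNAMIC),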
/* Some helper defines for common regions */
#define REGION_RAM_ATTR \
(AUX_MPU_RDP_UW | AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR)
#define REGION_FLASH_ATTR \
(AUX_MPU_RDP_UE | AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KE | AUX_MPU_RDP_KR)
#define REGION_KERNEL_RAM_ATTR \
(AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR)
#define REGION_KERNEL_ROM_ATTR \
(AUX_MPU_ATTR_KE | AUX_MPU_ATTR_KR)
#define REGION_RAM_ATTR \
(AUX_MPU_ATTR_UW | AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR)
#define REGION_ROM_ATTR \
(AUX_MPU_ATTR_UE | AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KE | AUX_MPU_ATTR_KR)
#define REGION_IO_ATTR \
(AUX_MPU_RDP_UW | AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR)
(AUX_MPU_ATTR_UW | AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR)
#define REGION_ALL_ATTR \
(AUX_MPU_RDP_UW | AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR | \
AUX_MPU_RDP_KE | AUX_MPU_RDP_UE)
(AUX_MPU_ATTR_UW | AUX_MPU_ATTR_UR | \
AUX_MPU_ATTR_KW | AUX_MPU_ATTR_KR | \
AUX_MPU_ATTR_KE | AUX_MPU_ATTR_UE)
#define REGION_32B 0x200