arch: arc: Add initial memory domain support

Referring to the ARM implementation, initial memory domain support
is added for ARC:
* changes in the MPU drivers
* changes in Kconfig
* code to configure the memory domain during thread swap
* changes in the linker script template
* memory-domain-related macro definitions

The committed code was smoke-tested with
samples/mpu/mem_domain_apis_test.

Signed-off-by: Wayne Ren <wei.ren@synopsys.com>
Wayne Ren authored on 2017-12-20 16:48:45 +08:00; committed by Andrew Boie
commit 9a40bf6b7e
9 changed files with 634 additions and 92 deletions
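For context, a minimal sketch of the application-level usage this commit enables, going through the kernel memory-domain API (the buffer and symbol names below are hypothetical; the actual exercise lives in samples/mpu/mem_domain_apis_test and is not reproduced here):

#include <zephyr.h>

/* 1 KB power-of-two-sized, size-aligned buffer, so it also satisfies the
 * ARC MPU v2 partition alignment rules introduced by this commit.
 */
static u8_t __aligned(1024) app_buf[1024];

/* K_MEM_PARTITION_DEFINE and the K_MEM_PARTITION_P_RW_U_RW attribute come
 * from the kernel and arch headers touched by this commit.
 */
K_MEM_PARTITION_DEFINE(app_part, app_buf, sizeof(app_buf),
		       K_MEM_PARTITION_P_RW_U_RW);

static struct k_mem_partition *app_parts[] = { &app_part };
static struct k_mem_domain app_domain;

void setup_domain(k_tid_t user_thread)
{
	/* One partition in the domain; the ARC MPU is then reprogrammed for
	 * the thread on every context switch via configure_mpu_mem_domain().
	 */
	k_mem_domain_init(&app_domain, ARRAY_SIZE(app_parts), app_parts);
	k_mem_domain_add_thread(&app_domain, user_thread);
}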


@ -264,6 +264,14 @@ _firq_reschedule:
pop_s r2
#endif
#ifdef CONFIG_USERSPACE
push_s r2
mov r0, r2
bl configure_mpu_mem_domain
pop_s r2
#endif
ld_s r3, [r2, _thread_offset_to_relinquish_cause]
breq r3, _CAUSE_RIRQ, _firq_return_from_rirq
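In C terms, the block added here (and in the RIRQ and __swap paths below) does the following for the thread being switched in, which the surrounding assembly keeps in r2 (a sketch of intent, not generated code; incoming_thread is a hypothetical name):

#ifdef CONFIG_USERSPACE
	/* r2 is saved/restored around the call because the C routine may
	 * clobber caller-saved registers; r0 carries the argument.
	 */
	configure_mpu_mem_domain(incoming_thread);
#endif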


@ -9,6 +9,7 @@
#include <kernel.h>
#include <soc.h>
#include <arch/arc/v2/mpu/arc_core_mpu.h>
#include <logging/sys_log.h>
#if defined(CONFIG_MPU_STACK_GUARD)
/*
@ -28,3 +29,61 @@ void configure_mpu_stack_guard(struct k_thread *thread)
arc_core_mpu_enable();
}
#endif
#if defined(CONFIG_USERSPACE)
/*
* @brief Configure MPU memory domain
*
* This function configures the per-thread memory domain by reprogramming the MPU.
* The functionality is meant to be used during context switch.
*
* @param thread thread info data structure.
*/
void configure_mpu_mem_domain(struct k_thread *thread)
{
SYS_LOG_DBG("configure thread %p's domain", thread);
arc_core_mpu_disable();
arc_core_mpu_configure_mem_domain(thread->mem_domain_info.mem_domain);
arc_core_mpu_enable();
}
int _arch_mem_domain_max_partitions_get(void)
{
return arc_core_mpu_get_max_domain_partition_regions();
}
/*
* Reset MPU region for a single memory partition
*/
void _arch_mem_domain_partition_remove(struct k_mem_domain *domain,
u32_t partition_id)
{
ARG_UNUSED(domain);
arc_core_mpu_disable();
arc_core_mpu_mem_partition_remove(partition_id);
arc_core_mpu_enable();
}
/*
* Destroy MPU regions for the mem domain
*/
void _arch_mem_domain_destroy(struct k_mem_domain *domain)
{
ARG_UNUSED(domain);
arc_core_mpu_disable();
arc_core_mpu_configure_mem_domain(NULL);
arc_core_mpu_enable();
}
/*
* Validate whether the given buffer is user accessible
*/
int _arch_buffer_validate(void *addr, size_t size, int write)
{
return arc_core_mpu_buffer_validate(addr, size, write);
}
#endif


@ -47,7 +47,7 @@
/**
* @brief Get the number of supported mpu regions
* @brief Get the number of supported MPU regions
*
*/
static inline u8_t _get_num_regions(void)
@ -59,26 +59,18 @@ static inline u8_t _get_num_regions(void)
return (u8_t)num;
}
/**
* This internal function is utilized by the MPU driver to parse the intent
* type (i.e. THREAD_STACK_REGION) and return the correct parameter set.
*/
static inline u32_t _get_region_attr_by_type(u32_t type, u32_t size)
static inline u32_t _get_region_attr_by_type(u32_t type)
{
switch (type) {
case THREAD_STACK_REGION:
return 0;
case THREAD_STACK_GUARD_REGION:
/* no Write and Execute to guard region */
#if CONFIG_ARC_MPU_VER == 2
u8_t bits = find_msb_set(size) + 1;
return AUX_MPU_RDP_REGION_SIZE(bits) |
AUX_MPU_RDP_UR | AUX_MPU_RDP_KR;
#elif CONFIG_ARC_MPU_VER == 3
return AUX_MPU_RDP_UR | AUX_MPU_RDP_KR;
#endif
default:
/* Size 0 region */
return 0;
@ -90,15 +82,23 @@ static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
{
/* ARC MPU version 2 and version 3 have different aux reg interface */
#if CONFIG_ARC_MPU_VER == 2
u8_t bits = find_msb_set(size) + 1;
u8_t bits = find_msb_set(size) - 1;
index = 2 * index;
if (bits < ARC_FEATURE_MPU_ALIGNMENT_BITS) {
bits = ARC_FEATURE_MPU_ALIGNMENT_BITS;
}
region_addr |= (AUX_MPU_RDP_REGION_SIZE(bits) |
AUX_MPU_RDB_VALID_MASK);
if ((1 << bits) < size) {
bits++;
}
if (size > 0) {
region_attr |= AUX_MPU_RDP_REGION_SIZE(bits);
region_addr |= AUX_MPU_RDB_VALID_MASK;
} else {
region_addr = 0;
}
_arc_v2_aux_reg_write(_ARC_V2_MPU_RDP0 + index, region_attr);
_arc_v2_aux_reg_write(_ARC_V2_MPU_RDB0 + index, region_addr);
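A quick worked example of the rounding above (the exact bit layout produced by AUX_MPU_RDP_REGION_SIZE() is defined in the MPU header and not shown here):

	size = 1500:  find_msb_set(1500) = 11, so bits = 10
	              (1 << 10) = 1024 < 1500, so bits is bumped to 11
	              the region effectively covers 2^11 = 2048 bytes

i.e. a requested size is rounded up to the next power of two, and never below the MPU's minimum alignment (ARC_FEATURE_MPU_ALIGNMENT_BITS).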
@ -109,11 +109,12 @@ static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
size = (1 << ARC_FEATURE_MPU_ALIGNMENT_BITS);
}
/* all mpu regions SID are the same: 1, the default SID */
/* all MPU regions SID are the same: 1, the default SID */
if (region_attr) {
region_attr |= (AUX_MPU_RDB_VALID_MASK | AUX_MPU_RDP_S |
AUX_MPU_RPER_SID1);
}
_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, index);
_arc_v2_aux_reg_write(_ARC_V2_MPU_RSTART, region_addr);
_arc_v2_aux_reg_write(_ARC_V2_MPU_REND,
@ -122,7 +123,6 @@ static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
#endif
}
#if CONFIG_ARC_MPU_VER == 3
static inline s32_t _mpu_probe(u32_t addr)
{
@ -140,6 +140,136 @@ static inline s32_t _mpu_probe(u32_t addr)
}
#endif
/**
* This internal function is utilized by the MPU driver to parse the intent
* type (i.e. THREAD_STACK_REGION) and return the correct region index.
*/
static inline u32_t _get_region_index_by_type(u32_t type)
{
/*
* The new MPU regions are allocated per type after the statically
* configured regions. The type is one-indexed rather than
* zero-indexed.
*
* For ARC MPU v2, the smaller index has higher priority, so the
* index is allocated in reverse order. Static regions start from
* the biggest index, then thread related regions.
*
* For ARC MPU v3, each index has the same priority, so the index is
* allocated from small to big. Static regions start from 0, then
* thread related regions.
*/
switch (type) {
#if CONFIG_ARC_MPU_VER == 2
case THREAD_STACK_REGION:
return _get_num_regions() - mpu_config.num_regions - type;
case THREAD_STACK_GUARD_REGION:
return _get_num_regions() - mpu_config.num_regions - type;
case THREAD_DOMAIN_PARTITION_REGION:
#if defined(CONFIG_MPU_STACK_GUARD)
return _get_num_regions() - mpu_config.num_regions - type;
#else
/*
* Since the stack guard is not enabled, start the domain partition
* regions from the slot the stack guard region would otherwise use.
*/
return _get_num_regions() - mpu_config.num_regions - type + 1;
#endif
#elif CONFIG_ARC_MPU_VER == 3
case THREAD_STACK_REGION:
return mpu_config.num_regions + type - 1;
case THREAD_STACK_GUARD_REGION:
return mpu_config.num_regions + type - 1;
case THREAD_DOMAIN_PARTITION_REGION:
#if defined(CONFIG_MPU_STACK_GUARD)
return mpu_config.num_regions + type - 1;
#else
/*
* Since the stack guard is not enabled, start the domain partition
* regions from the slot the stack guard region would otherwise use.
*/
return mpu_config.num_regions + type - 2;
#endif
#endif
default:
__ASSERT(0, "Unsupported type");
return 0;
}
}
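As a worked example of this allocation, assume (hypothetically) an MPU with 16 regions and mpu_config.num_regions = 3 static regions:

	ARC MPU v2 (dynamic regions count down from 16 - 3 = 13):
	    THREAD_STACK_REGION            -> index 12
	    THREAD_STACK_GUARD_REGION      -> index 11
	    THREAD_DOMAIN_PARTITION_REGION -> index 10 with the stack guard,
	                                      index 11 (reusing its slot) without it

	ARC MPU v3 (dynamic regions count up from the 3 static regions):
	    THREAD_STACK_REGION            -> index 3
	    THREAD_STACK_GUARD_REGION      -> index 4
	    THREAD_DOMAIN_PARTITION_REGION -> index 5 with the stack guard,
	                                      index 4 without it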
/**
* This internal function checks whether the region is enabled
*/
static inline int _is_enabled_region(u32_t r_index)
{
#if CONFIG_ARC_MPU_VER == 2
return ((_arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
& AUX_MPU_RDB_VALID_MASK) == AUX_MPU_RDB_VALID_MASK);
#elif CONFIG_ARC_MPU_VER == 3
_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
return ((_arc_v2_aux_reg_read(_ARC_V2_MPU_RPER) &
AUX_MPU_RDB_VALID_MASK) == AUX_MPU_RDB_VALID_MASK);
#endif
}
/**
* This internal function checks whether the given buffer is in the region
*/
static inline int _is_in_region(u32_t r_index, u32_t start, u32_t size)
{
#if CONFIG_ARC_MPU_VER == 2
u32_t r_addr_start;
u32_t r_addr_end;
u32_t r_size_lshift;
r_addr_start = _arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
& (~AUX_MPU_RDB_VALID_MASK);
r_size_lshift = _arc_v2_aux_reg_read(_ARC_V2_MPU_RDB0 + 2 * r_index)
& AUX_MPU_RDP_ATTR_MASK;
r_size_lshift = (r_size_lshift & 0x3) | ((r_size_lshift >> 7) & 0x1C);
r_addr_end = r_addr_start + (1 << (r_size_lshift + 1));
if (start >= r_addr_start && (start + size) < r_addr_end) {
return 1;
}
#elif CONFIG_ARC_MPU_VER == 3
if ((r_index == _mpu_probe(start)) &&
(r_index == _mpu_probe(start + size))) {
return 1;
}
#endif
return 0;
}
/**
* This internal function checks whether the region is user accessible
*/
static inline int _is_user_accessible_region(u32_t r_index, int write)
{
u32_t r_ap;
#if CONFIG_ARC_MPU_VER == 2
r_ap = _arc_v2_aux_reg_read(_ARC_V2_MPU_RDP0 + 2 * r_index);
#elif CONFIG_ARC_MPU_VER == 3
_arc_v2_aux_reg_write(_ARC_V2_MPU_INDEX, r_index);
r_ap = _arc_v2_aux_reg_read(_ARC_V2_MPU_RPER);
#endif
r_ap &= AUX_MPU_RDP_ATTR_MASK;
if (write) {
return ((r_ap & (AUX_MPU_RDP_UW | AUX_MPU_RDP_KW)) ==
(AUX_MPU_RDP_UW | AUX_MPU_RDP_KW));
}
return ((r_ap & (AUX_MPU_RDP_UR | AUX_MPU_RDP_KR)) ==
(AUX_MPU_RDP_UR | AUX_MPU_RDP_KR));
}
/* ARC Core MPU Driver API Implementation for ARC MPU */
/**
@ -153,7 +283,7 @@ void arc_core_mpu_enable(void)
_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) | AUX_MPU_EN_ENABLE);
/* MPU is always enabled, use default region to
* simulate mpu enable
* simulate MPU enable
*/
#elif CONFIG_ARC_MPU_VER == 3
arc_core_mpu_default(0);
@ -171,13 +301,12 @@ void arc_core_mpu_disable(void)
_arc_v2_aux_reg_read(_ARC_V2_MPU_EN) & AUX_MPU_EN_DISABLE);
#elif CONFIG_ARC_MPU_VER == 3
/* MPU is always enabled, use default region to
* simulate mpu disable
* simulate MPU disable
*/
arc_core_mpu_default(REGION_ALL_ATTR);
#endif
}
/**
* @brief configure the base address and size for an MPU region
*
@ -187,10 +316,14 @@ void arc_core_mpu_disable(void)
*/
void arc_core_mpu_configure(u8_t type, u32_t base, u32_t size)
{
u32_t region_index;
u32_t region_attr;
u32_t region_index = _get_region_index_by_type(type);
u32_t region_attr = _get_region_attr_by_type(type);
SYS_LOG_DBG("Region info: 0x%x 0x%x", base, size);
if (region_attr == 0) {
return;
}
/*
* The new MPU regions are allocated per type before
* the statically configured regions.
@ -200,38 +333,12 @@ void arc_core_mpu_configure(u8_t type, u32_t base, u32_t size)
* For ARC MPU v2, MPU regions can be overlapped, smaller
* region index has higher priority.
*/
region_index = _get_num_regions() - mpu_config.num_regions;
if (type > region_index) {
return;
}
region_index -= type;
region_attr = _get_region_attr_by_type(type, size);
if (region_attr == 0) {
return;
}
_region_init(region_index, base, size, region_attr);
#elif CONFIG_ARC_MPU_VER == 3
static s32_t last_index;
s32_t index;
u32_t last_region = _get_num_regions() - 1;
region_index = mpu_config.num_regions + type - 1;
if (region_index > last_region) {
return;
}
region_attr = _get_region_attr_by_type(type, size);
if (region_attr == 0) {
return;
}
/* Use the hardware probe to find the region, which may have been split.
* Another way is to look up the mpu_config.mpu_regions
@ -290,9 +397,8 @@ void arc_core_mpu_default(u32_t region_attr)
_arc_v2_aux_reg_write(_ARC_V2_MPU_EN, region_attr | val);
}
/**
* @brief configure the mpu region
* @brief configure the MPU region
*
* @param index MPU region index
* @param base base address
@ -310,6 +416,161 @@ void arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
_region_init(index, base, size, region_attr);
}
#if defined(CONFIG_USERSPACE)
/**
* @brief configure MPU regions for the memory partitions of the memory domain
*
* @param mem_domain memory domain that thread belongs to
*/
void arc_core_mpu_configure_mem_domain(struct k_mem_domain *mem_domain)
{
s32_t region_index =
_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
u32_t num_partitions;
struct k_mem_partition *pparts;
if (mem_domain) {
SYS_LOG_DBG("configure domain: %p", mem_domain);
num_partitions = mem_domain->num_partitions;
pparts = mem_domain->partitions;
} else {
SYS_LOG_DBG("disable domain partition regions");
num_partitions = 0;
pparts = NULL;
}
#if CONFIG_ARC_MPU_VER == 2
for (; region_index >= 0; region_index--) {
#elif CONFIG_ARC_MPU_VER == 3
/*
* Note: ARC MPU v3 does not allow overlapping regions. The partitions
* programmed here may overlap each other or the static regions in
* mpu_config, and any such overlap raises an EV_MachineCheck exception
* (ECR = 0x030600). The split mechanism used for the stack guard region
* avoids this, but it does not work for memory domains because the number
* of regions is dynamic, so be careful to avoid overlaps.
*/
for (; region_index < _get_num_regions() - 1; region_index++) {
#endif
if (num_partitions && pparts->size) {
SYS_LOG_DBG("set region 0x%x 0x%x 0x%x",
region_index, pparts->start, pparts->size);
_region_init(region_index, pparts->start, pparts->size,
pparts->attr);
num_partitions--;
} else {
SYS_LOG_DBG("disable region 0x%x", region_index);
/* Disable region */
_region_init(region_index, 0, 0, 0);
}
pparts++;
}
}
/**
* @brief configure MPU region for a single memory partition
*
* @param part_index memory partition index
* @param part memory partition info
*/
void arc_core_mpu_configure_mem_partition(u32_t part_index,
struct k_mem_partition *part)
{
u32_t region_index =
_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
SYS_LOG_DBG("configure partition index: %u", part_index);
if (part) {
SYS_LOG_DBG("set region 0x%x 0x%x 0x%x",
region_index + part_index, part->start, part->size);
_region_init(region_index, part->start, part->size,
part->attr);
} else {
SYS_LOG_DBG("disable region 0x%x", region_index + part_index);
/* Disable region */
_region_init(region_index + part_index, 0, 0, 0);
}
}
/**
* @brief Reset MPU region for a single memory partition
*
* @param part_index memory partition index
*/
void arc_core_mpu_mem_partition_remove(u32_t part_index)
{
u32_t region_index =
_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
SYS_LOG_DBG("disable region 0x%x", region_index + part_index);
/* Disable region */
_region_init(region_index + part_index, 0, 0, 0);
}
/**
* @brief get the maximum number of free regions for memory domain partitions
*/
int arc_core_mpu_get_max_domain_partition_regions(void)
{
#if CONFIG_ARC_MPU_VER == 2
return _get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION) + 1;
#elif CONFIG_ARC_MPU_VER == 3
/*
* Subtracting the start index of the domain partition regions and one
* reserved region from the total number of regions gives the maximum
* number of free regions available for memory domain partitions.
*/
return _get_num_regions() -
_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION) - 1;
#endif
}
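Continuing the hypothetical 16-region, 3-static-region example: on v2 the domain partition regions start at index 10 (stack guard enabled), so 10 + 1 = 11 slots (indices 0..10) are available; on v3 they start at index 5, so 16 - 5 - 1 = 10 slots remain after one reserved region.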
/**
* @brief validate whether the given buffer is user accessible
*/
int arc_core_mpu_buffer_validate(void *addr, size_t size, int write)
{
s32_t r_index;
/*
* For ARC MPU v2, a smaller region number takes priority, so we can
* stop the iteration as soon as we find a matching region that either
* grants permission or denies access.
*
* For ARC MPU v3, overlapping is not supported, so we can likewise
* stop at the first matching region.
*/
#if CONFIG_ARC_MPU_VER == 2
for (r_index = 0; r_index < _get_num_regions(); r_index++) {
if (!_is_enabled_region(r_index) ||
!_is_in_region(r_index, (u32_t)addr, size)) {
continue;
}
if (_is_user_accessible_region(r_index, write)) {
return 0;
} else {
return -EPERM;
}
}
#elif CONFIG_ARC_MPU_VER == 3
r_index = _mpu_probe((u32_t)addr);
/* match and the area is in one region */
if (r_index >= 0 && r_index == _mpu_probe((u32_t)addr + size)) {
if (_is_user_accessible_region(r_index, write)) {
return 0;
} else {
return -EPERM;
}
}
#endif
return -EPERM;
}
#endif /* CONFIG_USERSPACE */
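As a usage sketch, _arch_buffer_validate() is what generic kernel code can call before touching a user-supplied buffer; the helper below is hypothetical and assumes kernel context:

#include <errno.h>
#include <string.h>

/* Refuse to write through a user pointer unless some MPU region grants
 * both kernel and user write access to the whole range.
 */
static int copy_to_user_checked(void *dst, const void *src, size_t len)
{
	if (_arch_buffer_validate(dst, len, 1 /* write */) != 0) {
		return -EPERM;
	}
	memcpy(dst, src, len);
	return 0;
}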
/* ARC MPU Driver Initial Setup */
/*
@ -337,8 +598,8 @@ static void _arc_mpu_config(void)
u32_t r_index;
/*
* the MPU regions are filled in the reverse order.
* According to ARCv2 ISA, the mpu region with smaller
* index has higher priority. The static background mpu
* According to ARCv2 ISA, the MPU region with smaller
* index has higher priority. The static background MPU
* regions in mpu_config sit at the bottom, with the
* special-type regions above them.
*
@ -346,16 +607,16 @@ static void _arc_mpu_config(void)
r_index = num_regions - mpu_config.num_regions;
/* clear all the regions first */
for (i = 0; i < num_regions; i++) {
for (i = 0; i < r_index; i++) {
_region_init(i, 0, 0, 0);
}
/* configure the static regions */
for (r_index = 0; i < num_regions; i++) {
_region_init(i,
mpu_config.mpu_regions[r_index].base,
mpu_config.mpu_regions[r_index].size,
mpu_config.mpu_regions[r_index].attr);
for (i = 0; i < mpu_config.num_regions; i++) {
_region_init(r_index,
mpu_config.mpu_regions[i].base,
mpu_config.mpu_regions[i].size,
mpu_config.mpu_regions[i].attr);
r_index++;
}
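With the same hypothetical 16-region MPU and 3 mpu_config entries, the rewritten loops clear indices 0..12 and then program the static background regions at indices 13, 14 and 15, i.e. at the lowest v2 priority:

	r_index = 16 - 3 = 13
	indices 0..12  -> cleared via _region_init(i, 0, 0, 0)
	indices 13..15 -> programmed from mpu_config.mpu_regions[0..2]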


@ -168,6 +168,13 @@ _rirq_common_interrupt_swap:
pop_s r2
#endif
#ifdef CONFIG_USERSPACE
push_s r2
mov r0, r2
bl configure_mpu_mem_domain
pop_s r2
#endif
ld_s r3, [r2, _thread_offset_to_relinquish_cause]
breq r3, _CAUSE_RIRQ, _rirq_return_from_rirq


@ -117,6 +117,13 @@ SECTION_FUNC(TEXT, __swap)
pop_s r2
#endif
#ifdef CONFIG_USERSPACE
push_s r2
mov r0, r2
bl configure_mpu_mem_domain
pop_s r2
#endif
ld_s r3, [r2, _thread_offset_to_relinquish_cause]
breq r3, _CAUSE_RIRQ, _swap_return_from_rirq


@ -18,3 +18,4 @@ CONFIG_UART_NS16550_PORT_1=y
CONFIG_UART_NS16550_PORT_0=n
CONFIG_UART_INTERRUPT_DRIVEN=y
CONFIG_GPIO=y
CONFIG_ARC_MPU_ENABLE=y


@ -78,7 +78,128 @@ extern "C" {
#define _ARCH_THREAD_STACK_BUFFER(sym) ((char *)(sym + STACK_GUARD_SIZE))
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_MPU
#ifndef _ASMLANGUAGE
#include <arch/arc/v2/mpu/arc_mpu.h>
#define K_MEM_PARTITION_P_NA_U_NA AUX_MPU_RDP_N
#define K_MEM_PARTITION_P_RW_U_RW (AUX_MPU_RDP_UW | AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR)
#define K_MEM_PARTITION_P_RW_U_RO (AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR)
#define K_MEM_PARTITION_P_RW_U_NA (AUX_MPU_RDP_KW | AUX_MPU_RDP_KR)
#define K_MEM_PARTITION_P_RO_U_RO (AUX_MPU_RDP_UR | AUX_MPU_RDP_KR)
#define K_MEM_PARTITION_P_RO_U_NA (AUX_MPU_RDP_KR)
/* Execution-allowed attributes */
#define K_MEM_PARTITION_P_RWX_U_RWX (AUX_MPU_RDP_UW | AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR | \
AUX_MPU_RDP_KE | AUX_MPU_RDP_UE)
#define K_MEM_PARTITION_P_RWX_U_RX (AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KW | AUX_MPU_RDP_KR | \
AUX_MPU_RDP_KE | AUX_MPU_RDP_UE)
#define K_MEM_PARTITION_P_RX_U_RX (AUX_MPU_RDP_UR | \
AUX_MPU_RDP_KR | \
AUX_MPU_RDP_KE | AUX_MPU_RDP_UE)
#define K_MEM_PARTITION_IS_WRITABLE(attr) \
({ \
int __is_writable__; \
attr &= (AUX_MPU_RDP_UW | AUX_MPU_RDP_KW); \
switch (attr) { \
case (AUX_MPU_RDP_UW | AUX_MPU_RDP_KW): \
case AUX_MPU_RDP_UW: \
case AUX_MPU_RDP_KW: \
__is_writable__ = 1; \
break; \
default: \
__is_writable__ = 0; \
break; \
} \
__is_writable__; \
})
#define K_MEM_PARTITION_IS_EXECUTABLE(attr) \
((attr) & (AUX_MPU_RDP_KE | AUX_MPU_RDP_UE))
#endif /* _ASMLANGUAGE */
#if CONFIG_ARC_MPU_VER == 2
#define _ARCH_MEM_PARTITION_ALIGN_CHECK(start, size) \
BUILD_ASSERT_MSG(!(((size) & ((size) - 1))) && (size) >= STACK_ALIGN \
&& !((u32_t)(start) & ((size) - 1)), \
"the size of the partition must be power of 2" \
" and greater than or equal to the mpu adddress alignment." \
"start address of the partition must align with size.")
#elif CONFIG_ARC_MPU_VER == 3
#define _ARCH_MEM_PARTITION_ALIGN_CHECK(start, size) \
BUILD_ASSERT_MSG((size) % STACK_ALIGN == 0 && (size) >= STACK_ALIGN \
&& (u32_t)(start) % STACK_ALIGN == 0, \
"the size of the partition must align with 32" \
" and greater than or equal to 32." \
"start address of the partition must align with 32.")
#endif
#endif /* CONFIG_ARC_MPU*/
#endif /* CONFIG_USERSPACE */
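For example, a partition that passes the v2 _ARCH_MEM_PARTITION_ALIGN_CHECK() above needs a power-of-two size no smaller than the MPU alignment and a start address aligned to that size. A sketch with hypothetical names, using K_MEM_PARTITION_DEFINE (the kernel-level wrapper expected to apply this check):

/* 4 KB: power-of-two size, start aligned to the size, so it passes on v2.
 * On v3 only 32-byte alignment of start and size would be required.
 */
static u8_t __aligned(4096) shared_pool[4096];

K_MEM_PARTITION_DEFINE(shared_part, shared_pool, sizeof(shared_pool),
		       K_MEM_PARTITION_P_RW_U_RO);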
#ifndef _ASMLANGUAGE
/* Typedef for the k_mem_partition attribute*/
typedef u32_t k_mem_partition_attr_t;
#endif /* _ASMLANGUAGE */
#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE
/* Syscall invocation macros. ARC-specific machine constraints are used to
* ensure args land in the proper registers. Currently these are all stub
* functions, just to enable CONFIG_USERSPACE on ARC without errors.
*/
static inline u32_t _arch_syscall_invoke6(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t arg6,
u32_t call_id)
{
return 0;
}
static inline u32_t _arch_syscall_invoke5(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t arg5, u32_t call_id)
{
return 0;
}
static inline u32_t _arch_syscall_invoke4(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t arg4, u32_t call_id)
{
return 0;
}
static inline u32_t _arch_syscall_invoke3(u32_t arg1, u32_t arg2, u32_t arg3,
u32_t call_id)
{
return 0;
}
static inline u32_t _arch_syscall_invoke2(u32_t arg1, u32_t arg2, u32_t call_id)
{
return 0;
}
static inline u32_t _arch_syscall_invoke1(u32_t arg1, u32_t call_id)
{
return 0;
}
static inline u32_t _arch_syscall_invoke0(u32_t call_id)
{
return 0;
}
static inline int _arch_is_user_context(void)
{
return 0;
}
#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#ifdef __cplusplus
}
#endif


@ -21,6 +21,8 @@
#include <linker/linker-defs.h>
#include <linker/linker-tool.h>
#define KOBJECT_TEXT_AREA 256
/* physical address of RAM */
#ifdef CONFIG_HARVARD
#define ROMABLE_REGION ICCM
@ -81,9 +83,12 @@ SECTIONS {
*(".text.*")
*(.gnu.linkonce.t.*)
_image_text_end = .;
#include <linker/kobject-text.ld>
} GROUP_LINK_IN(ROMABLE_REGION)
_image_text_end = .;
_image_rodata_start = .;
#include <linker/common-rom.ld>
SECTION_PROLOGUE(_RODATA_SECTION_NAME,,) {
@ -98,34 +103,49 @@ SECTIONS {
#include <custom-rodata.ld>
#endif
#include <linker/kobject-rom.ld>
} GROUP_LINK_IN(ROMABLE_REGION)
_image_rodata_end = .;
_image_rom_end = .;
__data_rom_start = ALIGN(4); /* XIP imaged DATA ROM start addr */
GROUP_END(ROMABLE_REGION)
GROUP_START(RAMABLE_REGION)
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,) {
/* when XIP, .text is in ROM, but vector table must be at start of .data */
#ifdef CONFIG_APPLICATION_MEMORY
SECTION_DATA_PROLOGUE(_APP_DATA_SECTION_NAME, (OPTIONAL),)
{
__app_ram_start = .;
__app_data_ram_start = .;
_image_ram_start = .;
__data_ram_start = .;
*(.data)
*(".data.*")
#ifdef CONFIG_CUSTOM_RWDATA_LD
/* Located in project source directory */
#include <custom-rwdata.ld>
#endif
APP_INPUT_SECTION(.data)
APP_INPUT_SECTION(".data.*")
__app_data_ram_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
#include <linker/common-ram.ld>
__app_data_rom_start = LOADADDR(_APP_DATA_SECTION_NAME);
__data_ram_end = .;
SECTION_PROLOGUE(_APP_BSS_SECTION_NAME, (NOLOAD OPTIONAL),)
{
__app_bss_start = .;
APP_INPUT_SECTION(.bss)
APP_INPUT_SECTION(".bss.*")
APP_INPUT_SECTION(COMMON)
__app_bss_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
__app_bss_num_words = (__app_bss_end - __app_bss_start) >> 2;
SECTION_PROLOGUE(_APP_NOINIT_SECTION_NAME, (NOLOAD OPTIONAL),)
{
APP_INPUT_SECTION(.noinit)
APP_INPUT_SECTION(".noinit.*")
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
__app_ram_end = .;
#endif /* CONFIG_APPLICATION_MEMORY */
SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),) {
/*
@ -134,9 +154,16 @@ SECTIONS {
*/
. = ALIGN(4);
__bss_start = .;
*(.bss)
*(".bss.*")
COMMON_SYMBOLS
#ifndef CONFIG_APPLICATION_MEMORY
_image_ram_start = .;
#endif
__kernel_ram_start = .;
KERNEL_INPUT_SECTION(.bss)
KERNEL_INPUT_SECTION(".bss.*")
KERNEL_INPUT_SECTION(COMMON)
*(".kernel_bss.*")
/*
* BSP clears this memory in words only and doesn't clear any
* potential left over bytes.
@ -149,27 +176,43 @@ SECTIONS {
* This section is used for non-initialized objects that
* will not be cleared during the boot process.
*/
*(.noinit)
*(".noinit.*")
KERNEL_INPUT_SECTION(.noinit)
KERNEL_INPUT_SECTION(".noinit.*")
*(".kernel_noinit.*")
} GROUP_LINK_IN(RAMABLE_REGION)
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,) {
/* when XIP, .text is in ROM, but vector table must be at start of .data */
__data_ram_start = .;
KERNEL_INPUT_SECTION(.data)
KERNEL_INPUT_SECTION(".data.*")
*(".kernel.*")
#ifdef CONFIG_CUSTOM_RWDATA_LD
/* Located in project source directory */
#include <custom-rwdata.ld>
#endif
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__data_rom_start = LOADADDR(_DATA_SECTION_NAME);
#include <linker/common-ram.ld>
#include <linker/kobject.ld>
__data_ram_end = .;
/* Define linker symbols */
_image_ram_end = .;
_end = .; /* end of image */
__kernel_ram_end = .;
__kernel_ram_size = __kernel_ram_end - __kernel_ram_start;
GROUP_END(RAMABLE_REGION)
/* Data Closely Coupled Memory (DCCM) */
GROUP_START(DCCM)
GROUP_END(DCCM)
SECTION_PROLOGUE(initlevel_error, (OPTIONAL),)
{
DEVICE_INIT_UNDEFINED_SECTION()
}
ASSERT(SIZEOF(initlevel_error) == 0, "Undefined initialization levels used.")
#ifdef CONFIG_CUSTOM_SECTIONS_LD
/* Located in project source directory */
#include <custom-sections.ld>


@ -16,7 +16,7 @@ extern "C" {
* attributes.
*
* Each MPU is different and has a different set of attributes, hence instead
* of having the attributes at this level the arm_mpu_core defines the intent
* of having the attributes at this level the arc_mpu_core defines the intent
* types.
* An intent type (i.e. THREAD_STACK_GUARD) can correspond to a different set
* of operations and attributes for each MPU and it is responsibility of the
@ -27,9 +27,10 @@ extern "C" {
* If one of the operations corresponding to an intent fails the error has to
* be managed inside the MPU driver and not escalated.
*/
/* Thread Stack Region Intent Type */
/* Thread Region Intent Type */
#define THREAD_STACK_REGION 0x1
#define THREAD_STACK_GUARD_REGION 0x2
#define THREAD_DOMAIN_PARTITION_REGION 0x3
#if defined(CONFIG_ARC_CORE_MPU)
/* ARC Core MPU Driver API */
@ -50,7 +51,7 @@ void arc_core_mpu_enable(void);
void arc_core_mpu_disable(void);
/*
* Before configure the MPU regions, mpu should be disabled
* Before configuring the MPU regions, the MPU should be disabled
*/
/**
* @brief configure the default region
@ -60,7 +61,7 @@ void arc_core_mpu_disable(void);
void arc_core_mpu_default(u32_t region_attr);
/**
* @brief configure the mpu region
* @brief configure the MPU region
*
* @param index MPU region index
* @param base base address
@ -80,6 +81,40 @@ void arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
void arc_core_mpu_configure(u8_t type, u32_t base, u32_t size);
#endif /* CONFIG_ARC_CORE_MPU */
#if defined(CONFIG_MPU_STACK_GUARD)
/**
* @brief Configure MPU stack guard
*
* This function configures the per-thread stack guard by reprogramming the MPU.
* The functionality is meant to be used during context switch.
*
* @param thread thread info data structure.
*/
void configure_mpu_stack_guard(struct k_thread *thread);
#endif
#if defined(CONFIG_USERSPACE)
void arc_core_mpu_configure_mem_domain(struct k_mem_domain *mem_domain);
void arc_core_mpu_mem_partition_remove(u32_t part_index);
void arc_core_mpu_configure_mem_partition(u32_t part_index,
struct k_mem_partition *part);
int arc_core_mpu_get_max_domain_partition_regions(void);
int arc_core_mpu_buffer_validate(void *addr, size_t size, int write);
/*
* @brief Configure MPU memory domain
*
* This function configures the per-thread memory domain by reprogramming the MPU.
* The functionality is meant to be used during context switch.
*
* @param thread thread info data structure.
*/
void configure_mpu_mem_domain(struct k_thread *thread);
#endif
#ifdef __cplusplus
}
#endif