arch: Rename reserved function names

Rename reserved function names in the arch/ subdirectory. The Python
script gen_priv_stacks.py was updated to follow the 'z_' prefix
naming convention.
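
For illustration, a minimal sketch of the pattern, using one symbol that
appears in the diff below:

    void _irq_do_offload(void);   /* file-scope names starting with '_' are reserved in C */
    void z_irq_do_offload(void);  /* renamed with the kernel-internal 'z_' prefix */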

Signed-off-by: Patrik Flykt <patrik.flykt@intel.com>
commit 7c0a245d32
Author:    Patrik Flykt
Committer: Anas Nashif
Date:      2019-03-14 09:20:46 -06:00
109 changed files with 556 additions and 558 deletions


@@ -151,9 +151,9 @@ void _Fault(NANO_ESF *esf)
 }
 #endif
-vector = _ARC_V2_ECR_VECTOR(ecr);
-code = _ARC_V2_ECR_CODE(ecr);
-parameter = _ARC_V2_ECR_PARAMETER(ecr);
+vector = Z_ARC_V2_ECR_VECTOR(ecr);
+code = Z_ARC_V2_ECR_CODE(ecr);
+parameter = Z_ARC_V2_ECR_PARAMETER(ecr);
 /* exception raised by kernel */


@@ -19,7 +19,7 @@
 #include <syscall.h>
 GTEXT(_Fault)
-GTEXT(_do_kernel_oops)
+GTEXT(z_do_kernel_oops)
 GTEXT(__reset)
 GTEXT(__memory_error)
 GTEXT(__instruction_error)
@@ -35,7 +35,7 @@ GTEXT(__ev_div_zero)
 GTEXT(__ev_dc_error)
 GTEXT(__ev_maligned)
 #ifdef CONFIG_IRQ_OFFLOAD
-GTEXT(_irq_do_offload);
+GTEXT(z_irq_do_offload);
 #endif
 GDATA(exc_nest_count)
@@ -215,7 +215,7 @@ _do_non_syscall_trap:
 exc_nest_handle:
 push_s r0
-jl _irq_do_offload
+jl z_irq_do_offload
 pop sp


@@ -40,7 +40,7 @@ void z_arch_irq_enable(unsigned int irq)
 {
 unsigned int key = irq_lock();
-_arc_v2_irq_unit_int_enable(irq);
+z_arc_v2_irq_unit_int_enable(irq);
 irq_unlock(key);
 }
@@ -57,7 +57,7 @@ void z_arch_irq_disable(unsigned int irq)
 {
 unsigned int key = irq_lock();
-_arc_v2_irq_unit_int_disable(irq);
+z_arc_v2_irq_unit_int_disable(irq);
 irq_unlock(key);
 }
@@ -83,7 +83,7 @@ void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
 __ASSERT(prio < CONFIG_NUM_IRQ_PRIO_LEVELS,
 "invalid priority %d for irq %d", prio, irq);
-_arc_v2_irq_unit_prio_set(irq, prio);
+z_arc_v2_irq_unit_prio_set(irq, prio);
 irq_unlock(key);
 }


@@ -15,7 +15,7 @@ static irq_offload_routine_t offload_routine;
 static void *offload_param;
 /* Called by trap_s exception handler */
-void _irq_do_offload(void)
+void z_irq_do_offload(void)
 {
 offload_routine(offload_param);
 }


@@ -21,7 +21,7 @@ LOG_MODULE_REGISTER(mpu);
 * @brief Get the number of supported MPU regions
 *
 */
-static inline u8_t _get_num_regions(void)
+static inline u8_t get_num_regions(void)
 {
 u32_t num = z_arc_v2_aux_reg_read(_ARC_V2_MPU_BUILD);
@@ -34,7 +34,7 @@ static inline u8_t _get_num_regions(void)
 * This internal function is utilized by the MPU driver to parse the intent
 * type (i.e. THREAD_STACK_REGION) and return the correct parameter set.
 */
-static inline u32_t _get_region_attr_by_type(u32_t type)
+static inline u32_t get_region_attr_by_type(u32_t type)
 {
 switch (type) {
 case THREAD_STACK_USER_REGION:
@@ -52,7 +52,6 @@ static inline u32_t _get_region_attr_by_type(u32_t type)
 }
 }
 #if CONFIG_ARC_MPU_VER == 2
 #include "arc_mpu_v2_internal.h"
 #elif CONFIG_ARC_MPU_VER == 3


@@ -57,7 +57,7 @@ static inline void _region_init(u32_t index, u32_t region_addr, u32_t size,
 * This internal function is utilized by the MPU driver to parse the intent
 * type (i.e. THREAD_STACK_REGION) and return the correct region index.
 */
-static inline int _get_region_index_by_type(u32_t type)
+static inline int get_region_index_by_type(u32_t type)
 {
 /*
 * The new MPU regions are allocated per type after the statically
@@ -71,21 +71,21 @@ static inline int _get_region_index_by_type(u32_t type)
 */
 switch (type) {
 case THREAD_STACK_USER_REGION:
-return _get_num_regions() - mpu_config.num_regions
+return get_num_regions() - mpu_config.num_regions
 - THREAD_STACK_REGION;
 case THREAD_STACK_REGION:
 case THREAD_APP_DATA_REGION:
 case THREAD_STACK_GUARD_REGION:
-return _get_num_regions() - mpu_config.num_regions - type;
+return get_num_regions() - mpu_config.num_regions - type;
 case THREAD_DOMAIN_PARTITION_REGION:
 #if defined(CONFIG_MPU_STACK_GUARD)
-return _get_num_regions() - mpu_config.num_regions - type;
+return get_num_regions() - mpu_config.num_regions - type;
 #else
 /*
 * Start domain partition region from stack guard region
 * since stack guard is not enabled.
 */
-return _get_num_regions() - mpu_config.num_regions - type + 1;
+return get_num_regions() - mpu_config.num_regions - type + 1;
 #endif
 default:
 __ASSERT(0, "Unsupported type");
@@ -154,8 +154,8 @@ static inline bool _is_user_accessible_region(u32_t r_index, int write)
 */
 static inline int _mpu_configure(u8_t type, u32_t base, u32_t size)
 {
-s32_t region_index = _get_region_index_by_type(type);
-u32_t region_attr = _get_region_attr_by_type(type);
+s32_t region_index = get_region_index_by_type(type);
+u32_t region_attr = get_region_attr_by_type(type);
 LOG_DBG("Region info: 0x%x 0x%x", base, size);
@@ -283,7 +283,7 @@ void arc_core_mpu_default(u32_t region_attr)
 int arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
 u32_t region_attr)
 {
-if (index >= _get_num_regions()) {
+if (index >= get_num_regions()) {
 return -EINVAL;
 }
@@ -304,7 +304,7 @@ int arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
 void arc_core_mpu_configure_mem_domain(struct k_thread *thread)
 {
 int region_index =
-_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
+get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
 u32_t num_partitions;
 struct k_mem_partition *pparts;
 struct k_mem_domain *mem_domain = NULL;
@@ -348,7 +348,7 @@ void arc_core_mpu_remove_mem_domain(struct k_mem_domain *mem_domain)
 ARG_UNUSED(mem_domain);
 int region_index =
-_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
+get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
 for (; region_index >= 0; region_index--) {
 _region_init(region_index, 0, 0, 0);
@@ -367,7 +367,7 @@ void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain,
 ARG_UNUSED(domain);
 int region_index =
-_get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
+get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION);
 LOG_DBG("disable region 0x%x", region_index + part_id);
 /* Disable region */
@@ -379,7 +379,7 @@ void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain,
 */
 int arc_core_mpu_get_max_domain_partition_regions(void)
 {
-return _get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION) + 1;
+return get_region_index_by_type(THREAD_DOMAIN_PARTITION_REGION) + 1;
 }
 /**
@@ -395,7 +395,7 @@ int arc_core_mpu_buffer_validate(void *addr, size_t size, int write)
 * matched region that grants permission or denies access.
 *
 */
-for (r_index = 0; r_index < _get_num_regions(); r_index++) {
+for (r_index = 0; r_index < get_num_regions(); r_index++) {
 if (!_is_enabled_region(r_index) ||
 !_is_in_region(r_index, (u32_t)addr, size)) {
 continue;
@@ -426,7 +426,7 @@ static int arc_mpu_init(struct device *arg)
 u32_t num_regions;
 u32_t i;
-num_regions = _get_num_regions();
+num_regions = get_num_regions();
 /* ARC MPU supports up to 16 Regions */
 if (mpu_config.num_regions > num_regions) {


@@ -148,7 +148,7 @@ static inline int _mpu_probe(u32_t addr)
 */
 static inline int _dynamic_region_allocate_index(void)
 {
-if (dynamic_region_index >= _get_num_regions()) {
+if (dynamic_region_index >= get_num_regions()) {
 LOG_ERR("no enough mpu entries %d", dynamic_region_index);
 return -EINVAL;
 }
@@ -314,7 +314,7 @@ static int _dynamic_region_allocate_and_init(u32_t base, u32_t size,
 static void _mpu_reset_dynamic_regions(void)
 {
 u32_t i;
-u32_t num_regions = _get_num_regions();
+u32_t num_regions = get_num_regions();
 for (i = static_regions_num; i < num_regions; i++) {
 _region_init(i, 0, 0, 0);
@@ -341,7 +341,7 @@ static void _mpu_reset_dynamic_regions(void)
 */
 static inline int _mpu_configure(u8_t type, u32_t base, u32_t size)
 {
-u32_t region_attr = _get_region_attr_by_type(type);
+u32_t region_attr = get_region_attr_by_type(type);
 return _dynamic_region_allocate_and_init(base, size, region_attr);
 }
@@ -493,7 +493,7 @@ void arc_core_mpu_default(u32_t region_attr)
 int arc_core_mpu_region(u32_t index, u32_t base, u32_t size,
 u32_t region_attr)
 {
-if (index >= _get_num_regions()) {
+if (index >= get_num_regions()) {
 return -EINVAL;
 }
@@ -576,7 +576,7 @@ void arc_core_mpu_remove_mem_partition(struct k_mem_domain *domain,
 int arc_core_mpu_get_max_domain_partition_regions(void)
 {
 /* consider the worst case: each partition requires split */
-return (_get_num_regions() - MPU_REGION_NUM_FOR_THREAD) / 2;
+return (get_num_regions() - MPU_REGION_NUM_FOR_THREAD) / 2;
 }
 /**
@@ -619,7 +619,7 @@ static int arc_mpu_init(struct device *arg)
 u32_t num_regions;
 u32_t i;
-num_regions = _get_num_regions();
+num_regions = get_num_regions();
 /* ARC MPU supports up to 16 Regions */
 if (mpu_config.num_regions > num_regions) {


@@ -117,7 +117,7 @@ extern FUNC_NORETURN void z_cstart(void);
 void _PrepC(void)
 {
-_icache_setup();
+z_icache_setup();
 adjust_vector_table_base();
 z_bss_zero();
 z_data_copy();


@@ -141,8 +141,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 #endif
 }
-_new_thread_init(thread, pStackMem, stackAdjSize, priority, options);
+z_new_thread_init(thread, pStackMem, stackAdjSize, priority, options);
 /* carve the thread entry struct from the "base" of
 the privileged stack */
@@ -152,9 +151,9 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 /* fill init context */
 pInitCtx->status32 = 0U;
 if (options & K_USER) {
-pInitCtx->pc = ((u32_t)_user_thread_entry_wrapper);
+pInitCtx->pc = ((u32_t)z_user_thread_entry_wrapper);
 } else {
-pInitCtx->pc = ((u32_t)_thread_entry_wrapper);
+pInitCtx->pc = ((u32_t)z_thread_entry_wrapper);
 }
 /*
@@ -168,7 +167,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 #else /* For no USERSPACE feature */
 stackEnd = pStackMem + stackSize;
-_new_thread_init(thread, pStackMem, stackSize, priority, options);
+z_new_thread_init(thread, pStackMem, stackSize, priority, options);
 stackAdjEnd = stackEnd;
@@ -177,7 +176,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
 sizeof(struct init_stack_frame));
 pInitCtx->status32 = 0U;
-pInitCtx->pc = ((u32_t)_thread_entry_wrapper);
+pInitCtx->pc = ((u32_t)z_thread_entry_wrapper);
 #endif
 #ifdef CONFIG_ARC_HAS_SECURE
@@ -270,7 +269,7 @@ FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
 /* need to lock cpu here ? */
 configure_mpu_thread(_current);
-_arc_userspace_enter(user_entry, p1, p2, p3,
+z_arc_userspace_enter(user_entry, p1, p2, p3,
 (u32_t)_current->stack_obj,
 _current->stack_info.size);
 CODE_UNREACHABLE;


@@ -14,7 +14,7 @@
 #include <toolchain.h>
 #include <linker/sections.h>
-GTEXT(_thread_entry_wrapper)
+GTEXT(z_thread_entry_wrapper)
 /*
 * @brief Wrapper for z_thread_entry
@@ -25,7 +25,7 @@ GTEXT(_thread_entry_wrapper)
 * @return N/A
 */
-SECTION_FUNC(TEXT, _thread_entry_wrapper)
+SECTION_FUNC(TEXT, z_thread_entry_wrapper)
 pop_s r3
 pop_s r2


@@ -44,9 +44,9 @@
 mov r13, 0
 .endm
-GTEXT(_arc_userspace_enter)
+GTEXT(z_arc_userspace_enter)
 GTEXT(_arc_do_syscall)
-GTEXT(_user_thread_entry_wrapper)
+GTEXT(z_user_thread_entry_wrapper)
 GTEXT(z_arch_user_string_nlen)
 GTEXT(z_arch_user_string_nlen_fault_start)
 GTEXT(z_arch_user_string_nlen_fault_end)
@@ -57,7 +57,7 @@ GTEXT(z_arch_user_string_nlen_fixup)
 *
 * @return N/A
 */
-SECTION_FUNC(TEXT, _user_thread_entry_wrapper)
+SECTION_FUNC(TEXT, z_user_thread_entry_wrapper)
 pop_s r3
 pop_s r2
 pop_s r1
@@ -74,7 +74,7 @@ SECTION_FUNC(TEXT, _user_thread_entry_wrapper)
 /*
 * when CONFIG_INIT_STACKS is enable, stack will be initialized
-* in _new_thread_init.
+* in z_new_thread_init.
 */
 j _arc_go_to_user_space
@@ -87,7 +87,7 @@ SECTION_FUNC(TEXT, _user_thread_entry_wrapper)
 * not transition back later, unless they are doing system calls.
 *
 */
-SECTION_FUNC(TEXT, _arc_userspace_enter)
+SECTION_FUNC(TEXT, z_arc_userspace_enter)
 /*
 * In ARCv2, the U bit can only be set through exception return
 */
@@ -151,7 +151,7 @@ _arc_go_to_user_space:
 lr r0, [_ARC_V2_STATUS32]
 bset r0, r0, _ARC_V2_STATUS32_U_BIT
-mov r1, _thread_entry_wrapper
+mov r1, z_thread_entry_wrapper
 /* fake exception return */
 kflag _ARC_V2_STATUS32_AE


@@ -33,7 +33,7 @@ extern "C" {
 static ALWAYS_INLINE void kernel_arch_init(void)
 {
-_irq_setup();
+z_irq_setup();
 }
 static ALWAYS_INLINE void
@@ -49,7 +49,7 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
 *
 * @return IRQ number
 */
-static ALWAYS_INLINE int _INTERRUPT_CAUSE(void)
+static ALWAYS_INLINE int Z_INTERRUPT_CAUSE(void)
 {
 u32_t irq_num = z_arc_v2_aux_reg_read(_ARC_V2_ICAUSE);
@@ -58,10 +58,10 @@ static ALWAYS_INLINE int _INTERRUPT_CAUSE(void)
 #define z_is_in_isr z_arc_v2_irq_unit_is_in_isr
-extern void _thread_entry_wrapper(void);
-extern void _user_thread_entry_wrapper(void);
-extern void _arc_userspace_enter(k_thread_entry_t user_entry, void *p1,
+extern void z_thread_entry_wrapper(void);
+extern void z_user_thread_entry_wrapper(void);
+extern void z_arc_userspace_enter(k_thread_entry_t user_entry, void *p1,
 void *p2, void *p3, u32_t stack, u32_t size);
 #endif /* _ASMLANGUAGE */


@@ -24,9 +24,9 @@ extern "C" {
 *
 * @return The key of the interrupt that is currently being processed.
 */
-int _sys_current_irq_key_get(void)
+int z_sys_current_irq_key_get(void)
 {
-return _INTERRUPT_CAUSE();
+return Z_INTERRUPT_CAUSE();
 }
 #ifdef __cplusplus


@@ -34,7 +34,7 @@ extern "C" {
 *
 * Enables the i-cache and sets it to direct access mode.
 */
-static ALWAYS_INLINE void _icache_setup(void)
+static ALWAYS_INLINE void z_icache_setup(void)
 {
 u32_t icache_config = (
 IC_CACHE_DIRECT | /* direct mapping (one-way assoc.) */


@@ -36,11 +36,11 @@ extern "C" {
 extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
 /*
-* _irq_setup
+* z_irq_setup
 *
 * Configures interrupt handling parameters
 */
-static ALWAYS_INLINE void _irq_setup(void)
+static ALWAYS_INLINE void z_irq_setup(void)
 {
 u32_t aux_irq_ctrl_value = (
 _ARC_V2_AUX_IRQ_CTRL_LOOP_REGS | /* save lp_xxx registers */


@@ -26,7 +26,7 @@ static u8_t static_regions_num;
 /**
 * Get the number of supported MPU regions.
 */
-static inline u8_t _get_num_regions(void)
+static inline u8_t get_num_regions(void)
 {
 #if defined(CONFIG_CPU_CORTEX_M0PLUS) || \
 defined(CONFIG_CPU_CORTEX_M3) || \
@@ -57,11 +57,11 @@ static inline u8_t _get_num_regions(void)
 #error "Unsupported ARM CPU"
 #endif
-static int _region_allocate_and_init(const u8_t index,
+static int region_allocate_and_init(const u8_t index,
 const struct arm_mpu_region *region_conf)
 {
 /* Attempt to allocate new region index. */
-if (index > (_get_num_regions() - 1)) {
+if (index > (get_num_regions() - 1)) {
 /* No available MPU region index. */
 LOG_ERR("Failed to allocate new MPU region %u\n", index);
@@ -71,7 +71,7 @@ static int _region_allocate_and_init(const u8_t index,
 LOG_DBG("Program MPU region at index 0x%x", index);
 /* Program region */
-_region_init(index, region_conf);
+region_init(index, region_conf);
 return index;
 }
@@ -79,7 +79,7 @@ static int _region_allocate_and_init(const u8_t index,
 /* This internal function programs an MPU region
 * of a given configuration at a given MPU index.
 */
-static int _mpu_configure_region(const u8_t index,
+static int mpu_configure_region(const u8_t index,
 const struct k_mem_partition *new_region)
 {
 struct arm_mpu_region region_conf;
@@ -88,11 +88,11 @@ static int _mpu_configure_region(const u8_t index,
 /* Populate internal ARM MPU region configuration structure. */
 region_conf.base = new_region->start;
-_get_region_attr_from_k_mem_partition_info(&region_conf.attr,
+get_region_attr_from_k_mem_partition_info(&region_conf.attr,
 &new_region->attr, new_region->start, new_region->size);
 /* Allocate and program region */
-return _region_allocate_and_init(index,
+return region_allocate_and_init(index,
 (const struct arm_mpu_region *)&region_conf);
 }
@@ -135,20 +135,20 @@ void arm_core_mpu_mem_partition_config_update(
 {
 /* Find the partition. ASSERT if not found. */
 u8_t i;
-u8_t reg_index = _get_num_regions();
-for (i = _get_dyn_region_min_index(); i < _get_num_regions(); i++) {
-if (!_is_enabled_region(i)) {
+u8_t reg_index = get_num_regions();
+for (i = get_dyn_region_min_index(); i < get_num_regions(); i++) {
+if (!is_enabled_region(i)) {
 continue;
 }
-u32_t base = _mpu_region_get_base(i);
+u32_t base = mpu_region_get_base(i);
 if (base != partition->start) {
 continue;
 }
-u32_t size = _mpu_region_get_size(i);
+u32_t size = mpu_region_get_size(i);
 if (size != partition->size) {
 continue;
@@ -158,12 +158,12 @@ void arm_core_mpu_mem_partition_config_update(
 reg_index = i;
 break;
 }
-__ASSERT(reg_index != _get_num_regions(),
+__ASSERT(reg_index != get_num_regions(),
 "Memory domain partition not found\n");
 /* Modify the permissions */
 partition->attr = *new_attr;
-_mpu_configure_region(reg_index, partition);
+mpu_configure_region(reg_index, partition);
 }
 /**
@@ -172,7 +172,7 @@ void arm_core_mpu_mem_partition_config_update(
 */
 int arm_core_mpu_get_max_available_dyn_regions(void)
 {
-return _get_num_regions() - static_regions_num;
+return get_num_regions() - static_regions_num;
 }
 /**
@@ -182,7 +182,7 @@ int arm_core_mpu_get_max_available_dyn_regions(void)
 */
 int arm_core_mpu_buffer_validate(void *addr, size_t size, int write)
 {
-return _mpu_buffer_validate(addr, size, write);
+return mpu_buffer_validate(addr, size, write);
 }
 #endif /* CONFIG_USERSPACE */
@@ -194,8 +194,8 @@ void arm_core_mpu_configure_static_mpu_regions(const struct k_mem_partition
 *static_regions[], const u8_t regions_num,
 const u32_t background_area_start, const u32_t background_area_end)
 {
-if (_mpu_configure_static_mpu_regions(static_regions, regions_num,
+if (mpu_configure_static_mpu_regions(static_regions, regions_num,
 background_area_start, background_area_end) == -EINVAL) {
 __ASSERT(0, "Configuring %u static MPU regions failed\n",
 regions_num);
@@ -210,8 +210,8 @@ void arm_core_mpu_mark_areas_for_dynamic_regions(
 const struct k_mem_partition dyn_region_areas[],
 const u8_t dyn_region_areas_num)
 {
-if (_mpu_mark_areas_for_dynamic_regions(dyn_region_areas,
+if (mpu_mark_areas_for_dynamic_regions(dyn_region_areas,
 dyn_region_areas_num) == -EINVAL) {
 __ASSERT(0, "Marking %u areas for dynamic regions failed\n",
 dyn_region_areas_num);
@@ -225,7 +225,7 @@ void arm_core_mpu_mark_areas_for_dynamic_regions(
 void arm_core_mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
 *dynamic_regions[], u8_t regions_num)
 {
-if (_mpu_configure_dynamic_mpu_regions(dynamic_regions, regions_num)
+if (mpu_configure_dynamic_mpu_regions(dynamic_regions, regions_num)
 == -EINVAL) {
 __ASSERT(0, "Configuring %u dynamic MPU regions failed\n",
@@ -245,7 +245,7 @@ static int arm_mpu_init(struct device *arg)
 {
 u32_t r_index;
-if (mpu_config.num_regions > _get_num_regions()) {
+if (mpu_config.num_regions > get_num_regions()) {
 /* Attempt to configure more MPU regions than
 * what is supported by hardware. As this operation
 * is executed during system (pre-kernel) initialization,
@@ -255,21 +255,21 @@ static int arm_mpu_init(struct device *arg)
 __ASSERT(0,
 "Request to configure: %u regions (supported: %u)\n",
 mpu_config.num_regions,
-_get_num_regions()
+get_num_regions()
 );
 return -1;
 }
-LOG_DBG("total region count: %d", _get_num_regions());
+LOG_DBG("total region count: %d", get_num_regions());
 arm_core_mpu_disable();
 /* Architecture-specific configuration */
-_mpu_init();
+mpu_init();
 /* Program fixed regions configured at SOC definition. */
 for (r_index = 0U; r_index < mpu_config.num_regions; r_index++) {
-_region_init(r_index, &mpu_config.mpu_regions[r_index]);
+region_init(r_index, &mpu_config.mpu_regions[r_index]);
 }
 /* Update the number of programmed MPU regions. */


@@ -13,7 +13,7 @@
 #include <logging/log.h>
 /* Global MPU configuration at system initialization. */
-static void _mpu_init(void)
+static void mpu_init(void)
 {
 /* No specific configuration at init for ARMv7-M MPU. */
 }
@@ -23,7 +23,7 @@ static void _mpu_init(void)
 * Note:
 * The caller must provide a valid region index.
 */
-static void _region_init(const u32_t index,
+static void region_init(const u32_t index,
 const struct arm_mpu_region *region_conf)
 {
 /* Select the region you want to access */
@@ -44,7 +44,7 @@ static void _region_init(const u32_t index,
 * @param part Pointer to the data structure holding the partition
 * information (must be valid).
 */
-static int _mpu_partition_is_valid(const struct k_mem_partition *part)
+static int mpu_partition_is_valid(const struct k_mem_partition *part)
 {
 /* Partition size must be power-of-two,
 * and greater or equal to the minimum
@@ -69,7 +69,7 @@ static int _mpu_partition_is_valid(const struct k_mem_partition *part)
 * power-of-two value, and the returned SIZE field value corresponds
 * to that power-of-two value.
 */
-static inline u32_t _size_to_mpu_rasr_size(u32_t size)
+static inline u32_t size_to_mpu_rasr_size(u32_t size)
 {
 /* The minimal supported region size is 32 bytes */
 if (size <= 32U) {
@@ -94,7 +94,7 @@ static inline u32_t _size_to_mpu_rasr_size(u32_t size)
 * region attribute configuration and size and fill-in a driver-specific
 * structure with the correct MPU region configuration.
 */
-static inline void _get_region_attr_from_k_mem_partition_info(
+static inline void get_region_attr_from_k_mem_partition_info(
 arm_mpu_region_attr_t *p_attr,
 const k_mem_partition_attr_t *attr, u32_t base, u32_t size)
 {
@@ -103,7 +103,7 @@ static inline void _get_region_attr_from_k_mem_partition_info(
 */
 (void) base;
-p_attr->rasr = attr->rasr_attr | _size_to_mpu_rasr_size(size);
+p_attr->rasr = attr->rasr_attr | size_to_mpu_rasr_size(size);
 }
 #if defined(CONFIG_USERSPACE)
@@ -115,7 +115,7 @@ static inline void _get_region_attr_from_k_mem_partition_info(
 * Trivial for ARMv7-M MPU, where dynamic memory areas are programmed
 * in MPU regions indices right after the static regions.
 */
-static inline int _get_dyn_region_min_index(void)
+static inline int get_dyn_region_min_index(void)
 {
 return static_regions_num;
 }
@@ -124,23 +124,23 @@ static inline int _get_dyn_region_min_index(void)
 * This internal function converts the SIZE field value of MPU_RASR
 * to the region size (in bytes).
 */
-static inline u32_t _mpu_rasr_size_to_size(u32_t rasr_size)
+static inline u32_t mpu_rasr_size_to_size(u32_t rasr_size)
 {
 return 1 << (rasr_size + 1);
 }
-static inline u32_t _mpu_region_get_base(u32_t index)
+static inline u32_t mpu_region_get_base(u32_t index)
 {
 MPU->RNR = index;
 return MPU->RBAR & MPU_RBAR_ADDR_Msk;
 }
-static inline u32_t _mpu_region_get_size(u32_t index)
+static inline u32_t mpu_region_get_size(u32_t index)
 {
 MPU->RNR = index;
 u32_t rasr_size = (MPU->RASR & MPU_RASR_SIZE_Msk) >> MPU_RASR_SIZE_Pos;
-return _mpu_rasr_size_to_size(rasr_size);
+return mpu_rasr_size_to_size(rasr_size);
 }
 /**
@@ -149,7 +149,7 @@ static inline u32_t _mpu_region_get_size(u32_t index)
 * Note:
 * The caller must provide a valid region number.
 */
-static inline int _is_enabled_region(u32_t index)
+static inline int is_enabled_region(u32_t index)
 {
 MPU->RNR = index;
 return (MPU->RASR & MPU_RASR_ENABLE_Msk) ? 1 : 0;
@@ -167,7 +167,7 @@ static inline int _is_enabled_region(u32_t index)
 * Note:
 * The caller must provide a valid region number.
 */
-static inline u32_t _get_region_ap(u32_t r_index)
+static inline u32_t get_region_ap(u32_t r_index)
 {
 MPU->RNR = r_index;
 return (MPU->RASR & MPU_RASR_AP_Msk) >> MPU_RASR_AP_Pos;
@@ -179,7 +179,7 @@ static inline u32_t _get_region_ap(u32_t r_index)
 * Note:
 * The caller must provide a valid region number.
 */
-static inline int _is_in_region(u32_t r_index, u32_t start, u32_t size)
+static inline int is_in_region(u32_t r_index, u32_t start, u32_t size)
 {
 u32_t r_addr_start;
 u32_t r_size_lshift;
@@ -204,9 +204,9 @@ static inline int _is_in_region(u32_t r_index, u32_t start, u32_t size)
 * Note:
 * The caller must provide a valid region number.
 */
-static inline int _is_user_accessible_region(u32_t r_index, int write)
+static inline int is_user_accessible_region(u32_t r_index, int write)
 {
-u32_t r_ap = _get_region_ap(r_index);
+u32_t r_ap = get_region_ap(r_index);
 if (write) {
@@ -220,14 +220,14 @@ static inline int _is_user_accessible_region(u32_t r_index, int write)
 * This internal function validates whether a given memory buffer
 * is user accessible or not.
 */
-static inline int _mpu_buffer_validate(void *addr, size_t size, int write)
+static inline int mpu_buffer_validate(void *addr, size_t size, int write)
 {
 s32_t r_index;
 /* Iterate all mpu regions in reversed order */
-for (r_index = _get_num_regions() - 1; r_index >= 0; r_index--) {
-if (!_is_enabled_region(r_index) ||
-!_is_in_region(r_index, (u32_t)addr, size)) {
+for (r_index = get_num_regions() - 1; r_index >= 0; r_index--) {
+if (!is_enabled_region(r_index) ||
+!is_in_region(r_index, (u32_t)addr, size)) {
 continue;
 }
@@ -236,7 +236,7 @@ static inline int _mpu_buffer_validate(void *addr, size_t size, int write)
 * we can stop the iteration immediately once we find the
 * matched region that grants permission or denies access.
 */
-if (_is_user_accessible_region(r_index, write)) {
+if (is_user_accessible_region(r_index, write)) {
 return 0;
 } else {
 return -EPERM;
@@ -249,14 +249,14 @@ static inline int _mpu_buffer_validate(void *addr, size_t size, int write)
 #endif /* CONFIG_USERSPACE */
-static int _mpu_configure_region(const u8_t index,
+static int mpu_configure_region(const u8_t index,
 const struct k_mem_partition *new_region);
 /* This internal function programs a set of given MPU regions
 * over a background memory area, optionally performing a
 * sanity check of the memory regions to be programmed.
 */
-static int _mpu_configure_regions(const struct k_mem_partition
+static int mpu_configure_regions(const struct k_mem_partition
 *regions[], u8_t regions_num, u8_t start_reg_index,
 bool do_sanity_check)
 {
@@ -270,12 +270,12 @@ static int _mpu_configure_regions(const struct k_mem_partition
 /* Non-empty region. */
 if (do_sanity_check &&
-(!_mpu_partition_is_valid(regions[i]))) {
+(!mpu_partition_is_valid(regions[i]))) {
 LOG_ERR("Partition %u: sanity check failed.", i);
 return -EINVAL;
 }
-reg_index = _mpu_configure_region(reg_index, regions[i]);
+reg_index = mpu_configure_region(reg_index, regions[i]);
 if (reg_index == -EINVAL) {
 return reg_index;
@@ -296,7 +296,7 @@ static int _mpu_configure_regions(const struct k_mem_partition
 * If the static MPU regions configuration has not been successfully
 * performed, the error signal is propagated to the caller of the function.
 */
-static int _mpu_configure_static_mpu_regions(const struct k_mem_partition
+static int mpu_configure_static_mpu_regions(const struct k_mem_partition
 *static_regions[], const u8_t regions_num,
 const u32_t background_area_base,
 const u32_t background_area_end)
@@ -309,7 +309,7 @@ static int _mpu_configure_static_mpu_regions(const struct k_mem_partition
 ARG_UNUSED(background_area_base);
 ARG_UNUSED(background_area_end);
-mpu_reg_index = _mpu_configure_regions(static_regions,
+mpu_reg_index = mpu_configure_regions(static_regions,
 regions_num, mpu_reg_index, true);
 static_regions_num = mpu_reg_index;
@@ -325,7 +325,7 @@ static int _mpu_configure_static_mpu_regions(const struct k_mem_partition
 * If the dynamic MPU regions configuration has not been successfully
 * performed, the error signal is propagated to the caller of the function.
 */
-static int _mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
+static int mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
 *dynamic_regions[], u8_t regions_num)
 {
 int mpu_reg_index = static_regions_num;
@@ -334,13 +334,13 @@ static int _mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
 * programmed on top of existing SRAM region configuration.
 */
-mpu_reg_index = _mpu_configure_regions(dynamic_regions,
+mpu_reg_index = mpu_configure_regions(dynamic_regions,
 regions_num, mpu_reg_index, false);
 if (mpu_reg_index != -EINVAL) {
 /* Disable the non-programmed MPU regions. */
-for (int i = mpu_reg_index; i < _get_num_regions(); i++) {
+for (int i = mpu_reg_index; i < get_num_regions(); i++) {
 ARM_MPU_ClrRegion(i);
 }
 }


@@ -31,7 +31,7 @@ static struct dynamic_region_info dyn_reg_info[MPU_DYNAMIC_REGION_AREAS_NUM];
 /* Global MPU configuration at system initialization. */
-static void _mpu_init(void)
+static void mpu_init(void)
 {
 /* Configure the cache-ability attributes for all the
 * different types of memory regions.
@@ -57,7 +57,7 @@ static void _mpu_init(void)
 * Note:
 * The caller must provide a valid region index.
 */
-static void _region_init(const u32_t index,
+static void region_init(const u32_t index,
 const struct arm_mpu_region *region_conf)
 {
 ARM_MPU_SetRegion(
@@ -87,7 +87,7 @@ static void _region_init(const u32_t index,
 * @param part Pointer to the data structure holding the partition
 * information (must be valid).
 * */
-static int _mpu_partition_is_valid(const struct k_mem_partition *part)
+static int mpu_partition_is_valid(const struct k_mem_partition *part)
 {
 /* Partition size must be a multiple of the minimum MPU region
 * size. Start address of the partition must align with the
@@ -116,7 +116,7 @@ static int _mpu_partition_is_valid(const struct k_mem_partition *part)
 * needs to be enabled.
 *
 */
-static inline int _get_region_index(u32_t start, u32_t size)
+static inline int get_region_index(u32_t start, u32_t size)
 {
 u32_t region_start_addr = arm_cmse_mpu_region_get(start);
 u32_t region_end_addr = arm_cmse_mpu_region_get(start + size - 1);
@@ -130,33 +130,33 @@ static inline int _get_region_index(u32_t start, u32_t size)
 return -EINVAL;
 }
-static inline u32_t _mpu_region_get_base(const u32_t index)
+static inline u32_t mpu_region_get_base(const u32_t index)
 {
 MPU->RNR = index;
 return MPU->RBAR & MPU_RBAR_BASE_Msk;
 }
-static inline void _mpu_region_set_base(const u32_t index, const u32_t base)
+static inline void mpu_region_set_base(const u32_t index, const u32_t base)
 {
 MPU->RNR = index;
 MPU->RBAR = (MPU->RBAR & (~MPU_RBAR_BASE_Msk))
 | (base & MPU_RBAR_BASE_Msk);
 }
-static inline u32_t _mpu_region_get_last_addr(const u32_t index)
+static inline u32_t mpu_region_get_last_addr(const u32_t index)
 {
 MPU->RNR = index;
 return (MPU->RLAR & MPU_RLAR_LIMIT_Msk) | (~MPU_RLAR_LIMIT_Msk);
 }
-static inline void _mpu_region_set_limit(const u32_t index, const u32_t limit)
+static inline void mpu_region_set_limit(const u32_t index, const u32_t limit)
 {
 MPU->RNR = index;
 MPU->RLAR = (MPU->RLAR & (~MPU_RLAR_LIMIT_Msk))
 | (limit & MPU_RLAR_LIMIT_Msk);
 }
-static inline void _mpu_region_get_access_attr(const u32_t index,
+static inline void mpu_region_get_access_attr(const u32_t index,
 arm_mpu_region_attr_t *attr)
 {
 MPU->RNR = index;
@@ -167,7 +167,7 @@ static inline void _mpu_region_get_access_attr(const u32_t index,
 MPU_RLAR_AttrIndx_Pos;
 }
-static inline void _mpu_region_get_conf(const u32_t index,
+static inline void mpu_region_get_conf(const u32_t index,
 struct arm_mpu_region *region_conf)
 {
 MPU->RNR = index;
@@ -177,7 +177,7 @@ static inline void _mpu_region_get_conf(const u32_t index,
 * - Share-ability
 * - Access Permissions
 */
-_mpu_region_get_access_attr(index, &region_conf->attr);
+mpu_region_get_access_attr(index, &region_conf->attr);
 /* Region base address */
 region_conf->base = (MPU->RBAR & MPU_RBAR_BASE_Msk);
@@ -191,7 +191,7 @@ static inline void _mpu_region_get_conf(const u32_t index,
 * region attribute configuration and size and fill-in a driver-specific
 * structure with the correct MPU region configuration.
 */
-static inline void _get_region_attr_from_k_mem_partition_info(
+static inline void get_region_attr_from_k_mem_partition_info(
 arm_mpu_region_attr_t *p_attr,
 const k_mem_partition_attr_t *attr, u32_t base, u32_t size)
 {
@@ -213,7 +213,7 @@ static inline void _get_region_attr_from_k_mem_partition_info(
 * The function is optimized for the (most common) use-case of a single
 * marked area for dynamic memory regions.
 */
-static inline int _get_dyn_region_min_index(void)
+static inline int get_dyn_region_min_index(void)
 {
 int dyn_reg_min_index = dyn_reg_info[0].index;
 #if MPU_DYNAMIC_REGION_AREAS_NUM > 1
@@ -228,10 +228,10 @@ static inline int _get_dyn_region_min_index(void)
 return dyn_reg_min_index;
 }
-static inline u32_t _mpu_region_get_size(u32_t index)
+static inline u32_t mpu_region_get_size(u32_t index)
 {
-return _mpu_region_get_last_addr(index) + 1
-- _mpu_region_get_base(index);
+return mpu_region_get_last_addr(index) + 1
+- mpu_region_get_base(index);
 }
 /**
@@ -240,7 +240,7 @@ static inline u32_t _mpu_region_get_size(u32_t index)
 * Note:
 * The caller must provide a valid region number.
 */
-static inline int _is_enabled_region(u32_t index)
+static inline int is_enabled_region(u32_t index)
 {
 MPU->RNR = index;
@@ -270,7 +270,7 @@ static inline int _is_enabled_region(u32_t index)
 * in case the fast address range check fails.
 *
 */
-static inline int _mpu_buffer_validate(void *addr, size_t size, int write)
+static inline int mpu_buffer_validate(void *addr, size_t size, int write)
 {
 u32_t _addr = (u32_t)addr;
 u32_t _size = (u32_t)size;
@@ -312,17 +312,17 @@ static inline int _mpu_buffer_validate(void *addr, size_t size, int write)
 #endif /* CONFIG_USERSPACE */
-static int _region_allocate_and_init(const u8_t index,
+static int region_allocate_and_init(const u8_t index,
 const struct arm_mpu_region *region_conf);
-static int _mpu_configure_region(const u8_t index,
+static int mpu_configure_region(const u8_t index,
 const struct k_mem_partition *new_region);
 /* This internal function programs a set of given MPU regions
 * over a background memory area, optionally performing a
 * sanity check of the memory regions to be programmed.
 */
-static int _mpu_configure_regions(const struct k_mem_partition
+static int mpu_configure_regions(const struct k_mem_partition
 *regions[], u8_t regions_num, u8_t start_reg_index,
 bool do_sanity_check)
 {
@@ -336,7 +336,7 @@ static int _mpu_configure_regions(const struct k_mem_partition
 /* Non-empty region. */
 if (do_sanity_check &&
-(!_mpu_partition_is_valid(regions[i]))) {
+(!mpu_partition_is_valid(regions[i]))) {
 LOG_ERR("Partition %u: sanity check failed.", i);
 return -EINVAL;
 }
@@ -345,7 +345,7 @@ static int _mpu_configure_regions(const struct k_mem_partition
 * inside which the new region will be configured.
 */
 int u_reg_index =
-_get_region_index(regions[i]->start, regions[i]->size);
+get_region_index(regions[i]->start, regions[i]->size);
 if ((u_reg_index == -EINVAL) ||
 (u_reg_index > (reg_index - 1))) {
@@ -358,8 +358,8 @@ static int _mpu_configure_regions(const struct k_mem_partition
 * The new memory region is to be placed inside the underlying
 * region, possibly splitting the underlying region into two.
 */
-u32_t u_reg_base = _mpu_region_get_base(u_reg_index);
-u32_t u_reg_last = _mpu_region_get_last_addr(u_reg_index);
+u32_t u_reg_base = mpu_region_get_base(u_reg_index);
+u32_t u_reg_last = mpu_region_get_last_addr(u_reg_index);
 u32_t reg_last = regions[i]->start + regions[i]->size - 1;
 if ((regions[i]->start == u_reg_base) &&
@@ -370,17 +370,17 @@ static int _mpu_configure_regions(const struct k_mem_partition
 * underlying region with those of the new
 * region.
 */
-_mpu_configure_region(u_reg_index, regions[i]);
+mpu_configure_region(u_reg_index, regions[i]);
 } else if (regions[i]->start == u_reg_base) {
 /* The new region starts exactly at the start of the
 * underlying region; the start of the underlying
 * region needs to be set to the end of the new region.
 */
-_mpu_region_set_base(u_reg_index,
+mpu_region_set_base(u_reg_index,
 regions[i]->start + regions[i]->size);
 reg_index =
-_mpu_configure_region(reg_index, regions[i]);
+mpu_configure_region(reg_index, regions[i]);
 if (reg_index == -EINVAL) {
 return reg_index;
@@ -393,11 +393,11 @@ static int _mpu_configure_regions(const struct k_mem_partition
 * region needs to be set to the start of the
 * new region.
 */
-_mpu_region_set_limit(u_reg_index,
+mpu_region_set_limit(u_reg_index,
 regions[i]->start - 1);
 reg_index =
-_mpu_configure_region(reg_index, regions[i]);
+mpu_configure_region(reg_index, regions[i]);
 if (reg_index == -EINVAL) {
 return reg_index;
@@ -409,11 +409,11 @@ static int _mpu_configure_regions(const struct k_mem_partition
 * underlying region, which needs to split
 * into two regions.
 */
-_mpu_region_set_limit(u_reg_index,
+mpu_region_set_limit(u_reg_index,
 regions[i]->start - 1);
 reg_index =
-_mpu_configure_region(reg_index, regions[i]);
+mpu_configure_region(reg_index, regions[i]);
 if (reg_index == -EINVAL) {
 return reg_index;
@@ -426,7 +426,7 @@ static int _mpu_configure_regions(const struct k_mem_partition
 */
 struct arm_mpu_region fill_region;
-_mpu_region_get_access_attr(u_reg_index,
+mpu_region_get_access_attr(u_reg_index,
 &fill_region.attr);
 fill_region.base = regions[i]->start +
 regions[i]->size;
@@ -435,7 +435,7 @@ static int _mpu_configure_regions(const struct k_mem_partition
 regions[i]->size), (u_reg_last - reg_last));
 reg_index =
-_region_allocate_and_init(reg_index,
+region_allocate_and_init(reg_index,
 (const struct arm_mpu_region *)
 &fill_region);
@@ -458,7 +458,7 @@ static int _mpu_configure_regions(const struct k_mem_partition
 * If the static MPU regions configuration has not been successfully
 * performed, the error signal is propagated to the caller of the function.
 */
-static int _mpu_configure_static_mpu_regions(const struct k_mem_partition
+static int mpu_configure_static_mpu_regions(const struct k_mem_partition
 *static_regions[], const u8_t regions_num,
 const u32_t background_area_base,
 const u32_t background_area_end)
@@ -472,7 +472,7 @@ static int _mpu_configure_static_mpu_regions(const struct k_mem_partition
 ARG_UNUSED(background_area_base);
 ARG_UNUSED(background_area_end);
-mpu_reg_index = _mpu_configure_regions(static_regions,
+mpu_reg_index = mpu_configure_regions(static_regions,
 regions_num, mpu_reg_index, true);
 static_regions_num = mpu_reg_index;
@@ -484,7 +484,7 @@ static int _mpu_configure_static_mpu_regions(const struct k_mem_partition
 * where dynamic region programming is allowed. Return zero on success, or
 * -EINVAL on error.
 */
-static int _mpu_mark_areas_for_dynamic_regions(
+static int mpu_mark_areas_for_dynamic_regions(
 const struct k_mem_partition dyn_region_areas[],
 const u8_t dyn_region_areas_num)
 {
@@ -500,7 +500,7 @@ static int _mpu_mark_areas_for_dynamic_regions(
 /* Retrieve HW MPU region index */
 dyn_reg_info[i].index =
-_get_region_index(dyn_region_areas[i].start,
+get_region_index(dyn_region_areas[i].start,
 dyn_region_areas[i].size);
 if (dyn_reg_info[i].index == -EINVAL) {
@@ -514,7 +514,7 @@ static int _mpu_mark_areas_for_dynamic_regions(
 }
 /* Store default configuration */
-_mpu_region_get_conf(dyn_reg_info[i].index,
+mpu_region_get_conf(dyn_reg_info[i].index,
 &dyn_reg_info[i].region_conf);
 }
@@ -529,13 +529,13 @@ static int _mpu_mark_areas_for_dynamic_regions(
 * If the dynamic MPU regions configuration has not been successfully
 * performed, the error signal is propagated to the caller of the function.
 */
-static int _mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
+static int mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
 *dynamic_regions[], u8_t regions_num)
 {
 int mpu_reg_index = static_regions_num;
 /* Disable all MPU regions except for the static ones. */
-for (int i = mpu_reg_index; i < _get_num_regions(); i++) {
+for (int i = mpu_reg_index; i < get_num_regions(); i++) {
 ARM_MPU_ClrRegion(i);
 }
@@ -543,7 +543,7 @@ static int _mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
 * be programmed.
 */
 for (int i = 0; i < MPU_DYNAMIC_REGION_AREAS_NUM; i++) {
-_region_init(dyn_reg_info[i].index,
+region_init(dyn_reg_info[i].index,
 &dyn_reg_info[i].region_conf);
 }
@@ -552,7 +552,7 @@ static int _mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
 * given boundaries.
 */
-mpu_reg_index = _mpu_configure_regions(dynamic_regions,
+mpu_reg_index = mpu_configure_regions(dynamic_regions,
 regions_num, mpu_reg_index, true);
 return mpu_reg_index;


@ -25,7 +25,7 @@ LOG_MODULE_DECLARE(mpu);
static u8_t static_regions_num; static u8_t static_regions_num;
/* Global MPU configuration at system initialization. */ /* Global MPU configuration at system initialization. */
static void _mpu_init(void) static void mpu_init(void)
{ {
/* Enable clock for the Memory Protection Unit (MPU). */ /* Enable clock for the Memory Protection Unit (MPU). */
CLOCK_EnableClock(kCLOCK_Sysmpu0); CLOCK_EnableClock(kCLOCK_Sysmpu0);
@ -34,7 +34,7 @@ static void _mpu_init(void)
/** /**
* Get the number of supported MPU regions. * Get the number of supported MPU regions.
*/ */
static inline u8_t _get_num_regions(void) static inline u8_t get_num_regions(void)
{ {
return FSL_FEATURE_SYSMPU_DESCRIPTOR_COUNT; return FSL_FEATURE_SYSMPU_DESCRIPTOR_COUNT;
} }
@ -47,7 +47,7 @@ static inline u8_t _get_num_regions(void)
* @param part Pointer to the data structure holding the partition * @param part Pointer to the data structure holding the partition
* information (must be valid). * information (must be valid).
*/ */
static int _mpu_partition_is_valid(const struct k_mem_partition *part) static int mpu_partition_is_valid(const struct k_mem_partition *part)
{ {
/* Partition size must be a multiple of the minimum MPU region /* Partition size must be a multiple of the minimum MPU region
* size. Start address of the partition must align with the * size. Start address of the partition must align with the
@ -71,7 +71,7 @@ static int _mpu_partition_is_valid(const struct k_mem_partition *part)
* Note: * Note:
* The caller must provide a valid region index. * The caller must provide a valid region index.
*/ */
static void _region_init(const u32_t index, static void region_init(const u32_t index,
const struct nxp_mpu_region *region_conf) const struct nxp_mpu_region *region_conf)
{ {
u32_t region_base = region_conf->base; u32_t region_base = region_conf->base;
@ -114,11 +114,11 @@ static void _region_init(const u32_t index,
} }
static int _region_allocate_and_init(const u8_t index, static int region_allocate_and_init(const u8_t index,
const struct nxp_mpu_region *region_conf) const struct nxp_mpu_region *region_conf)
{ {
/* Attempt to allocate new region index. */ /* Attempt to allocate new region index. */
if (index > (_get_num_regions() - 1)) { if (index > (get_num_regions() - 1)) {
/* No available MPU region index. */ /* No available MPU region index. */
LOG_ERR("Failed to allocate new MPU region %u\n", index); LOG_ERR("Failed to allocate new MPU region %u\n", index);
@ -128,7 +128,7 @@ static int _region_allocate_and_init(const u8_t index,
LOG_DBG("Program MPU region at index 0x%x", index); LOG_DBG("Program MPU region at index 0x%x", index);
/* Program region */ /* Program region */
_region_init(index, region_conf); region_init(index, region_conf);
return index; return index;
} }
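For reference, the partition sanity check shown earlier in this file (mpu_partition_is_valid) reduces to two arithmetic conditions on the descriptor. A hedged, self-contained sketch follows; the struct layout and the 32-byte minimum are illustrative stand-ins (the real struct k_mem_partition and the hardware minimum come from the kernel and SoC headers), and only the "size is a multiple / start is aligned" rule is taken from the comment above.

#include <stdint.h>

#define MIN_REGION_SIZE 32U   /* illustrative; the real value is SoC-specific */

struct partition_sketch {     /* stand-in for struct k_mem_partition */
        uint32_t start;
        uint32_t size;
};

/* Returns 1 when the partition can back an MPU region: its size is a
 * non-zero multiple of the minimum region size and its start address is
 * aligned to that same minimum.
 */
static int partition_is_valid_sketch(const struct partition_sketch *part)
{
        if (part->size == 0U || (part->size % MIN_REGION_SIZE) != 0U) {
                return 0;
        }
        if ((part->start % MIN_REGION_SIZE) != 0U) {
                return 0;
        }
        return 1;
}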
@ -138,7 +138,7 @@ static int _region_allocate_and_init(const u8_t index,
* region attribute configuration and size and fill-in a driver-specific * region attribute configuration and size and fill-in a driver-specific
* structure with the correct MPU region attribute configuration. * structure with the correct MPU region attribute configuration.
*/ */
static inline void _get_region_attr_from_k_mem_partition_info( static inline void get_region_attr_from_k_mem_partition_info(
nxp_mpu_region_attr_t *p_attr, nxp_mpu_region_attr_t *p_attr,
const k_mem_partition_attr_t *attr, u32_t base, u32_t size) const k_mem_partition_attr_t *attr, u32_t base, u32_t size)
{ {
@ -154,7 +154,7 @@ static inline void _get_region_attr_from_k_mem_partition_info(
/* This internal function programs an MPU region /* This internal function programs an MPU region
* of a given configuration at a given MPU index. * of a given configuration at a given MPU index.
*/ */
static int _mpu_configure_region(const u8_t index, static int mpu_configure_region(const u8_t index,
const struct k_mem_partition *new_region) const struct k_mem_partition *new_region)
{ {
struct nxp_mpu_region region_conf; struct nxp_mpu_region region_conf;
@ -164,17 +164,17 @@ static int _mpu_configure_region(const u8_t index,
/* Populate internal NXP MPU region configuration structure. */ /* Populate internal NXP MPU region configuration structure. */
region_conf.base = new_region->start; region_conf.base = new_region->start;
region_conf.end = (new_region->start + new_region->size - 1); region_conf.end = (new_region->start + new_region->size - 1);
_get_region_attr_from_k_mem_partition_info(&region_conf.attr, get_region_attr_from_k_mem_partition_info(&region_conf.attr,
&new_region->attr, new_region->start, new_region->size); &new_region->attr, new_region->start, new_region->size);
/* Allocate and program region */ /* Allocate and program region */
return _region_allocate_and_init(index, return region_allocate_and_init(index,
(const struct nxp_mpu_region *)&region_conf); (const struct nxp_mpu_region *)&region_conf);
} }
#if defined(CONFIG_MPU_STACK_GUARD) #if defined(CONFIG_MPU_STACK_GUARD)
/* This internal function partitions the SRAM MPU region */ /* This internal function partitions the SRAM MPU region */
static int _mpu_sram_partitioning(u8_t index, static int mpu_sram_partitioning(u8_t index,
const struct k_mem_partition *p_region) const struct k_mem_partition *p_region)
{ {
/* /*
@ -203,8 +203,8 @@ static int _mpu_sram_partitioning(u8_t index,
added_sram_region.attr.attr = added_sram_region.attr.attr =
mpu_config.mpu_regions[mpu_config.sram_region].attr.attr; mpu_config.mpu_regions[mpu_config.sram_region].attr.attr;
if (_region_allocate_and_init(index, if (region_allocate_and_init(index,
(const struct nxp_mpu_region *)&added_sram_region) < 0) { (const struct nxp_mpu_region *)&added_sram_region) < 0) {
return -EINVAL; return -EINVAL;
} }
@ -222,7 +222,7 @@ static int _mpu_sram_partitioning(u8_t index,
adjusted_sram_region.attr.attr = adjusted_sram_region.attr.attr =
mpu_config.mpu_regions[mpu_config.sram_region].attr.attr; mpu_config.mpu_regions[mpu_config.sram_region].attr.attr;
_region_init(mpu_config.sram_region, region_init(mpu_config.sram_region,
(const struct nxp_mpu_region *)&adjusted_sram_region); (const struct nxp_mpu_region *)&adjusted_sram_region);
return index; return index;
@ -233,7 +233,7 @@ static int _mpu_sram_partitioning(u8_t index,
* over a background memory area, optionally performing a * over a background memory area, optionally performing a
* sanity check of the memory regions to be programmed. * sanity check of the memory regions to be programmed.
*/ */
static int _mpu_configure_regions(const struct k_mem_partition static int mpu_configure_regions(const struct k_mem_partition
*regions[], u8_t regions_num, u8_t start_reg_index, *regions[], u8_t regions_num, u8_t start_reg_index,
bool do_sanity_check) bool do_sanity_check)
{ {
@ -247,7 +247,7 @@ static int _mpu_configure_regions(const struct k_mem_partition
/* Non-empty region. */ /* Non-empty region. */
if (do_sanity_check && if (do_sanity_check &&
(!_mpu_partition_is_valid(regions[i]))) { (!mpu_partition_is_valid(regions[i]))) {
LOG_ERR("Partition %u: sanity check failed.", i); LOG_ERR("Partition %u: sanity check failed.", i);
return -EINVAL; return -EINVAL;
} }
@ -261,7 +261,7 @@ static int _mpu_configure_regions(const struct k_mem_partition
* be programmed afterwards. * be programmed afterwards.
*/ */
reg_index = reg_index =
_mpu_sram_partitioning(reg_index, regions[i]); mpu_sram_partitioning(reg_index, regions[i]);
} }
#endif /* CONFIG_MPU_STACK_GUARD */ #endif /* CONFIG_MPU_STACK_GUARD */
@ -269,7 +269,7 @@ static int _mpu_configure_regions(const struct k_mem_partition
return reg_index; return reg_index;
} }
reg_index = _mpu_configure_region(reg_index, regions[i]); reg_index = mpu_configure_region(reg_index, regions[i]);
if (reg_index == -EINVAL) { if (reg_index == -EINVAL) {
return reg_index; return reg_index;
@ -290,7 +290,7 @@ static int _mpu_configure_regions(const struct k_mem_partition
* If the static MPU regions configuration has not been successfully * If the static MPU regions configuration has not been successfully
* performed, the error signal is propagated to the caller of the function. * performed, the error signal is propagated to the caller of the function.
*/ */
static int _mpu_configure_static_mpu_regions(const struct k_mem_partition static int mpu_configure_static_mpu_regions(const struct k_mem_partition
*static_regions[], const u8_t regions_num, *static_regions[], const u8_t regions_num,
const u32_t background_area_base, const u32_t background_area_base,
const u32_t background_area_end) const u32_t background_area_end)
@ -303,7 +303,7 @@ static int _mpu_configure_static_mpu_regions(const struct k_mem_partition
ARG_UNUSED(background_area_base); ARG_UNUSED(background_area_base);
ARG_UNUSED(background_area_end); ARG_UNUSED(background_area_end);
mpu_reg_index = _mpu_configure_regions(static_regions, mpu_reg_index = mpu_configure_regions(static_regions,
regions_num, mpu_reg_index, true); regions_num, mpu_reg_index, true);
static_regions_num = mpu_reg_index; static_regions_num = mpu_reg_index;
@ -319,7 +319,7 @@ static int _mpu_configure_static_mpu_regions(const struct k_mem_partition
* If the dynamic MPU regions configuration has not been successfully * If the dynamic MPU regions configuration has not been successfully
* performed, the error signal is propagated to the caller of the function. * performed, the error signal is propagated to the caller of the function.
*/ */
static int _mpu_configure_dynamic_mpu_regions(const struct k_mem_partition static int mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
*dynamic_regions[], u8_t regions_num) *dynamic_regions[], u8_t regions_num)
{ {
/* Reset MPU regions inside which dynamic memory regions may /* Reset MPU regions inside which dynamic memory regions may
@ -331,7 +331,7 @@ static int _mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
* re-programming perform access in those areas. * re-programming perform access in those areas.
*/ */
arm_core_mpu_disable(); arm_core_mpu_disable();
_region_init(mpu_config.sram_region, (const struct nxp_mpu_region *) region_init(mpu_config.sram_region, (const struct nxp_mpu_region *)
&mpu_config.mpu_regions[mpu_config.sram_region]); &mpu_config.mpu_regions[mpu_config.sram_region]);
arm_core_mpu_enable(); arm_core_mpu_enable();
@ -341,13 +341,13 @@ static int _mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
* programmed on top of existing SRAM region configuration. * programmed on top of existing SRAM region configuration.
*/ */
mpu_reg_index = _mpu_configure_regions(dynamic_regions, mpu_reg_index = mpu_configure_regions(dynamic_regions,
regions_num, mpu_reg_index, false); regions_num, mpu_reg_index, false);
if (mpu_reg_index != -EINVAL) { if (mpu_reg_index != -EINVAL) {
/* Disable the non-programmed MPU regions. */ /* Disable the non-programmed MPU regions. */
for (int i = mpu_reg_index; i < _get_num_regions(); i++) { for (int i = mpu_reg_index; i < get_num_regions(); i++) {
LOG_DBG("disable region 0x%x", i); LOG_DBG("disable region 0x%x", i);
/* Disable region */ /* Disable region */
@ -392,12 +392,12 @@ void arm_core_mpu_disable(void)
#if defined(CONFIG_USERSPACE) #if defined(CONFIG_USERSPACE)
static inline u32_t _mpu_region_get_base(u32_t r_index) static inline u32_t mpu_region_get_base(u32_t r_index)
{ {
return SYSMPU->WORD[r_index][0]; return SYSMPU->WORD[r_index][0];
} }
static inline u32_t _mpu_region_get_size(u32_t r_index) static inline u32_t mpu_region_get_size(u32_t r_index)
{ {
/* <END> + 1 - <BASE> */ /* <END> + 1 - <BASE> */
return (SYSMPU->WORD[r_index][1] + 1) - SYSMPU->WORD[r_index][0]; return (SYSMPU->WORD[r_index][1] + 1) - SYSMPU->WORD[r_index][0];
@ -409,7 +409,7 @@ static inline u32_t _mpu_region_get_size(u32_t r_index)
* Note: * Note:
* The caller must provide a valid region number. * The caller must provide a valid region number.
*/ */
static inline int _is_enabled_region(u32_t r_index) static inline int is_enabled_region(u32_t r_index)
{ {
return SYSMPU->WORD[r_index][3] & SYSMPU_WORD_VLD_MASK; return SYSMPU->WORD[r_index][3] & SYSMPU_WORD_VLD_MASK;
} }
@ -420,7 +420,7 @@ static inline int _is_enabled_region(u32_t r_index)
* Note: * Note:
* The caller must provide a valid region number. * The caller must provide a valid region number.
*/ */
static inline int _is_in_region(u32_t r_index, u32_t start, u32_t size) static inline int is_in_region(u32_t r_index, u32_t start, u32_t size)
{ {
u32_t r_addr_start; u32_t r_addr_start;
u32_t r_addr_end; u32_t r_addr_end;
@ -444,20 +444,20 @@ void arm_core_mpu_mem_partition_config_update(
{ {
/* Find the partition. ASSERT if not found. */ /* Find the partition. ASSERT if not found. */
u8_t i; u8_t i;
u8_t reg_index = _get_num_regions(); u8_t reg_index = get_num_regions();
for (i = static_regions_num; i < _get_num_regions(); i++) { for (i = static_regions_num; i < get_num_regions(); i++) {
if (!_is_enabled_region(i)) { if (!is_enabled_region(i)) {
continue; continue;
} }
u32_t base = _mpu_region_get_base(i); u32_t base = mpu_region_get_base(i);
if (base != partition->start) { if (base != partition->start) {
continue; continue;
} }
u32_t size = _mpu_region_get_size(i); u32_t size = mpu_region_get_size(i);
if (size != partition->size) { if (size != partition->size) {
continue; continue;
@ -467,12 +467,12 @@ void arm_core_mpu_mem_partition_config_update(
reg_index = i; reg_index = i;
break; break;
} }
__ASSERT(reg_index != _get_num_regions(), __ASSERT(reg_index != get_num_regions(),
"Memory domain partition not found\n"); "Memory domain partition not found\n");
/* Modify the permissions */ /* Modify the permissions */
partition->attr = *new_attr; partition->attr = *new_attr;
_mpu_configure_region(reg_index, partition); mpu_configure_region(reg_index, partition);
} }
/** /**
@ -481,7 +481,7 @@ void arm_core_mpu_mem_partition_config_update(
*/ */
int arm_core_mpu_get_max_available_dyn_regions(void) int arm_core_mpu_get_max_available_dyn_regions(void)
{ {
return _get_num_regions() - static_regions_num; return get_num_regions() - static_regions_num;
} }
/** /**
@ -490,7 +490,7 @@ int arm_core_mpu_get_max_available_dyn_regions(void)
* Note: * Note:
* The caller must provide a valid region number. * The caller must provide a valid region number.
*/ */
static inline int _is_user_accessible_region(u32_t r_index, int write) static inline int is_user_accessible_region(u32_t r_index, int write)
{ {
u32_t r_ap = SYSMPU->WORD[r_index][2]; u32_t r_ap = SYSMPU->WORD[r_index][2];
@ -509,9 +509,9 @@ int arm_core_mpu_buffer_validate(void *addr, size_t size, int write)
u8_t r_index; u8_t r_index;
/* Iterate through all MPU regions */ /* Iterate through all MPU regions */
for (r_index = 0U; r_index < _get_num_regions(); r_index++) { for (r_index = 0U; r_index < get_num_regions(); r_index++) {
if (!_is_enabled_region(r_index) || if (!is_enabled_region(r_index) ||
!_is_in_region(r_index, (u32_t)addr, size)) { !is_in_region(r_index, (u32_t)addr, size)) {
continue; continue;
} }
@ -520,7 +520,7 @@ int arm_core_mpu_buffer_validate(void *addr, size_t size, int write)
* So we can stop the iteration immediately once we find the * So we can stop the iteration immediately once we find the
* matched region that grants permission. * matched region that grants permission.
*/ */
if (_is_user_accessible_region(r_index, write)) { if (is_user_accessible_region(r_index, write)) {
return 0; return 0;
} }
} }
@ -537,8 +537,8 @@ void arm_core_mpu_configure_static_mpu_regions(const struct k_mem_partition
*static_regions[], const u8_t regions_num, *static_regions[], const u8_t regions_num,
const u32_t background_area_start, const u32_t background_area_end) const u32_t background_area_start, const u32_t background_area_end)
{ {
if (_mpu_configure_static_mpu_regions(static_regions, regions_num, if (mpu_configure_static_mpu_regions(static_regions, regions_num,
background_area_start, background_area_end) == -EINVAL) { background_area_start, background_area_end) == -EINVAL) {
__ASSERT(0, "Configuring %u static MPU regions failed\n", __ASSERT(0, "Configuring %u static MPU regions failed\n",
regions_num); regions_num);
@ -551,7 +551,7 @@ void arm_core_mpu_configure_static_mpu_regions(const struct k_mem_partition
void arm_core_mpu_configure_dynamic_mpu_regions(const struct k_mem_partition void arm_core_mpu_configure_dynamic_mpu_regions(const struct k_mem_partition
*dynamic_regions[], u8_t regions_num) *dynamic_regions[], u8_t regions_num)
{ {
if (_mpu_configure_dynamic_mpu_regions(dynamic_regions, regions_num) if (mpu_configure_dynamic_mpu_regions(dynamic_regions, regions_num)
== -EINVAL) { == -EINVAL) {
__ASSERT(0, "Configuring %u dynamic MPU regions failed\n", __ASSERT(0, "Configuring %u dynamic MPU regions failed\n",
@ -573,7 +573,7 @@ static int nxp_mpu_init(struct device *arg)
u32_t r_index; u32_t r_index;
if (mpu_config.num_regions > _get_num_regions()) { if (mpu_config.num_regions > get_num_regions()) {
/* Attempt to configure more MPU regions than /* Attempt to configure more MPU regions than
* what is supported by hardware. As this operation * what is supported by hardware. As this operation
* may be executed during system (pre-kernel) initialization, * may be executed during system (pre-kernel) initialization,
@ -583,21 +583,21 @@ static int nxp_mpu_init(struct device *arg)
__ASSERT(0, __ASSERT(0,
"Request to configure: %u regions (supported: %u)\n", "Request to configure: %u regions (supported: %u)\n",
mpu_config.num_regions, mpu_config.num_regions,
_get_num_regions() get_num_regions()
); );
return -1; return -1;
} }
LOG_DBG("total region count: %d", _get_num_regions()); LOG_DBG("total region count: %d", get_num_regions());
arm_core_mpu_disable(); arm_core_mpu_disable();
/* Architecture-specific configuration */ /* Architecture-specific configuration */
_mpu_init(); mpu_init();
/* Program fixed regions configured at SOC definition. */ /* Program fixed regions configured at SOC definition. */
for (r_index = 0U; r_index < mpu_config.num_regions; r_index++) { for (r_index = 0U; r_index < mpu_config.num_regions; r_index++) {
_region_init(r_index, &mpu_config.mpu_regions[r_index]); region_init(r_index, &mpu_config.mpu_regions[r_index]);
} }
/* Update the number of programmed MPU regions. */ /* Update the number of programmed MPU regions. */

View file

@ -20,14 +20,14 @@
#include <toolchain.h> #include <toolchain.h>
#include <linker/sections.h> #include <linker/sections.h>
extern void _SysNmiOnReset(void); extern void z_SysNmiOnReset(void);
#if !defined(CONFIG_RUNTIME_NMI) #if !defined(CONFIG_RUNTIME_NMI)
#define handler _SysNmiOnReset #define handler z_SysNmiOnReset
#endif #endif
#ifdef CONFIG_RUNTIME_NMI #ifdef CONFIG_RUNTIME_NMI
typedef void (*_NmiHandler_t)(void); typedef void (*_NmiHandler_t)(void);
static _NmiHandler_t handler = _SysNmiOnReset; static _NmiHandler_t handler = z_SysNmiOnReset;
/** /**
* *
@ -39,7 +39,7 @@ static _NmiHandler_t handler = _SysNmiOnReset;
* @return N/A * @return N/A
*/ */
static void _DefaultHandler(void) static void DefaultHandler(void)
{ {
printk("NMI received! Rebooting...\n"); printk("NMI received! Rebooting...\n");
/* In ARM implementation sys_reboot ignores the parameter */ /* In ARM implementation sys_reboot ignores the parameter */
@ -59,7 +59,7 @@ static void _DefaultHandler(void)
void z_NmiInit(void) void z_NmiInit(void)
{ {
handler = _DefaultHandler; handler = DefaultHandler;
} }
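What the two NMI hunks above set up is a compile-time default with an optional runtime override: without CONFIG_RUNTIME_NMI the handler name is hard-wired to z_SysNmiOnReset (the wfi loop in the next file), and with it a function pointer starts out at z_SysNmiOnReset until z_NmiInit() installs the reporting handler. A self-contained sketch of that pattern follows; the NMI vector shown here and the reporting body are stand-ins, since neither is part of these hunks.

#include <stdio.h>

typedef void (*nmi_handler_t)(void);

/* Stand-in for z_SysNmiOnReset: the build-time default that just waits. */
static void nmi_spin(void)
{
        for (;;) {
                /* wfi */
        }
}

/* Stand-in for DefaultHandler: report the NMI (the real one then reboots). */
static void nmi_report(void)
{
        printf("NMI received! Rebooting...\n");
}

/* Runtime-overridable handler, as in the CONFIG_RUNTIME_NMI branch above. */
static nmi_handler_t handler = nmi_spin;

static void nmi_init(void)   /* mirrors z_NmiInit() */
{
        handler = nmi_report;
}

/* The NMI vector (not shown in these hunks) dispatches through the pointer,
 * so whichever handler was installed last is the one that runs.
 */
static void nmi_vector(void)
{
        handler();
}

int main(void)
{
        nmi_init();     /* install the runtime handler */
        nmi_vector();   /* simulate an NMI: prints instead of spinning */
        return 0;
}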
/** /**

View file

@ -20,8 +20,8 @@
_ASM_FILE_PROLOGUE _ASM_FILE_PROLOGUE
GTEXT(_SysNmiOnReset) GTEXT(z_SysNmiOnReset)
SECTION_FUNC(TEXT, _SysNmiOnReset) SECTION_FUNC(TEXT, z_SysNmiOnReset)
wfi wfi
b _SysNmiOnReset b z_SysNmiOnReset

View file

@ -161,7 +161,7 @@ extern FUNC_NORETURN void z_cstart(void);
* @return N/A * @return N/A
*/ */
extern void _IntLibInit(void); extern void z_IntLibInit(void);
#ifdef CONFIG_BOOT_TIME_MEASUREMENT #ifdef CONFIG_BOOT_TIME_MEASUREMENT
extern u64_t __start_time_stamp; extern u64_t __start_time_stamp;
@ -183,7 +183,7 @@ void _PrepC(void)
#ifdef CONFIG_BOOT_TIME_MEASUREMENT #ifdef CONFIG_BOOT_TIME_MEASUREMENT
__start_time_stamp = 0U; __start_time_stamp = 0U;
#endif #endif
_IntLibInit(); z_IntLibInit();
z_cstart(); z_cstart();
CODE_UNREACHABLE; CODE_UNREACHABLE;
} }

View file

@ -20,7 +20,7 @@
_ASM_FILE_PROLOGUE _ASM_FILE_PROLOGUE
GTEXT(_CpuIdleInit) GTEXT(z_CpuIdleInit)
#ifdef CONFIG_SYS_POWER_MANAGEMENT #ifdef CONFIG_SYS_POWER_MANAGEMENT
GTEXT(_NanoIdleValGet) GTEXT(_NanoIdleValGet)
GTEXT(_NanoIdleValClear) GTEXT(_NanoIdleValClear)
@ -46,10 +46,10 @@ GTEXT(k_cpu_atomic_idle)
* *
* C function prototype: * C function prototype:
* *
* void _CpuIdleInit (void); * void z_CpuIdleInit (void);
*/ */
SECTION_FUNC(TEXT, _CpuIdleInit) SECTION_FUNC(TEXT, z_CpuIdleInit)
ldr r1, =_SCB_SCR ldr r1, =_SCB_SCR
movs.n r2, #_SCR_INIT_BITS movs.n r2, #_SCR_INIT_BITS
str r2, [r1] str r2, [r1]
@ -179,14 +179,14 @@ SECTION_FUNC(TEXT, k_cpu_atomic_idle)
cpsid i cpsid i
/* /*
* No need to set SEVONPEND, it's set once in _CpuIdleInit() and never * No need to set SEVONPEND, it's set once in z_CpuIdleInit() and never
* touched again. * touched again.
*/ */
/* r0: interrupt mask from caller */ /* r0: interrupt mask from caller */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* No BASEPRI, call wfe directly (SEVONPEND set in _CpuIdleInit()) */ /* No BASEPRI, call wfe directly (SEVONPEND set in z_CpuIdleInit()) */
wfe wfe
cmp r0, #0 cmp r0, #0
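The SEVONPEND remarks above describe the contract of k_cpu_atomic_idle(): the caller tests its wake condition with interrupts locked and passes the lock key in, and the routine atomically re-enables interrupts and waits, restoring the interrupt state encoded in the key when it wakes. A hedged usage sketch, assuming a Zephyr build of this era; the data_ready flag and the ISR that sets it are illustrative.

#include <kernel.h>

static volatile int data_ready;   /* illustrative flag, set from an ISR */

void wait_for_data(void)
{
        unsigned int key = irq_lock();

        while (!data_ready) {
                /* Atomically unlocks and idles; no wake-up can slip in
                 * between the flag test above and entering idle.
                 */
                k_cpu_atomic_idle(key);

                /* On wake the pre-lock interrupt state is restored, so lock
                 * again before re-testing the flag.
                 */
                key = irq_lock();
        }
        irq_unlock(key);
}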

View file

@ -92,7 +92,7 @@ void z_NanoFatalErrorHandler(unsigned int reason,
z_SysFatalErrorHandler(reason, pEsf); z_SysFatalErrorHandler(reason, pEsf);
} }
void _do_kernel_oops(const NANO_ESF *esf) void z_do_kernel_oops(const NANO_ESF *esf)
{ {
z_NanoFatalErrorHandler(esf->r0, esf); z_NanoFatalErrorHandler(esf->r0, esf);
} }
@ -106,6 +106,6 @@ FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
oops_esf.pc = ssf_contents[3]; oops_esf.pc = ssf_contents[3];
_do_kernel_oops(&oops_esf); z_do_kernel_oops(&oops_esf);
CODE_UNREACHABLE; CODE_UNREACHABLE;
} }

View file

@ -137,7 +137,7 @@
*/ */
#if (CONFIG_FAULT_DUMP == 1) #if (CONFIG_FAULT_DUMP == 1)
static void _FaultShow(const NANO_ESF *esf, int fault) static void FaultShow(const NANO_ESF *esf, int fault)
{ {
PR_EXC("Fault! EXC #%d\n", fault); PR_EXC("Fault! EXC #%d\n", fault);
@ -156,7 +156,7 @@ static void _FaultShow(const NANO_ESF *esf, int fault)
* *
* For Dump level 0, no information needs to be generated. * For Dump level 0, no information needs to be generated.
*/ */
static void _FaultShow(const NANO_ESF *esf, int fault) static void FaultShow(const NANO_ESF *esf, int fault)
{ {
(void)esf; (void)esf;
(void)fault; (void)fault;
@ -176,7 +176,7 @@ static const struct z_exc_handle exceptions[] = {
* *
* @return 1 if error is recoverable, otherwise return 0. * @return 1 if error is recoverable, otherwise return 0.
*/ */
static int _MemoryFaultIsRecoverable(NANO_ESF *esf) static int MemoryFaultIsRecoverable(NANO_ESF *esf)
{ {
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
for (int i = 0; i < ARRAY_SIZE(exceptions); i++) { for (int i = 0; i < ARRAY_SIZE(exceptions); i++) {
@ -211,7 +211,7 @@ u32_t z_check_thread_stack_fail(const u32_t fault_addr,
* *
* @return error code to identify the fatal error reason * @return error code to identify the fatal error reason
*/ */
static u32_t _MpuFault(NANO_ESF *esf, int fromHardFault) static u32_t MpuFault(NANO_ESF *esf, int fromHardFault)
{ {
u32_t reason = _NANO_ERR_HW_EXCEPTION; u32_t reason = _NANO_ERR_HW_EXCEPTION;
u32_t mmfar = -EINVAL; u32_t mmfar = -EINVAL;
@ -321,7 +321,7 @@ static u32_t _MpuFault(NANO_ESF *esf, int fromHardFault)
SCB->CFSR |= SCB_CFSR_MEMFAULTSR_Msk; SCB->CFSR |= SCB_CFSR_MEMFAULTSR_Msk;
/* Assess whether system shall ignore/recover from this MPU fault. */ /* Assess whether system shall ignore/recover from this MPU fault. */
if (_MemoryFaultIsRecoverable(esf)) { if (MemoryFaultIsRecoverable(esf)) {
reason = _NANO_ERR_RECOVERABLE; reason = _NANO_ERR_RECOVERABLE;
} }
@ -336,7 +336,7 @@ static u32_t _MpuFault(NANO_ESF *esf, int fromHardFault)
* *
* @return N/A * @return N/A
*/ */
static int _BusFault(NANO_ESF *esf, int fromHardFault) static int BusFault(NANO_ESF *esf, int fromHardFault)
{ {
u32_t reason = _NANO_ERR_HW_EXCEPTION; u32_t reason = _NANO_ERR_HW_EXCEPTION;
@ -477,7 +477,7 @@ static int _BusFault(NANO_ESF *esf, int fromHardFault)
/* clear BFSR sticky bits */ /* clear BFSR sticky bits */
SCB->CFSR |= SCB_CFSR_BUSFAULTSR_Msk; SCB->CFSR |= SCB_CFSR_BUSFAULTSR_Msk;
if (_MemoryFaultIsRecoverable(esf)) { if (MemoryFaultIsRecoverable(esf)) {
reason = _NANO_ERR_RECOVERABLE; reason = _NANO_ERR_RECOVERABLE;
} }
@ -492,7 +492,7 @@ static int _BusFault(NANO_ESF *esf, int fromHardFault)
* *
* @return error code to identify the fatal error reason * @return error code to identify the fatal error reason
*/ */
static u32_t _UsageFault(const NANO_ESF *esf) static u32_t UsageFault(const NANO_ESF *esf)
{ {
u32_t reason = _NANO_ERR_HW_EXCEPTION; u32_t reason = _NANO_ERR_HW_EXCEPTION;
@ -548,7 +548,7 @@ static u32_t _UsageFault(const NANO_ESF *esf)
* *
* @return N/A * @return N/A
*/ */
static void _SecureFault(const NANO_ESF *esf) static void SecureFault(const NANO_ESF *esf)
{ {
PR_FAULT_INFO("***** SECURE FAULT *****\n"); PR_FAULT_INFO("***** SECURE FAULT *****\n");
@ -587,7 +587,7 @@ static void _SecureFault(const NANO_ESF *esf)
* *
* @return N/A * @return N/A
*/ */
static void _DebugMonitor(const NANO_ESF *esf) static void DebugMonitor(const NANO_ESF *esf)
{ {
ARG_UNUSED(esf); ARG_UNUSED(esf);
@ -607,14 +607,14 @@ static void _DebugMonitor(const NANO_ESF *esf)
* *
* @return error code to identify the fatal error reason * @return error code to identify the fatal error reason
*/ */
static u32_t _HardFault(NANO_ESF *esf) static u32_t HardFault(NANO_ESF *esf)
{ {
u32_t reason = _NANO_ERR_HW_EXCEPTION; u32_t reason = _NANO_ERR_HW_EXCEPTION;
PR_FAULT_INFO("***** HARD FAULT *****\n"); PR_FAULT_INFO("***** HARD FAULT *****\n");
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
if (_MemoryFaultIsRecoverable(esf) != 0) { if (MemoryFaultIsRecoverable(esf) != 0) {
reason = _NANO_ERR_RECOVERABLE; reason = _NANO_ERR_RECOVERABLE;
} }
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
@ -623,14 +623,14 @@ static u32_t _HardFault(NANO_ESF *esf)
} else if ((SCB->HFSR & SCB_HFSR_FORCED_Msk) != 0) { } else if ((SCB->HFSR & SCB_HFSR_FORCED_Msk) != 0) {
PR_EXC(" Fault escalation (see below)\n"); PR_EXC(" Fault escalation (see below)\n");
if (SCB_MMFSR != 0) { if (SCB_MMFSR != 0) {
reason = _MpuFault(esf, 1); reason = MpuFault(esf, 1);
} else if (SCB_BFSR != 0) { } else if (SCB_BFSR != 0) {
reason = _BusFault(esf, 1); reason = BusFault(esf, 1);
} else if (SCB_UFSR != 0) { } else if (SCB_UFSR != 0) {
reason = _UsageFault(esf); reason = UsageFault(esf);
#if defined(CONFIG_ARM_SECURE_FIRMWARE) #if defined(CONFIG_ARM_SECURE_FIRMWARE)
} else if (SAU->SFSR != 0) { } else if (SAU->SFSR != 0) {
_SecureFault(esf); SecureFault(esf);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */ #endif /* CONFIG_ARM_SECURE_FIRMWARE */
} }
} }
@ -649,7 +649,7 @@ static u32_t _HardFault(NANO_ESF *esf)
* *
* @return N/A * @return N/A
*/ */
static void _ReservedException(const NANO_ESF *esf, int fault) static void ReservedException(const NANO_ESF *esf, int fault)
{ {
ARG_UNUSED(esf); ARG_UNUSED(esf);
@ -659,45 +659,45 @@ static void _ReservedException(const NANO_ESF *esf, int fault)
} }
/* Handler function for ARM fault conditions. */ /* Handler function for ARM fault conditions. */
static u32_t _FaultHandle(NANO_ESF *esf, int fault) static u32_t FaultHandle(NANO_ESF *esf, int fault)
{ {
u32_t reason = _NANO_ERR_HW_EXCEPTION; u32_t reason = _NANO_ERR_HW_EXCEPTION;
switch (fault) { switch (fault) {
case 3: case 3:
reason = _HardFault(esf); reason = HardFault(esf);
break; break;
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* HardFault is used for all fault conditions on ARMv6-M. */ /* HardFault is used for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
case 4: case 4:
reason = _MpuFault(esf, 0); reason = MpuFault(esf, 0);
break; break;
case 5: case 5:
reason = _BusFault(esf, 0); reason = BusFault(esf, 0);
break; break;
case 6: case 6:
reason = _UsageFault(esf); reason = UsageFault(esf);
break; break;
#if defined(CONFIG_ARM_SECURE_FIRMWARE) #if defined(CONFIG_ARM_SECURE_FIRMWARE)
case 7: case 7:
_SecureFault(esf); SecureFault(esf);
break; break;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */ #endif /* CONFIG_ARM_SECURE_FIRMWARE */
case 12: case 12:
_DebugMonitor(esf); DebugMonitor(esf);
break; break;
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
default: default:
_ReservedException(esf, fault); ReservedException(esf, fault);
break; break;
} }
if (reason != _NANO_ERR_RECOVERABLE) { if (reason != _NANO_ERR_RECOVERABLE) {
/* Dump generic information about the fault. */ /* Dump generic information about the fault. */
_FaultShow(esf, fault); FaultShow(esf, fault);
} }
return reason; return reason;
@ -711,7 +711,7 @@ static u32_t _FaultHandle(NANO_ESF *esf, int fault)
* *
* @param secure_esf Pointer to the secure stack frame. * @param secure_esf Pointer to the secure stack frame.
*/ */
static void _SecureStackDump(const NANO_ESF *secure_esf) static void SecureStackDump(const NANO_ESF *secure_esf)
{ {
/* /*
* In case a Non-Secure exception interrupted the Secure * In case a Non-Secure exception interrupted the Secure
@ -747,7 +747,7 @@ static void _SecureStackDump(const NANO_ESF *secure_esf)
PR_FAULT_INFO(" S instruction address: 0x%x\n", sec_ret_addr); PR_FAULT_INFO(" S instruction address: 0x%x\n", sec_ret_addr);
} }
#define SECURE_STACK_DUMP(esf) _SecureStackDump(esf) #define SECURE_STACK_DUMP(esf) SecureStackDump(esf)
#else #else
/* We do not dump the Secure stack information for lower dump levels. */ /* We do not dump the Secure stack information for lower dump levels. */
#define SECURE_STACK_DUMP(esf) #define SECURE_STACK_DUMP(esf)
@ -859,7 +859,7 @@ void _Fault(NANO_ESF *esf, u32_t exc_return)
(void) exc_return; (void) exc_return;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */ #endif /* CONFIG_ARM_SECURE_FIRMWARE */
reason = _FaultHandle(esf, fault); reason = FaultHandle(esf, fault);
if (reason == _NANO_ERR_RECOVERABLE) { if (reason == _NANO_ERR_RECOVERABLE) {
return; return;
@ -880,7 +880,7 @@ _exit_fatal:
* *
* @return N/A * @return N/A
*/ */
void _FaultInit(void) void z_FaultInit(void)
{ {
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)

View file

@ -33,7 +33,7 @@
* @return N/A * @return N/A
*/ */
void _IntLibInit(void) void z_IntLibInit(void)
{ {
int irq = 0; int irq = 0;

View file

@ -15,7 +15,7 @@ volatile irq_offload_routine_t offload_routine;
static void *offload_param; static void *offload_param;
/* Called by __svc */ /* Called by __svc */
void _irq_do_offload(void) void z_irq_do_offload(void)
{ {
offload_routine(offload_param); offload_routine(offload_param);
} }

View file

@ -22,8 +22,8 @@ _ASM_FILE_PROLOGUE
GTEXT(__svc) GTEXT(__svc)
GTEXT(__pendsv) GTEXT(__pendsv)
GTEXT(_do_kernel_oops) GTEXT(z_do_kernel_oops)
GTEXT(_arm_do_syscall) GTEXT(z_arm_do_syscall)
GDATA(_k_neg_eagain) GDATA(_k_neg_eagain)
GDATA(_kernel) GDATA(_kernel)
@ -303,7 +303,7 @@ _stack_frame_endif:
#if CONFIG_IRQ_OFFLOAD #if CONFIG_IRQ_OFFLOAD
push {r0, lr} push {r0, lr}
bl _irq_do_offload /* call C routine which executes the offload */ bl z_irq_do_offload /* call C routine which executes the offload */
pop {r0, r1} pop {r0, r1}
mov lr, r1 mov lr, r1
#endif /* CONFIG_IRQ_OFFLOAD */ #endif /* CONFIG_IRQ_OFFLOAD */
@ -313,7 +313,7 @@ _stack_frame_endif:
_oops: _oops:
push {r0, lr} push {r0, lr}
bl _do_kernel_oops bl z_do_kernel_oops
pop {r0, pc} pop {r0, pc}
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
@ -369,7 +369,7 @@ SECTION_FUNC(TEXT, __svc)
#if CONFIG_IRQ_OFFLOAD #if CONFIG_IRQ_OFFLOAD
push {r0, lr} push {r0, lr}
bl _irq_do_offload /* call C routine which executes the offload */ bl z_irq_do_offload /* call C routine which executes the offload */
pop {r0, lr} pop {r0, lr}
/* exception return is done in _IntExit() */ /* exception return is done in _IntExit() */
@ -378,7 +378,7 @@ SECTION_FUNC(TEXT, __svc)
_oops: _oops:
push {r0, lr} push {r0, lr}
bl _do_kernel_oops bl z_do_kernel_oops
pop {r0, pc} pop {r0, pc}
#if CONFIG_USERSPACE #if CONFIG_USERSPACE
@ -405,8 +405,8 @@ _oops:
*/ */
_do_syscall: _do_syscall:
ldr r8, [r0, #24] /* grab address of PC from stack frame */ ldr r8, [r0, #24] /* grab address of PC from stack frame */
ldr r1, =_arm_do_syscall ldr r1, =z_arm_do_syscall
str r1, [r0, #24] /* overwrite the PC to point to _arm_do_syscall */ str r1, [r0, #24] /* overwrite the PC to point to z_arm_do_syscall */
/* validate syscall limit, only set priv mode if valid */ /* validate syscall limit, only set priv mode if valid */
ldr ip, =K_SYSCALL_LIMIT ldr ip, =K_SYSCALL_LIMIT
@ -437,7 +437,7 @@ valid_syscall_id:
isb isb
pop {r0, r1} pop {r0, r1}
/* return from SVC to the modified LR - _arm_do_syscall */ /* return from SVC to the modified LR - z_arm_do_syscall */
bx lr bx lr
#endif #endif
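The SVC path above validates the syscall ID in assembly before ever indexing _k_syscall_table: an out-of-range ID is not rejected with an error return but redirected to a dedicated "bad syscall" entry (in Zephyr this is the K_SYSCALL_BAD slot; that name does not appear in this hunk, so treat it as an assumption). A self-contained C sketch of the same clamp-then-dispatch idea, with a toy three-entry table standing in for _k_syscall_table:

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t (*syscall_fn_t)(uintptr_t a, uintptr_t b);

static uintptr_t sys_add(uintptr_t a, uintptr_t b) { return a + b; }
static uintptr_t sys_sub(uintptr_t a, uintptr_t b) { return a - b; }
static uintptr_t sys_bad(uintptr_t a, uintptr_t b)
{
        (void)a; (void)b;
        return (uintptr_t)-1;   /* stand-in; the real bad-syscall handler reports a fatal error */
}

/* Toy stand-in for _k_syscall_table; the last entry is the bad-ID catcher. */
static const syscall_fn_t table[] = { sys_add, sys_sub, sys_bad };

#define SYSCALL_LIMIT 2U   /* valid IDs are 0 and 1, as with K_SYSCALL_LIMIT */
#define SYSCALL_BAD   2U   /* out-of-range IDs are clamped to this slot */

static uintptr_t dispatch(uint32_t call_id, uintptr_t a, uintptr_t b)
{
        if (call_id >= SYSCALL_LIMIT) {
                call_id = SYSCALL_BAD;   /* never index past the table */
        }
        return table[call_id](a, b);
}

int main(void)
{
        printf("%lu\n", (unsigned long)dispatch(0U, 2U, 3U));   /* 5 */
        printf("%lu\n", (unsigned long)dispatch(9U, 2U, 3U));   /* bad-ID path */
        return 0;
}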

View file

@ -17,7 +17,7 @@
#include <wait_q.h> #include <wait_q.h>
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
extern u8_t *_k_priv_stack_find(void *obj); extern u8_t *z_priv_stack_find(void *obj);
#endif #endif
/** /**
@ -97,7 +97,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
struct __esf *pInitCtx; struct __esf *pInitCtx;
_new_thread_init(thread, pStackMem, stackSize, priority, z_new_thread_init(thread, pStackMem, stackSize, priority,
options); options);
/* carve the thread entry struct from the "base" of the stack */ /* carve the thread entry struct from the "base" of the stack */
@ -151,9 +151,9 @@ FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
/* Set up privileged stack before entering user mode */ /* Set up privileged stack before entering user mode */
_current->arch.priv_stack_start = _current->arch.priv_stack_start =
(u32_t)_k_priv_stack_find(_current->stack_obj); (u32_t)z_priv_stack_find(_current->stack_obj);
_arm_userspace_enter(user_entry, p1, p2, p3, z_arm_userspace_enter(user_entry, p1, p2, p3,
(u32_t)_current->stack_info.start, (u32_t)_current->stack_info.start,
_current->stack_info.size); _current->stack_info.size);
CODE_UNREACHABLE; CODE_UNREACHABLE;

View file

@ -16,8 +16,8 @@
_ASM_FILE_PROLOGUE _ASM_FILE_PROLOGUE
GTEXT(_arm_userspace_enter) GTEXT(z_arm_userspace_enter)
GTEXT(_arm_do_syscall) GTEXT(z_arm_do_syscall)
GTEXT(z_arch_user_string_nlen) GTEXT(z_arch_user_string_nlen)
GTEXT(z_arch_user_string_nlen_fault_start) GTEXT(z_arch_user_string_nlen_fault_start)
GTEXT(z_arch_user_string_nlen_fault_end) GTEXT(z_arch_user_string_nlen_fault_end)
@ -36,7 +36,7 @@ GDATA(_k_syscall_table)
* not transition back later, unless they are doing system calls. * not transition back later, unless they are doing system calls.
* *
*/ */
SECTION_FUNC(TEXT,_arm_userspace_enter) SECTION_FUNC(TEXT,z_arm_userspace_enter)
/* move user_entry to lr */ /* move user_entry to lr */
mov lr, r0 mov lr, r0
@ -77,7 +77,7 @@ SECTION_FUNC(TEXT,_arm_userspace_enter)
* *
* Note that the risk for overflow is higher if using the normal thread * Note that the risk for overflow is higher if using the normal thread
* stack, since we do not control how much stack is actually left when * stack, since we do not control how much stack is actually left when
* the user invokes _arm_userspace_enter(). * the user invokes z_arm_userspace_enter().
*/ */
push {r0,r1,r2,r3,ip,lr} push {r0,r1,r2,r3,ip,lr}
ldr r0, =_kernel ldr r0, =_kernel
@ -177,7 +177,7 @@ SECTION_FUNC(TEXT,_arm_userspace_enter)
* 4) Restoring stack and calling back to the caller of the SVC * 4) Restoring stack and calling back to the caller of the SVC
* *
*/ */
SECTION_FUNC(TEXT, _arm_do_syscall) SECTION_FUNC(TEXT, z_arm_do_syscall)
/* /*
* r0-r5 contain arguments * r0-r5 contain arguments
* r6 contains call_id * r6 contains call_id

View file

@ -49,7 +49,7 @@ extern volatile irq_offload_routine_t offload_routine;
* *
* @return 1 if in ISR, 0 if not. * @return 1 if in ISR, 0 if not.
*/ */
static ALWAYS_INLINE bool _IsInIsr(void) static ALWAYS_INLINE bool z_IsInIsr(void)
{ {
u32_t vector = __get_IPSR(); u32_t vector = __get_IPSR();
@ -93,7 +93,7 @@ static ALWAYS_INLINE bool _IsInIsr(void)
* *
* @return N/A * @return N/A
*/ */
static ALWAYS_INLINE void _ExcSetup(void) static ALWAYS_INLINE void z_ExcSetup(void)
{ {
NVIC_SetPriority(PendSV_IRQn, 0xff); NVIC_SetPriority(PendSV_IRQn, 0xff);

View file

@ -37,7 +37,7 @@ extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
* *
* @return N/A * @return N/A
*/ */
static ALWAYS_INLINE void _InterruptStackSetup(void) static ALWAYS_INLINE void z_InterruptStackSetup(void)
{ {
#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) && \ #if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) && \
defined(CONFIG_USERSPACE) defined(CONFIG_USERSPACE)

View file

@ -27,8 +27,8 @@ extern "C" {
#endif #endif
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE
extern void _FaultInit(void); extern void z_FaultInit(void);
extern void _CpuIdleInit(void); extern void z_CpuIdleInit(void);
#ifdef CONFIG_ARM_MPU #ifdef CONFIG_ARM_MPU
extern void z_arch_configure_static_mpu_regions(void); extern void z_arch_configure_static_mpu_regions(void);
extern void z_arch_configure_dynamic_mpu_regions(struct k_thread *thread); extern void z_arch_configure_dynamic_mpu_regions(struct k_thread *thread);
@ -36,10 +36,10 @@ extern void z_arch_configure_dynamic_mpu_regions(struct k_thread *thread);
static ALWAYS_INLINE void kernel_arch_init(void) static ALWAYS_INLINE void kernel_arch_init(void)
{ {
_InterruptStackSetup(); z_InterruptStackSetup();
_ExcSetup(); z_ExcSetup();
_FaultInit(); z_FaultInit();
_CpuIdleInit(); z_CpuIdleInit();
} }
static ALWAYS_INLINE void static ALWAYS_INLINE void
@ -134,9 +134,9 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
extern void k_cpu_atomic_idle(unsigned int key); extern void k_cpu_atomic_idle(unsigned int key);
#define z_is_in_isr() _IsInIsr() #define z_is_in_isr() z_IsInIsr()
extern FUNC_NORETURN void _arm_userspace_enter(k_thread_entry_t user_entry, extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3, void *p1, void *p2, void *p3,
u32_t stack_end, u32_t stack_end,
u32_t stack_start); u32_t stack_start);

View file

@ -25,7 +25,7 @@ extern "C" {
* *
* @return The key of the interrupt that is currently being processed. * @return The key of the interrupt that is currently being processed.
*/ */
int _sys_current_irq_key_get(void) int z_sys_current_irq_key_get(void)
{ {
return __get_IPSR(); return __get_IPSR();
} }

View file

@ -15,23 +15,23 @@
* text to memory, such as a boot copier or runtime synthesis of code. If the * text to memory, such as a boot copier or runtime synthesis of code. If the
* new text was written with instructions that do not bypass cache memories, * new text was written with instructions that do not bypass cache memories,
* this should immediately be followed by an invocation of * this should immediately be followed by an invocation of
* _nios2_dcache_flush_all() so that cached instruction data is committed to * z_nios2_dcache_flush_all() so that cached instruction data is committed to
* RAM. * RAM.
* *
* See Chapter 9 of the Nios II Gen 2 Software Developer's Handbook for more * See Chapter 9 of the Nios II Gen 2 Software Developer's Handbook for more
* information on cache considerations. * information on cache considerations.
*/ */
#if ALT_CPU_ICACHE_SIZE > 0 #if ALT_CPU_ICACHE_SIZE > 0
void _nios2_icache_flush_all(void) void z_nios2_icache_flush_all(void)
{ {
u32_t i; u32_t i;
for (i = 0U; i < ALT_CPU_ICACHE_SIZE; i += ALT_CPU_ICACHE_LINE_SIZE) { for (i = 0U; i < ALT_CPU_ICACHE_SIZE; i += ALT_CPU_ICACHE_LINE_SIZE) {
_nios2_icache_flush(i); z_nios2_icache_flush(i);
} }
/* Get rid of any stale instructions in the pipeline */ /* Get rid of any stale instructions in the pipeline */
_nios2_pipeline_flush(); z_nios2_pipeline_flush();
} }
#endif #endif
@ -51,26 +51,26 @@ void _nios2_icache_flush_all(void)
* information on cache considerations. * information on cache considerations.
*/ */
#if ALT_CPU_DCACHE_SIZE > 0 #if ALT_CPU_DCACHE_SIZE > 0
void _nios2_dcache_flush_all(void) void z_nios2_dcache_flush_all(void)
{ {
u32_t i; u32_t i;
for (i = 0U; i < ALT_CPU_DCACHE_SIZE; i += ALT_CPU_DCACHE_LINE_SIZE) { for (i = 0U; i < ALT_CPU_DCACHE_SIZE; i += ALT_CPU_DCACHE_LINE_SIZE) {
_nios2_dcache_flush(i); z_nios2_dcache_flush(i);
} }
} }
#endif #endif
/* /*
* _nios2_dcache_flush_no_writeback() is called to flush the data cache for a * z_nios2_dcache_flush_no_writeback() is called to flush the data cache for a
* memory region of length "len" bytes, starting at address "start". * memory region of length "len" bytes, starting at address "start".
* *
* Any dirty lines in the data cache are NOT written back to memory. * Any dirty lines in the data cache are NOT written back to memory.
* Make sure you really want this behavior. If you aren't 100% sure, * Make sure you really want this behavior. If you aren't 100% sure,
* use the _nios2_dcache_flush() routine instead. * use the z_nios2_dcache_flush() routine instead.
*/ */
#if ALT_CPU_DCACHE_SIZE > 0 #if ALT_CPU_DCACHE_SIZE > 0
void _nios2_dcache_flush_no_writeback(void *start, u32_t len) void z_nios2_dcache_flush_no_writeback(void *start, u32_t len)
{ {
u8_t *i; u8_t *i;
u8_t *end = ((char *) start) + len; u8_t *end = ((char *) start) + len;

View file

@ -15,7 +15,7 @@ GTEXT(_exception)
GTEXT(_Fault) GTEXT(_Fault)
GTEXT(__swap) GTEXT(__swap)
#ifdef CONFIG_IRQ_OFFLOAD #ifdef CONFIG_IRQ_OFFLOAD
GTEXT(_irq_do_offload) GTEXT(z_irq_do_offload)
GTEXT(_offload_routine) GTEXT(_offload_routine)
#endif #endif

View file

@ -174,7 +174,7 @@ FUNC_NORETURN void _Fault(const NANO_ESF *esf)
u32_t exc_reg, badaddr_reg, eccftl; u32_t exc_reg, badaddr_reg, eccftl;
enum nios2_exception_cause cause; enum nios2_exception_cause cause;
exc_reg = _nios2_creg_read(NIOS2_CR_EXCEPTION); exc_reg = z_nios2_creg_read(NIOS2_CR_EXCEPTION);
/* Bit 31 indicates potentially fatal ECC error */ /* Bit 31 indicates potentially fatal ECC error */
eccftl = (exc_reg & NIOS2_EXCEPTION_REG_ECCFTL_MASK) != 0U; eccftl = (exc_reg & NIOS2_EXCEPTION_REG_ECCFTL_MASK) != 0U;
@ -188,7 +188,7 @@ FUNC_NORETURN void _Fault(const NANO_ESF *esf)
printk("reason: %s\n", cause_str(cause)); printk("reason: %s\n", cause_str(cause));
#endif #endif
if (BIT(cause) & NIOS2_BADADDR_CAUSE_MASK) { if (BIT(cause) & NIOS2_BADADDR_CAUSE_MASK) {
badaddr_reg = _nios2_creg_read(NIOS2_CR_BADADDR); badaddr_reg = z_nios2_creg_read(NIOS2_CR_BADADDR);
printk("Badaddr: 0x%x\n", badaddr_reg); printk("Badaddr: 0x%x\n", badaddr_reg);
} }
#endif /* ALT_CPU_HAS_EXTRA_EXCEPTION_INFO */ #endif /* ALT_CPU_HAS_EXTRA_EXCEPTION_INFO */
@ -246,7 +246,7 @@ hang_system:
#endif #endif
#ifdef ALT_CPU_HAS_DEBUG_STUB #ifdef ALT_CPU_HAS_DEBUG_STUB
_nios2_break(); z_nios2_break();
#endif #endif
for (;;) { for (;;) {
k_cpu_idle(); k_cpu_idle();

View file

@ -25,7 +25,7 @@ void z_irq_spurious(void *unused)
{ {
ARG_UNUSED(unused); ARG_UNUSED(unused);
printk("Spurious interrupt detected! ipending: %x\n", printk("Spurious interrupt detected! ipending: %x\n",
_nios2_creg_read(NIOS2_CR_IPENDING)); z_nios2_creg_read(NIOS2_CR_IPENDING));
z_NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf); z_NanoFatalErrorHandler(_NANO_ERR_SPURIOUS_INT, &_default_esf);
} }
@ -37,9 +37,9 @@ void z_arch_irq_enable(unsigned int irq)
key = irq_lock(); key = irq_lock();
ienable = _nios2_creg_read(NIOS2_CR_IENABLE); ienable = z_nios2_creg_read(NIOS2_CR_IENABLE);
ienable |= BIT(irq); ienable |= BIT(irq);
_nios2_creg_write(NIOS2_CR_IENABLE, ienable); z_nios2_creg_write(NIOS2_CR_IENABLE, ienable);
irq_unlock(key); irq_unlock(key);
}; };
@ -53,9 +53,9 @@ void z_arch_irq_disable(unsigned int irq)
key = irq_lock(); key = irq_lock();
ienable = _nios2_creg_read(NIOS2_CR_IENABLE); ienable = z_nios2_creg_read(NIOS2_CR_IENABLE);
ienable &= ~BIT(irq); ienable &= ~BIT(irq);
_nios2_creg_write(NIOS2_CR_IENABLE, ienable); z_nios2_creg_write(NIOS2_CR_IENABLE, ienable);
irq_unlock(key); irq_unlock(key);
}; };
@ -80,7 +80,7 @@ void _enter_irq(u32_t ipending)
_kernel.nested++; _kernel.nested++;
#ifdef CONFIG_IRQ_OFFLOAD #ifdef CONFIG_IRQ_OFFLOAD
_irq_do_offload(); z_irq_do_offload();
#endif #endif
while (ipending) { while (ipending) {

View file

@ -15,7 +15,7 @@ static volatile void *offload_param;
* Just in case the offload routine itself generates an unhandled * Just in case the offload routine itself generates an unhandled
* exception, clear the offload_routine global before executing. * exception, clear the offload_routine global before executing.
*/ */
void _irq_do_offload(void) void z_irq_do_offload(void)
{ {
irq_offload_routine_t tmp; irq_offload_routine_t tmp;

View file

@ -39,13 +39,13 @@ void _PrepC(void)
/* In most XIP scenarios we copy the exception code into RAM, so need /* In most XIP scenarios we copy the exception code into RAM, so need
* to flush instruction cache. * to flush instruction cache.
*/ */
_nios2_icache_flush_all(); z_nios2_icache_flush_all();
#if ALT_CPU_ICACHE_SIZE > 0 #if ALT_CPU_ICACHE_SIZE > 0
/* Only need to flush the data cache here if there actually is an /* Only need to flush the data cache here if there actually is an
* instruction cache, so that the cached instruction data written is * instruction cache, so that the cached instruction data written is
* actually committed. * actually committed.
*/ */
_nios2_dcache_flush_all(); z_nios2_dcache_flush_all();
#endif #endif
#endif #endif
z_cstart(); z_cstart();

View file

@ -10,7 +10,7 @@
/* exports */ /* exports */
GTEXT(__swap) GTEXT(__swap)
GTEXT(_thread_entry_wrapper) GTEXT(z_thread_entry_wrapper)
/* imports */ /* imports */
GTEXT(z_sys_trace_thread_switched_in) GTEXT(z_sys_trace_thread_switched_in)
@ -171,9 +171,9 @@ no_unlock:
ret ret
/* void _thread_entry_wrapper(void) /* void z_thread_entry_wrapper(void)
*/ */
SECTION_FUNC(TEXT, _thread_entry_wrapper) SECTION_FUNC(TEXT, z_thread_entry_wrapper)
/* This all corresponds to struct init_stack_frame defined in /* This all corresponds to struct init_stack_frame defined in
* thread.c. We need to take this stuff off the stack and put * thread.c. We need to take this stuff off the stack and put
* it in the appropriate registers * it in the appropriate registers

View file

@ -14,12 +14,12 @@
* to z_thread_entry() since this arch puts the first four arguments * to z_thread_entry() since this arch puts the first four arguments
* in r4-r7 and not on the stack * in r4-r7 and not on the stack
*/ */
void _thread_entry_wrapper(k_thread_entry_t, void *, void *, void *); void z_thread_entry_wrapper(k_thread_entry_t, void *, void *, void *);
struct init_stack_frame { struct init_stack_frame {
/* top of the stack / most recently pushed */ /* top of the stack / most recently pushed */
/* Used by _thread_entry_wrapper, which pulls these off the stack and /* Used by z_thread_entry_wrapper, which pulls these off the stack and
* into argument registers before calling z_thread_entry() * into argument registers before calling z_thread_entry()
*/ */
k_thread_entry_t entry_point; k_thread_entry_t entry_point;
@ -41,7 +41,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
struct init_stack_frame *iframe; struct init_stack_frame *iframe;
_new_thread_init(thread, stack_memory, stack_size, priority, options); z_new_thread_init(thread, stack_memory, stack_size, priority, options);
/* Initial stack frame data, stored at the base of the stack */ /* Initial stack frame data, stored at the base of the stack */
iframe = (struct init_stack_frame *) iframe = (struct init_stack_frame *)
@ -54,7 +54,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
iframe->arg3 = arg3; iframe->arg3 = arg3;
thread->callee_saved.sp = (u32_t)iframe; thread->callee_saved.sp = (u32_t)iframe;
thread->callee_saved.ra = (u32_t)_thread_entry_wrapper; thread->callee_saved.ra = (u32_t)z_thread_entry_wrapper;
thread->callee_saved.key = NIOS2_STATUS_PIE_MSK; thread->callee_saved.key = NIOS2_STATUS_PIE_MSK;
/* Leave the rest of thread->callee_saved junk */ /* Leave the rest of thread->callee_saved junk */
} }
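In C terms, the wrapper referenced above does nothing more than pop the init_stack_frame fields and forward them, which is why z_new_thread() only has to fill the frame and point callee_saved.ra at the wrapper. A hedged sketch; the struct mirrors the init_stack_frame shown above, and thread_entry_sketch() stands in for the kernel's common z_thread_entry().

typedef void (*thread_entry_t)(void *p1, void *p2, void *p3);

struct init_frame_sketch {   /* mirrors struct init_stack_frame above */
        thread_entry_t entry_point;
        void *arg1;
        void *arg2;
        void *arg3;
};

/* Stand-in for z_thread_entry(), the common C entry point. */
static void thread_entry_sketch(thread_entry_t entry,
                                void *p1, void *p2, void *p3)
{
        entry(p1, p2, p3);
}

/* C equivalent of the assembly wrapper: take the values the architecture
 * left at the top of the new thread's stack and pass them on as arguments.
 */
static void thread_entry_wrapper_sketch(const struct init_frame_sketch *iframe)
{
        thread_entry_sketch(iframe->entry_point, iframe->arg1,
                            iframe->arg2, iframe->arg3);
}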

View file

@ -44,21 +44,21 @@ z_set_thread_return_value(struct k_thread *thread, unsigned int value)
#define z_is_in_isr() (_kernel.nested != 0U) #define z_is_in_isr() (_kernel.nested != 0U)
#ifdef CONFIG_IRQ_OFFLOAD #ifdef CONFIG_IRQ_OFFLOAD
void _irq_do_offload(void); void z_irq_do_offload(void);
#endif #endif
#if ALT_CPU_ICACHE_SIZE > 0 #if ALT_CPU_ICACHE_SIZE > 0
void _nios2_icache_flush_all(void); void z_nios2_icache_flush_all(void);
#else #else
#define _nios2_icache_flush_all() do { } while (0) #define z_nios2_icache_flush_all() do { } while (0)
#endif #endif
#if ALT_CPU_DCACHE_SIZE > 0 #if ALT_CPU_DCACHE_SIZE > 0
void _nios2_dcache_flush_all(void); void z_nios2_dcache_flush_all(void);
void _nios2_dcache_flush_no_writeback(void *start, u32_t len); void z_nios2_dcache_flush_no_writeback(void *start, u32_t len);
#else #else
#define _nios2_dcache_flush_all() do { } while (0) #define z_nios2_dcache_flush_all() do { } while (0)
#define _nios2_dcache_flush_no_writeback(x, y) do { } while (0) #define z_nios2_dcache_flush_no_writeback(x, y) do { } while (0)
#endif #endif
#endif /* _ASMLANGUAGE */ #endif /* _ASMLANGUAGE */

View file

@ -26,11 +26,11 @@ extern "C" {
* *
* @return The key of the interrupt that is currently being processed. * @return The key of the interrupt that is currently being processed.
*/ */
static inline int _sys_current_irq_key_get(void) static inline int z_sys_current_irq_key_get(void)
{ {
u32_t ipending; u32_t ipending;
ipending = _nios2_creg_read(NIOS2_CR_IPENDING); ipending = z_nios2_creg_read(NIOS2_CR_IPENDING);
return find_lsb_set(ipending) - 1; return find_lsb_set(ipending) - 1;
} }

View file

@ -166,7 +166,7 @@ static void posix_let_run(int next_allowed_th)
* Note that as we hold the mutex, they are going to be blocked until * Note that as we hold the mutex, they are going to be blocked until
* we reach our own posix_wait_until_allowed() while loop * we reach our own posix_wait_until_allowed() while loop
*/ */
_SAFE_CALL(pthread_cond_broadcast(&cond_threads)); PC_SAFE_CALL(pthread_cond_broadcast(&cond_threads));
} }
@ -175,7 +175,7 @@ static void posix_preexit_cleanup(void)
/* /*
* Release the mutex so the next allowed thread can run * Release the mutex so the next allowed thread can run
*/ */
_SAFE_CALL(pthread_mutex_unlock(&mtx_threads)); PC_SAFE_CALL(pthread_mutex_unlock(&mtx_threads));
/* We detach ourselves so nobody needs to join to us */ /* We detach ourselves so nobody needs to join to us */
pthread_detach(pthread_self()); pthread_detach(pthread_self());
@ -246,7 +246,7 @@ static void posix_cleanup_handler(void *arg)
#endif #endif
_SAFE_CALL(pthread_mutex_unlock(&mtx_threads)); PC_SAFE_CALL(pthread_mutex_unlock(&mtx_threads));
/* We detach ourselves so nobody needs to join to us */ /* We detach ourselves so nobody needs to join to us */
pthread_detach(pthread_self()); pthread_detach(pthread_self());
@ -271,7 +271,7 @@ static void *posix_thread_starter(void *arg)
* We block until all other running threads reach the while loop * We block until all other running threads reach the while loop
* in posix_wait_until_allowed() and they release the mutex * in posix_wait_until_allowed() and they release the mutex
*/ */
_SAFE_CALL(pthread_mutex_lock(&mtx_threads)); PC_SAFE_CALL(pthread_mutex_lock(&mtx_threads));
/* /*
* The program may have been finished before this thread ever got to run * The program may have been finished before this thread ever got to run
@ -372,7 +372,7 @@ void posix_new_thread(posix_thread_status_t *ptr)
threads_table[t_slot].thead_cnt = thread_create_count++; threads_table[t_slot].thead_cnt = thread_create_count++;
ptr->thread_idx = t_slot; ptr->thread_idx = t_slot;
_SAFE_CALL(pthread_create(&threads_table[t_slot].thread, PC_SAFE_CALL(pthread_create(&threads_table[t_slot].thread,
NULL, NULL,
posix_thread_starter, posix_thread_starter,
(void *)ptr)); (void *)ptr));
@ -403,7 +403,7 @@ void posix_init_multithreading(void)
threads_table_size = PC_ALLOC_CHUNK_SIZE; threads_table_size = PC_ALLOC_CHUNK_SIZE;
_SAFE_CALL(pthread_mutex_lock(&mtx_threads)); PC_SAFE_CALL(pthread_mutex_lock(&mtx_threads));
} }
/** /**

View file

@ -57,7 +57,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
posix_thread_status_t *thread_status; posix_thread_status_t *thread_status;
_new_thread_init(thread, stack_memory, stack_size, priority, options); z_new_thread_init(thread, stack_memory, stack_size, priority, options);
/* We store it in the same place where normal archs store the /* We store it in the same place where normal archs store the
* "initial stack frame" * "initial stack frame"

View file

@ -9,13 +9,13 @@
#include "toolchain.h" #include "toolchain.h"
#define _SAFE_CALL(a) _safe_call(a, #a) #define PC_SAFE_CALL(a) pc_safe_call(a, #a)
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
static inline void _safe_call(int test, const char *test_str) static inline void pc_safe_call(int test, const char *test_str)
{ {
/* LCOV_EXCL_START */ /* See Note1 */ /* LCOV_EXCL_START */ /* See Note1 */
if (unlikely(test)) { if (unlikely(test)) {
@ -34,6 +34,6 @@ static inline void _safe_call(int test, const char *test_str)
/* /*
* Note 1: * Note 1:
* *
* All checks for the host pthreads functions which are wrapped by _SAFE_CALL * All checks for the host pthreads functions which are wrapped by PC_SAFE_CALL
* are meant to never fail, and therefore will not be covered. * are meant to never fail, and therefore will not be covered.
*/ */
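The point of passing both a and #a above is that a failing pthread call can be reported with its exact source text. A self-contained re-creation of that pattern; the fprintf/exit error path is illustrative, since the real pc_safe_call() error reporting is outside this hunk.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define SAFE_CALL(a) safe_call(a, #a)   /* same shape as PC_SAFE_CALL */

static inline void safe_call(int rc, const char *call_text)
{
        if (rc != 0) {
                fprintf(stderr, "FATAL: %s failed (rc=%d)\n", call_text, rc);
                exit(EXIT_FAILURE);
        }
}

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
        SAFE_CALL(pthread_mutex_lock(&lock));    /* aborts with a message on failure */
        SAFE_CALL(pthread_mutex_unlock(&lock));
        return 0;
}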

View file

@ -27,7 +27,7 @@ extern "C" {
* *
* @return The key of the interrupt that is currently being processed. * @return The key of the interrupt that is currently being processed.
*/ */
static inline int _sys_current_irq_key_get(void) static inline int z_sys_current_irq_key_get(void)
{ {
return posix_get_current_irq(); return posix_get_current_irq();
} }

View file

@ -17,7 +17,7 @@ static volatile void *offload_param;
* Just in case the offload routine itself generates an unhandled * Just in case the offload routine itself generates an unhandled
* exception, clear the offload_routine global before executing. * exception, clear the offload_routine global before executing.
*/ */
void _irq_do_offload(void) void z_irq_do_offload(void)
{ {
irq_offload_routine_t tmp; irq_offload_routine_t tmp;

View file

@ -218,12 +218,12 @@ on_irq_stack:
beqz t1, call_irq beqz t1, call_irq
/* /*
* Call _irq_do_offload to handle IRQ offloading. * Call z_irq_do_offload to handle IRQ offloading.
* Set return address to on_thread_stack in order to jump there * Set return address to on_thread_stack in order to jump there
* upon returning from _irq_do_offload * upon returning from z_irq_do_offload
*/ */
la ra, on_thread_stack la ra, on_thread_stack
tail _irq_do_offload tail z_irq_do_offload
call_irq: call_irq:
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING

View file

@ -10,7 +10,7 @@
/* exports */ /* exports */
GTEXT(__swap) GTEXT(__swap)
GTEXT(_thread_entry_wrapper) GTEXT(z_thread_entry_wrapper)
/* Use ABI name of registers for the sake of simplicity */ /* Use ABI name of registers for the sake of simplicity */
@ -103,11 +103,11 @@ SECTION_FUNC(exception.other, __swap)
/* /*
* void _thread_entry_wrapper(k_thread_entry_t, void *, void *, void *) * void z_thread_entry_wrapper(k_thread_entry_t, void *, void *, void *)
*/ */
SECTION_FUNC(TEXT, _thread_entry_wrapper) SECTION_FUNC(TEXT, z_thread_entry_wrapper)
/* /*
* _thread_entry_wrapper is called for every new thread upon the return * z_thread_entry_wrapper is called for every new thread upon the return
* of __swap or ISR. Its address, as well as its input function * of __swap or ISR. Its address, as well as its input function
* arguments thread_entry_t, void *, void *, void * are restored from * arguments thread_entry_t, void *, void *, void * are restored from
* the thread stack (initialized via function _thread). * the thread stack (initialized via function _thread).

View file

@ -10,7 +10,7 @@
#include <wait_q.h> #include <wait_q.h>
#include <string.h> #include <string.h>
void _thread_entry_wrapper(k_thread_entry_t thread, void z_thread_entry_wrapper(k_thread_entry_t thread,
void *arg1, void *arg1,
void *arg2, void *arg2,
void *arg3); void *arg3);
@ -25,7 +25,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
struct __esf *stack_init; struct __esf *stack_init;
_new_thread_init(thread, stack_memory, stack_size, priority, options); z_new_thread_init(thread, stack_memory, stack_size, priority, options);
/* Initial stack frame for thread */ /* Initial stack frame for thread */
stack_init = (struct __esf *) stack_init = (struct __esf *)
@ -50,18 +50,18 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* within the RISCV32 architecture implementation, initially set: * within the RISCV32 architecture implementation, initially set:
* 1) MSTATUS to SOC_MSTATUS_DEF_RESTORE in the thread stack to enable * 1) MSTATUS to SOC_MSTATUS_DEF_RESTORE in the thread stack to enable
* interrupts when the newly created thread will be scheduled; * interrupts when the newly created thread will be scheduled;
* 2) MEPC to the address of the _thread_entry_wrapper in the thread * 2) MEPC to the address of the z_thread_entry_wrapper in the thread
* stack. * stack.
* Hence, when going out of an interrupt/exception/context-switch, * Hence, when going out of an interrupt/exception/context-switch,
* after scheduling the newly created thread: * after scheduling the newly created thread:
* 1) interrupts will be enabled, as the MSTATUS register will be * 1) interrupts will be enabled, as the MSTATUS register will be
* restored following the MSTATUS value set within the thread stack; * restored following the MSTATUS value set within the thread stack;
* 2) the core will jump to _thread_entry_wrapper, as the program * 2) the core will jump to z_thread_entry_wrapper, as the program
* counter will be restored following the MEPC value set within the * counter will be restored following the MEPC value set within the
* thread stack. * thread stack.
*/ */
stack_init->mstatus = SOC_MSTATUS_DEF_RESTORE; stack_init->mstatus = SOC_MSTATUS_DEF_RESTORE;
stack_init->mepc = (u32_t)_thread_entry_wrapper; stack_init->mepc = (u32_t)z_thread_entry_wrapper;
thread->callee_saved.sp = (u32_t)stack_init; thread->callee_saved.sp = (u32_t)stack_init;
} }

View file

@ -44,7 +44,7 @@ FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
#define z_is_in_isr() (_kernel.nested != 0U) #define z_is_in_isr() (_kernel.nested != 0U)
#ifdef CONFIG_IRQ_OFFLOAD #ifdef CONFIG_IRQ_OFFLOAD
int _irq_do_offload(void); int z_irq_do_offload(void);
#endif #endif
#endif /* _ASMLANGUAGE */ #endif /* _ASMLANGUAGE */

View file

@ -26,7 +26,7 @@ extern "C" {
* *
* @return The key of the interrupt that is currently being processed. * @return The key of the interrupt that is currently being processed.
*/ */
static inline int _sys_current_irq_key_get(void) static inline int z_sys_current_irq_key_get(void)
{ {
u32_t mcause; u32_t mcause;

View file

@ -63,10 +63,10 @@ _sys_cache_flush_sig(_cache_flush_clflush)
_sys_cache_flush_t *sys_cache_flush; _sys_cache_flush_t *sys_cache_flush;
static void init_cache_flush(void) static void init_cache_flush(void)
{ {
if (_is_clflush_available()) { if (z_is_clflush_available()) {
sys_cache_flush = _cache_flush_clflush; sys_cache_flush = _cache_flush_clflush;
} else { } else {
sys_cache_flush = _cache_flush_wbinvd; sys_cache_flush = z_cache_flush_wbinvd;
} }
} }
#else #else
@ -83,7 +83,7 @@ FUNC_ALIAS(_cache_flush_clflush, sys_cache_flush, void);
size_t sys_cache_line_size; size_t sys_cache_line_size;
static void init_cache_line_size(void) static void init_cache_line_size(void)
{ {
sys_cache_line_size = _cache_line_size_get(); sys_cache_line_size = z_cache_line_size_get();
} }
#else #else
#define init_cache_line_size() do { } while ((0)) #define init_cache_line_size() do { } while ((0))
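Once init_cache_flush() has bound sys_cache_flush to whichever routine CPUID says is available, callers simply go through that symbol; whether it ends up as a function pointer (CONFIG_CLFLUSH_DETECT) or a FUNC_ALIAS makes no difference at the call site. A minimal usage sketch follows; the buffer and wrapper names are illustrative, not part of this change:

/* Illustrative only: flush a buffer through whichever implementation
 * init_cache_flush() selected at boot (clflush-based or wbinvd-based).
 */
static u8_t dma_buf[64];

static void flush_dma_buf(void)
{
	sys_cache_flush((vaddr_t)dma_buf, sizeof(dma_buf));
}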

View file

@ -16,12 +16,12 @@
#if defined(CONFIG_CLFLUSH_DETECT) #if defined(CONFIG_CLFLUSH_DETECT)
#define CACHE_FLUSH_NAME _cache_flush_wbinvd #define CACHE_FLUSH_NAME z_cache_flush_wbinvd
#define CPUID_CFLSH_BIT (1 << 19) #define CPUID_CFLSH_BIT (1 << 19)
GTEXT(_is_clflush_available) GTEXT(z_is_clflush_available)
SECTION_FUNC(TEXT, _is_clflush_available) SECTION_FUNC(TEXT, z_is_clflush_available)
pushl %ebx pushl %ebx
movl $1, %eax movl $1, %eax
cpuid cpuid
@ -62,9 +62,9 @@ SECTION_FUNC(TEXT, CACHE_FLUSH_NAME)
#define CPUID_CACHE_LINE_MASK (0xff << 8) #define CPUID_CACHE_LINE_MASK (0xff << 8)
GTEXT(_cache_line_size_get) GTEXT(z_cache_line_size_get)
SECTION_FUNC(TEXT, _cache_line_size_get) SECTION_FUNC(TEXT, z_cache_line_size_get)
pushl %ebx pushl %ebx
movl $1, %eax movl $1, %eax
cpuid cpuid

View file

@ -26,7 +26,7 @@
GTEXT(_kernel_oops_handler) GTEXT(_kernel_oops_handler)
/* externs (internal APIs) */ /* externs (internal APIs) */
GTEXT(_do_kernel_oops) GTEXT(z_do_kernel_oops)
/** /**
* *
@ -230,6 +230,6 @@ nestedException:
#if CONFIG_X86_KERNEL_OOPS #if CONFIG_X86_KERNEL_OOPS
SECTION_FUNC(TEXT, _kernel_oops_handler) SECTION_FUNC(TEXT, _kernel_oops_handler)
push $0 /* dummy error code */ push $0 /* dummy error code */
push $_do_kernel_oops push $z_do_kernel_oops
jmp _exception_enter jmp _exception_enter
#endif #endif

View file

@ -24,7 +24,7 @@
#include <exc_handle.h> #include <exc_handle.h>
#include <logging/log_ctrl.h> #include <logging/log_ctrl.h>
__weak void _debug_fatal_hook(const NANO_ESF *esf) { ARG_UNUSED(esf); } __weak void z_debug_fatal_hook(const NANO_ESF *esf) { ARG_UNUSED(esf); }
#ifdef CONFIG_THREAD_STACK_INFO #ifdef CONFIG_THREAD_STACK_INFO
/** /**
@ -139,7 +139,7 @@ FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
{ {
LOG_PANIC(); LOG_PANIC();
_debug_fatal_hook(pEsf); z_debug_fatal_hook(pEsf);
#ifdef CONFIG_PRINTK #ifdef CONFIG_PRINTK
@ -150,7 +150,7 @@ FUNC_NORETURN void z_NanoFatalErrorHandler(unsigned int reason,
break; break;
case _NANO_ERR_SPURIOUS_INT: { case _NANO_ERR_SPURIOUS_INT: {
int vector = _irq_controller_isr_vector_get(); int vector = z_irq_controller_isr_vector_get();
printk("***** Unhandled interrupt vector "); printk("***** Unhandled interrupt vector ");
if (vector >= 0) { if (vector >= 0) {
@ -229,7 +229,7 @@ FUNC_NORETURN void z_arch_syscall_oops(void *ssf_ptr)
} }
#ifdef CONFIG_X86_KERNEL_OOPS #ifdef CONFIG_X86_KERNEL_OOPS
FUNC_NORETURN void _do_kernel_oops(const NANO_ESF *esf) FUNC_NORETURN void z_do_kernel_oops(const NANO_ESF *esf)
{ {
u32_t *stack_ptr = (u32_t *)esf->esp; u32_t *stack_ptr = (u32_t *)esf->esp;
z_NanoFatalErrorHandler(*stack_ptr, esf); z_NanoFatalErrorHandler(*stack_ptr, esf);
@ -289,11 +289,11 @@ FUNC_NORETURN void handle_exc_##vector(const NANO_ESF *pEsf) \
generic_exc_handle(vector, pEsf); \ generic_exc_handle(vector, pEsf); \
} }
#define _EXC_FUNC_CODE(vector) \ #define Z_EXC_FUNC_CODE(vector) \
_EXC_FUNC(vector) \ _EXC_FUNC(vector) \
_EXCEPTION_CONNECT_CODE(handle_exc_##vector, vector) _EXCEPTION_CONNECT_CODE(handle_exc_##vector, vector)
#define _EXC_FUNC_NOCODE(vector) \ #define Z_EXC_FUNC_NOCODE(vector) \
_EXC_FUNC(vector) \ _EXC_FUNC(vector) \
_EXCEPTION_CONNECT_NOCODE(handle_exc_##vector, vector) _EXCEPTION_CONNECT_NOCODE(handle_exc_##vector, vector)
@ -301,10 +301,10 @@ FUNC_NORETURN void handle_exc_##vector(const NANO_ESF *pEsf) \
* the handle_exc_##vector * the handle_exc_##vector
*/ */
#define EXC_FUNC_NOCODE(vector) \ #define EXC_FUNC_NOCODE(vector) \
_EXC_FUNC_NOCODE(vector) Z_EXC_FUNC_NOCODE(vector)
#define EXC_FUNC_CODE(vector) \ #define EXC_FUNC_CODE(vector) \
_EXC_FUNC_CODE(vector) Z_EXC_FUNC_CODE(vector)
EXC_FUNC_NOCODE(IV_DIVIDE_ERROR); EXC_FUNC_NOCODE(IV_DIVIDE_ERROR);
EXC_FUNC_NOCODE(IV_NON_MASKABLE_INTERRUPT); EXC_FUNC_NOCODE(IV_NON_MASKABLE_INTERRUPT);
@ -426,12 +426,12 @@ _EXCEPTION_CONNECT_CODE(page_fault_handler, IV_PAGE_FAULT);
static __noinit volatile NANO_ESF _df_esf; static __noinit volatile NANO_ESF _df_esf;
/* Very tiny stack; just enough for the bogus error code pushed by the CPU /* Very tiny stack; just enough for the bogus error code pushed by the CPU
* and a frame pointer push by the compiler. All _df_handler_top does is * and a frame pointer push by the compiler. All df_handler_top does is
* shuffle some data around with 'mov' statements and then 'iret'. * shuffle some data around with 'mov' statements and then 'iret'.
*/ */
static __noinit char _df_stack[8]; static __noinit char _df_stack[8];
static FUNC_NORETURN __used void _df_handler_top(void); static FUNC_NORETURN __used void df_handler_top(void);
#ifdef CONFIG_X86_KPTI #ifdef CONFIG_X86_KPTI
extern char z_trampoline_stack_end[]; extern char z_trampoline_stack_end[];
@ -457,18 +457,18 @@ struct task_state_segment _df_tss = {
.ds = DATA_SEG, .ds = DATA_SEG,
.es = DATA_SEG, .es = DATA_SEG,
.ss = DATA_SEG, .ss = DATA_SEG,
.eip = (u32_t)_df_handler_top, .eip = (u32_t)df_handler_top,
.cr3 = (u32_t)&z_x86_kernel_pdpt .cr3 = (u32_t)&z_x86_kernel_pdpt
}; };
static FUNC_NORETURN __used void _df_handler_bottom(void) static FUNC_NORETURN __used void df_handler_bottom(void)
{ {
/* We're back in the main hardware task on the interrupt stack */ /* We're back in the main hardware task on the interrupt stack */
int reason = _NANO_ERR_CPU_EXCEPTION; int reason = _NANO_ERR_CPU_EXCEPTION;
/* Restore the top half so it is runnable again */ /* Restore the top half so it is runnable again */
_df_tss.esp = (u32_t)(_df_stack + sizeof(_df_stack)); _df_tss.esp = (u32_t)(_df_stack + sizeof(_df_stack));
_df_tss.eip = (u32_t)_df_handler_top; _df_tss.eip = (u32_t)df_handler_top;
printk("***** Double Fault *****\n"); printk("***** Double Fault *****\n");
#ifdef CONFIG_THREAD_STACK_INFO #ifdef CONFIG_THREAD_STACK_INFO
@ -479,7 +479,7 @@ static FUNC_NORETURN __used void _df_handler_bottom(void)
z_NanoFatalErrorHandler(reason, (NANO_ESF *)&_df_esf); z_NanoFatalErrorHandler(reason, (NANO_ESF *)&_df_esf);
} }
static FUNC_NORETURN __used void _df_handler_top(void) static FUNC_NORETURN __used void df_handler_top(void)
{ {
/* State of the system when the double-fault forced a task switch /* State of the system when the double-fault forced a task switch
* will be in _main_tss. Set up a NANO_ESF and copy system state into * will be in _main_tss. Set up a NANO_ESF and copy system state into
@ -505,12 +505,12 @@ static FUNC_NORETURN __used void _df_handler_top(void)
_main_tss.ds = DATA_SEG; _main_tss.ds = DATA_SEG;
_main_tss.es = DATA_SEG; _main_tss.es = DATA_SEG;
_main_tss.ss = DATA_SEG; _main_tss.ss = DATA_SEG;
_main_tss.eip = (u32_t)_df_handler_bottom; _main_tss.eip = (u32_t)df_handler_bottom;
_main_tss.cr3 = (u32_t)&z_x86_kernel_pdpt; _main_tss.cr3 = (u32_t)&z_x86_kernel_pdpt;
_main_tss.eflags = 0U; _main_tss.eflags = 0U;
/* NT bit is set in EFLAGS so we will task switch back to _main_tss /* NT bit is set in EFLAGS so we will task switch back to _main_tss
* and run _df_handler_bottom * and run df_handler_bottom
*/ */
__asm__ volatile ("iret"); __asm__ volatile ("iret");
CODE_UNREACHABLE; CODE_UNREACHABLE;

View file

@ -57,15 +57,15 @@ extern u32_t _sse_mxcsr_default_value;
* specified thread control block. The SSE registers are saved only if the * specified thread control block. The SSE registers are saved only if the
* thread is actually using them. * thread is actually using them.
*/ */
static void _FpCtxSave(struct k_thread *thread) static void FpCtxSave(struct k_thread *thread)
{ {
#ifdef CONFIG_SSE #ifdef CONFIG_SSE
if ((thread->base.user_options & K_SSE_REGS) != 0) { if ((thread->base.user_options & K_SSE_REGS) != 0) {
_do_fp_and_sse_regs_save(&thread->arch.preempFloatReg); z_do_fp_and_sse_regs_save(&thread->arch.preempFloatReg);
return; return;
} }
#endif #endif
_do_fp_regs_save(&thread->arch.preempFloatReg); z_do_fp_regs_save(&thread->arch.preempFloatReg);
} }
/* /*
@ -74,12 +74,12 @@ static void _FpCtxSave(struct k_thread *thread)
* This routine initializes the system's "live" floating point context. * This routine initializes the system's "live" floating point context.
* The SSE registers are initialized only if the thread is actually using them. * The SSE registers are initialized only if the thread is actually using them.
*/ */
static inline void _FpCtxInit(struct k_thread *thread) static inline void FpCtxInit(struct k_thread *thread)
{ {
_do_fp_regs_init(); z_do_fp_regs_init();
#ifdef CONFIG_SSE #ifdef CONFIG_SSE
if ((thread->base.user_options & K_SSE_REGS) != 0) { if ((thread->base.user_options & K_SSE_REGS) != 0) {
_do_sse_regs_init(); z_do_sse_regs_init();
} }
#endif #endif
} }
@ -123,13 +123,13 @@ void k_float_enable(struct k_thread *thread, unsigned int options)
fp_owner = _kernel.current_fp; fp_owner = _kernel.current_fp;
if (fp_owner != NULL) { if (fp_owner != NULL) {
if ((fp_owner->base.thread_state & _INT_OR_EXC_MASK) != 0) { if ((fp_owner->base.thread_state & _INT_OR_EXC_MASK) != 0) {
_FpCtxSave(fp_owner); FpCtxSave(fp_owner);
} }
} }
/* Now create a virgin FP context */ /* Now create a virgin FP context */
_FpCtxInit(thread); FpCtxInit(thread);
/* Associate the new FP context with the specified thread */ /* Associate the new FP context with the specified thread */
@ -157,7 +157,7 @@ void k_float_enable(struct k_thread *thread, unsigned int options)
*/ */
_kernel.current_fp = thread; _kernel.current_fp = thread;
_FpAccessDisable(); z_FpAccessDisable();
} else { } else {
/* /*
* We are FP-capable (and thus had FPU ownership on * We are FP-capable (and thus had FPU ownership on
@ -176,7 +176,7 @@ void k_float_enable(struct k_thread *thread, unsigned int options)
* handling an interrupt or exception.) * handling an interrupt or exception.)
*/ */
_FpCtxSave(thread); FpCtxSave(thread);
} }
} }
@ -205,7 +205,7 @@ void k_float_disable(struct k_thread *thread)
thread->base.user_options &= ~_FP_USER_MASK; thread->base.user_options &= ~_FP_USER_MASK;
if (thread == _current) { if (thread == _current) {
_FpAccessDisable(); z_FpAccessDisable();
_kernel.current_fp = (struct k_thread *)0; _kernel.current_fp = (struct k_thread *)0;
} else { } else {
if (_kernel.current_fp == thread) { if (_kernel.current_fp == thread) {

View file

@ -23,8 +23,8 @@
/* exports (internal APIs) */ /* exports (internal APIs) */
GTEXT(_interrupt_enter) GTEXT(_interrupt_enter)
GTEXT(_SpuriousIntNoErrCodeHandler) GTEXT(z_SpuriousIntNoErrCodeHandler)
GTEXT(_SpuriousIntHandler) GTEXT(z_SpuriousIntHandler)
GTEXT(_irq_sw_handler) GTEXT(_irq_sw_handler)
GTEXT(z_dynamic_stubs_begin) GTEXT(z_dynamic_stubs_begin)
@ -389,11 +389,11 @@ handle_idle:
/** /**
* *
* _SpuriousIntHandler - * z_SpuriousIntHandler -
* @brief Spurious interrupt handler stubs * @brief Spurious interrupt handler stubs
* *
* Interrupt-gate descriptors are statically created for all slots in the IDT * Interrupt-gate descriptors are statically created for all slots in the IDT
* that point to _SpuriousIntHandler() or _SpuriousIntNoErrCodeHandler(). The * that point to z_SpuriousIntHandler() or z_SpuriousIntNoErrCodeHandler(). The
* former stub is connected to exception vectors where the processor pushes an * former stub is connected to exception vectors where the processor pushes an
* error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP * error code onto the stack (or kernel stack) in addition to the EFLAGS/CS/EIP
* records. * records.
@ -408,23 +408,23 @@ handle_idle:
* *
* C function prototype: * C function prototype:
* *
* void _SpuriousIntHandler (void); * void z_SpuriousIntHandler (void);
* *
* INTERNAL * INTERNAL
* The gen_idt tool creates an interrupt-gate descriptor for all * The gen_idt tool creates an interrupt-gate descriptor for all
* connections. The processor will automatically clear the IF bit * connections. The processor will automatically clear the IF bit
* in the EFLAGS register upon execution of the handler, * in the EFLAGS register upon execution of the handler,
* thus _SpuriousIntNoErrCodeHandler()/_SpuriousIntHandler() shall be * thus z_SpuriousIntNoErrCodeHandler()/z_SpuriousIntHandler() shall be
* invoked with interrupts disabled. * invoked with interrupts disabled.
*/ */
SECTION_FUNC(TEXT, _SpuriousIntNoErrCodeHandler) SECTION_FUNC(TEXT, z_SpuriousIntNoErrCodeHandler)
pushl $0 /* push dummy err code onto stk */ pushl $0 /* push dummy err code onto stk */
/* fall through to _SpuriousIntHandler */ /* fall through to z_SpuriousIntHandler */
SECTION_FUNC(TEXT, _SpuriousIntHandler) SECTION_FUNC(TEXT, z_SpuriousIntHandler)
cld /* Clear direction flag */ cld /* Clear direction flag */
@ -464,7 +464,7 @@ SECTION_FUNC(TEXT, _SpuriousIntHandler)
#if CONFIG_IRQ_OFFLOAD #if CONFIG_IRQ_OFFLOAD
SECTION_FUNC(TEXT, _irq_sw_handler) SECTION_FUNC(TEXT, _irq_sw_handler)
push $0 push $0
push $_irq_do_offload push $z_irq_do_offload
jmp _interrupt_enter jmp _interrupt_enter
#endif #endif

View file

@ -24,19 +24,19 @@
#include <kswap.h> #include <kswap.h>
#include <arch/x86/segmentation.h> #include <arch/x86/segmentation.h>
extern void _SpuriousIntHandler(void *handler); extern void z_SpuriousIntHandler(void *handler);
extern void _SpuriousIntNoErrCodeHandler(void *handler); extern void z_SpuriousIntNoErrCodeHandler(void *handler);
/* /*
* Place the addresses of the spurious interrupt handlers into the intList * Place the addresses of the spurious interrupt handlers into the intList
* section. The genIdt tool can then populate any unused vectors with * section. The genIdt tool can then populate any unused vectors with
* these routines. * these routines.
*/ */
void *__attribute__((section(".spurIsr"))) MK_ISR_NAME(_SpuriousIntHandler) = void *__attribute__((section(".spurIsr"))) MK_ISR_NAME(z_SpuriousIntHandler) =
&_SpuriousIntHandler; &z_SpuriousIntHandler;
void *__attribute__((section(".spurNoErrIsr"))) void *__attribute__((section(".spurNoErrIsr")))
MK_ISR_NAME(_SpuriousIntNoErrCodeHandler) = MK_ISR_NAME(z_SpuriousIntNoErrCodeHandler) =
&_SpuriousIntNoErrCodeHandler; &z_SpuriousIntNoErrCodeHandler;
/* FIXME: IRQ direct inline functions have to be placed here and not in /* FIXME: IRQ direct inline functions have to be placed here and not in
* arch/cpu.h as inline functions due to nasty circular dependency between * arch/cpu.h as inline functions due to nasty circular dependency between
@ -72,7 +72,7 @@ void z_arch_isr_direct_header(void)
void z_arch_isr_direct_footer(int swap) void z_arch_isr_direct_footer(int swap)
{ {
_irq_controller_eoi(); z_irq_controller_eoi();
z_int_latency_stop(); z_int_latency_stop();
sys_trace_isr_exit(); sys_trace_isr_exit();
--_kernel.nested; --_kernel.nested;
@ -249,11 +249,11 @@ static void idt_vector_install(int vector, void *irq_handler)
int key; int key;
key = irq_lock(); key = irq_lock();
_init_irq_gate(&z_x86_idt.entries[vector], CODE_SEG, z_init_irq_gate(&z_x86_idt.entries[vector], CODE_SEG,
(u32_t)irq_handler, 0); (u32_t)irq_handler, 0);
#ifdef CONFIG_MVIC #ifdef CONFIG_MVIC
/* MVIC requires IDT be reloaded if the entries table is ever changed */ /* MVIC requires IDT be reloaded if the entries table is ever changed */
_set_idt(&z_x86_idt); z_set_idt(&z_x86_idt);
#endif #endif
irq_unlock(key); irq_unlock(key);
} }

View file

@ -20,7 +20,7 @@ static irq_offload_routine_t offload_routine;
static void *offload_param; static void *offload_param;
/* Called by asm stub */ /* Called by asm stub */
void _irq_do_offload(void) void z_irq_do_offload(void)
{ {
offload_routine(offload_param); offload_routine(offload_param);
} }

View file

@ -58,9 +58,9 @@ static int spec_ctrl_init(struct device *dev)
} }
#endif #endif
if (enable_bits != 0U) { if (enable_bits != 0U) {
u64_t cur = _x86_msr_read(IA32_SPEC_CTRL_MSR); u64_t cur = z_x86_msr_read(IA32_SPEC_CTRL_MSR);
_x86_msr_write(IA32_SPEC_CTRL_MSR, z_x86_msr_write(IA32_SPEC_CTRL_MSR,
cur | enable_bits); cur | enable_bits);
} }

View file

@ -24,7 +24,7 @@
/* exports (internal APIs) */ /* exports (internal APIs) */
GTEXT(__swap) GTEXT(__swap)
GTEXT(_x86_thread_entry_wrapper) GTEXT(z_x86_thread_entry_wrapper)
GTEXT(_x86_user_thread_entry_wrapper) GTEXT(_x86_user_thread_entry_wrapper)
/* externs */ /* externs */
@ -479,7 +479,7 @@ time_read_not_needed:
* @return this routine does NOT return. * @return this routine does NOT return.
*/ */
SECTION_FUNC(TEXT, _x86_thread_entry_wrapper) SECTION_FUNC(TEXT, z_x86_thread_entry_wrapper)
#ifdef CONFIG_X86_IAMCU #ifdef CONFIG_X86_IAMCU
/* IAMCU calling convention has first 3 arguments supplied in /* IAMCU calling convention has first 3 arguments supplied in
* registers not the stack * registers not the stack

View file

@ -69,7 +69,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
Z_ASSERT_VALID_PRIO(priority, entry); Z_ASSERT_VALID_PRIO(priority, entry);
stack_buf = K_THREAD_STACK_BUFFER(stack); stack_buf = K_THREAD_STACK_BUFFER(stack);
_new_thread_init(thread, stack_buf, stack_size, priority, options); z_new_thread_init(thread, stack_buf, stack_size, priority, options);
#if CONFIG_X86_USERSPACE #if CONFIG_X86_USERSPACE
if ((options & K_USER) == 0U) { if ((options & K_USER) == 0U) {
@ -103,7 +103,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
if ((options & K_USER) != 0U) { if ((options & K_USER) != 0U) {
#ifdef _THREAD_WRAPPER_REQUIRED #ifdef _THREAD_WRAPPER_REQUIRED
initial_frame->edi = (u32_t)z_arch_user_mode_enter; initial_frame->edi = (u32_t)z_arch_user_mode_enter;
initial_frame->thread_entry = _x86_thread_entry_wrapper; initial_frame->thread_entry = z_x86_thread_entry_wrapper;
#else #else
initial_frame->thread_entry = z_arch_user_mode_enter; initial_frame->thread_entry = z_arch_user_mode_enter;
#endif /* _THREAD_WRAPPER_REQUIRED */ #endif /* _THREAD_WRAPPER_REQUIRED */
@ -112,7 +112,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
{ {
#ifdef _THREAD_WRAPPER_REQUIRED #ifdef _THREAD_WRAPPER_REQUIRED
initial_frame->edi = (u32_t)z_thread_entry; initial_frame->edi = (u32_t)z_thread_entry;
initial_frame->thread_entry = _x86_thread_entry_wrapper; initial_frame->thread_entry = z_x86_thread_entry_wrapper;
#else #else
initial_frame->thread_entry = z_thread_entry; initial_frame->thread_entry = z_thread_entry;
#endif #endif
@ -190,18 +190,18 @@ FUNC_NORETURN void z_arch_user_mode_enter(k_thread_entry_t user_entry,
(MMU_PTE_P_MASK | MMU_PTE_RW_MASK | (MMU_PTE_P_MASK | MMU_PTE_RW_MASK |
MMU_PTE_US_MASK)); MMU_PTE_US_MASK));
_x86_userspace_enter(user_entry, p1, p2, p3, stack_end, z_x86_userspace_enter(user_entry, p1, p2, p3, stack_end,
_current->stack_info.start); _current->stack_info.start);
CODE_UNREACHABLE; CODE_UNREACHABLE;
} }
/* Implemented in userspace.S */ /* Implemented in userspace.S */
extern void _x86_syscall_entry_stub(void); extern void z_x86_syscall_entry_stub(void);
/* Syscalls invoked by 'int 0x80'. Installed in the IDT at DPL=3 so that /* Syscalls invoked by 'int 0x80'. Installed in the IDT at DPL=3 so that
* userspace can invoke it. * userspace can invoke it.
*/ */
NANO_CPU_INT_REGISTER(_x86_syscall_entry_stub, -1, -1, 0x80, 3); NANO_CPU_INT_REGISTER(z_x86_syscall_entry_stub, -1, -1, 0x80, 3);
#endif /* CONFIG_X86_USERSPACE */ #endif /* CONFIG_X86_USERSPACE */

View file

@ -11,8 +11,8 @@
#include <syscall.h> #include <syscall.h>
/* Exports */ /* Exports */
GTEXT(_x86_syscall_entry_stub) GTEXT(z_x86_syscall_entry_stub)
GTEXT(_x86_userspace_enter) GTEXT(z_x86_userspace_enter)
GTEXT(z_arch_user_string_nlen) GTEXT(z_arch_user_string_nlen)
GTEXT(z_arch_user_string_nlen_fault_start) GTEXT(z_arch_user_string_nlen_fault_start)
GTEXT(z_arch_user_string_nlen_fault_end) GTEXT(z_arch_user_string_nlen_fault_end)
@ -151,7 +151,7 @@ SECTION_FUNC(TEXT, z_x86_trampoline_to_user_always)
* unless KPTI is enabled, in which case we're on the trampoline stack and * unless KPTI is enabled, in which case we're on the trampoline stack and
* need to get off it before enabling interrupts. * need to get off it before enabling interrupts.
*/ */
SECTION_FUNC(TEXT, _x86_syscall_entry_stub) SECTION_FUNC(TEXT, z_x86_syscall_entry_stub)
#ifdef CONFIG_X86_KPTI #ifdef CONFIG_X86_KPTI
/* Stash these regs as we need to use them */ /* Stash these regs as we need to use them */
pushl %esi pushl %esi
@ -295,14 +295,14 @@ z_arch_user_string_nlen_fixup:
ret ret
/* FUNC_NORETURN void _x86_userspace_enter(k_thread_entry_t user_entry, /* FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry,
* void *p1, void *p2, void *p3, * void *p1, void *p2, void *p3,
* u32_t stack_end, * u32_t stack_end,
* u32_t stack_start) * u32_t stack_start)
* *
* A one-way trip to userspace. * A one-way trip to userspace.
*/ */
SECTION_FUNC(TEXT, _x86_userspace_enter) SECTION_FUNC(TEXT, z_x86_userspace_enter)
pop %esi /* Discard return address on stack */ pop %esi /* Discard return address on stack */
/* Fetch parameters on the stack */ /* Fetch parameters on the stack */

View file

@ -264,7 +264,7 @@ static inline void activate_partition(struct k_mem_partition *partition)
#define X86_MEM_DOMAIN_SET_PAGES (0U) #define X86_MEM_DOMAIN_SET_PAGES (0U)
#define X86_MEM_DOMAIN_RESET_PAGES (1U) #define X86_MEM_DOMAIN_RESET_PAGES (1U)
/* Pass 1 in page_conf if the mem domain pages need to be reset, else pass 0 */ /* Pass 1 in page_conf if the mem domain pages need to be reset, else pass 0 */
static inline void _x86_mem_domain_pages_update(struct k_mem_domain *mem_domain, static inline void x86_mem_domain_pages_update(struct k_mem_domain *mem_domain,
u32_t page_conf) u32_t page_conf)
{ {
u32_t partition_index; u32_t partition_index;
@ -309,7 +309,7 @@ out:
/* Load the partitions of the thread. */ /* Load the partitions of the thread. */
void z_arch_mem_domain_configure(struct k_thread *thread) void z_arch_mem_domain_configure(struct k_thread *thread)
{ {
_x86_mem_domain_pages_update(thread->mem_domain_info.mem_domain, x86_mem_domain_pages_update(thread->mem_domain_info.mem_domain,
X86_MEM_DOMAIN_SET_PAGES); X86_MEM_DOMAIN_SET_PAGES);
} }
@ -318,7 +318,7 @@ void z_arch_mem_domain_configure(struct k_thread *thread)
*/ */
void z_arch_mem_domain_destroy(struct k_mem_domain *domain) void z_arch_mem_domain_destroy(struct k_mem_domain *domain)
{ {
_x86_mem_domain_pages_update(domain, X86_MEM_DOMAIN_RESET_PAGES); x86_mem_domain_pages_update(domain, X86_MEM_DOMAIN_RESET_PAGES);
} }
/* Reset/destroy one partition specified in the argument of the API. */ /* Reset/destroy one partition specified in the argument of the API. */

View file

@ -51,7 +51,7 @@ static inline unsigned int EflagsGet(void)
* *
* @return N/A * @return N/A
*/ */
static inline void _FpAccessDisable(void) static inline void z_FpAccessDisable(void)
{ {
void *tempReg; void *tempReg;
@ -72,11 +72,11 @@ static inline void _FpAccessDisable(void)
* This routine saves the system's "live" non-integer context into the * This routine saves the system's "live" non-integer context into the
* specified area. If the specified thread supports SSE then * specified area. If the specified thread supports SSE then
* x87/MMX/SSEx thread info is saved, otherwise only x87/MMX thread is saved. * x87/MMX/SSEx thread info is saved, otherwise only x87/MMX thread is saved.
* Function is invoked by _FpCtxSave(struct k_thread *thread) * Function is invoked by FpCtxSave(struct k_thread *thread)
* *
* @return N/A * @return N/A
*/ */
static inline void _do_fp_regs_save(void *preemp_float_reg) static inline void z_do_fp_regs_save(void *preemp_float_reg)
{ {
__asm__ volatile("fnsave (%0);\n\t" __asm__ volatile("fnsave (%0);\n\t"
: :
@ -92,11 +92,11 @@ static inline void _do_fp_regs_save(void *preemp_float_reg)
* This routine saves the system's "live" non-integer context into the * This routine saves the system's "live" non-integer context into the
* specified area. If the specified thread supports SSE then * specified area. If the specified thread supports SSE then
* x87/MMX/SSEx thread info is saved, otherwise only x87/MMX thread is saved. * x87/MMX/SSEx thread info is saved, otherwise only x87/MMX thread is saved.
* Function is invoked by _FpCtxSave(struct k_thread *thread) * Function is invoked by FpCtxSave(struct k_thread *thread)
* *
* @return N/A * @return N/A
*/ */
static inline void _do_fp_and_sse_regs_save(void *preemp_float_reg) static inline void z_do_fp_and_sse_regs_save(void *preemp_float_reg)
{ {
__asm__ volatile("fxsave (%0);\n\t" __asm__ volatile("fxsave (%0);\n\t"
: :
@ -113,7 +113,7 @@ static inline void _do_fp_and_sse_regs_save(void *preemp_float_reg)
* *
* @return N/A * @return N/A
*/ */
static inline void _do_fp_regs_init(void) static inline void z_do_fp_regs_init(void)
{ {
__asm__ volatile("fninit\n\t"); __asm__ volatile("fninit\n\t");
} }
@ -127,7 +127,7 @@ static inline void _do_fp_regs_init(void)
* *
* @return N/A * @return N/A
*/ */
static inline void _do_sse_regs_init(void) static inline void z_do_sse_regs_init(void)
{ {
__asm__ volatile("ldmxcsr _sse_mxcsr_default_value\n\t"); __asm__ volatile("ldmxcsr _sse_mxcsr_default_value\n\t");
} }

View file

@ -13,9 +13,9 @@
extern "C" { extern "C" {
#endif #endif
extern int _is_clflush_available(void); extern int z_is_clflush_available(void);
extern void _cache_flush_wbinvd(vaddr_t addr, size_t len); extern void z_cache_flush_wbinvd(vaddr_t addr, size_t len);
extern size_t _cache_line_size_get(void); extern size_t z_cache_line_size_get(void);
#ifdef __cplusplus #ifdef __cplusplus
} }

View file

@ -41,7 +41,7 @@
#endif #endif
/* Some configurations require that the stack/registers be adjusted before /* Some configurations require that the stack/registers be adjusted before
* z_thread_entry. See discussion in swap.S for _x86_thread_entry_wrapper() * z_thread_entry. See discussion in swap.S for z_x86_thread_entry_wrapper()
*/ */
#if defined(CONFIG_X86_IAMCU) || defined(CONFIG_DEBUG_INFO) #if defined(CONFIG_X86_IAMCU) || defined(CONFIG_DEBUG_INFO)
#define _THREAD_WRAPPER_REQUIRED #define _THREAD_WRAPPER_REQUIRED
@ -98,8 +98,8 @@
#define IV_INTEL_RESERVED_END 31 #define IV_INTEL_RESERVED_END 31
/* /*
* Model specific register (MSR) definitions. Use the _x86_msr_read() and * Model specific register (MSR) definitions. Use the z_x86_msr_read() and
* _x86_msr_write() primitives to read/write the MSRs. Only the so-called * z_x86_msr_write() primitives to read/write the MSRs. Only the so-called
* "Architectural MSRs" are listed, i.e. the subset of MSRs and associated * "Architectural MSRs" are listed, i.e. the subset of MSRs and associated
* bit fields which will not change on future processor generations. * bit fields which will not change on future processor generations.
*/ */
@ -398,7 +398,7 @@
#include <misc/util.h> #include <misc/util.h>
#ifdef _THREAD_WRAPPER_REQUIRED #ifdef _THREAD_WRAPPER_REQUIRED
extern void _x86_thread_entry_wrapper(k_thread_entry_t entry, extern void z_x86_thread_entry_wrapper(k_thread_entry_t entry,
void *p1, void *p2, void *p3); void *p1, void *p2, void *p3);
#endif /* _THREAD_WRAPPER_REQUIRED */ #endif /* _THREAD_WRAPPER_REQUIRED */

View file

@ -77,7 +77,7 @@ extern void k_cpu_atomic_idle(unsigned int key);
* *
* @return N/A * @return N/A
*/ */
static inline void _x86_msr_write(unsigned int msr, u64_t data) static inline void z_x86_msr_write(unsigned int msr, u64_t data)
{ {
u32_t high = data >> 32; u32_t high = data >> 32;
u32_t low = data & 0xFFFFFFFF; u32_t low = data & 0xFFFFFFFF;
@ -95,7 +95,7 @@ static inline void _x86_msr_write(unsigned int msr, u64_t data)
* *
* @return N/A * @return N/A
*/ */
static inline u64_t _x86_msr_read(unsigned int msr) static inline u64_t z_x86_msr_read(unsigned int msr)
{ {
u64_t ret; u64_t ret;
@ -109,16 +109,16 @@ static inline u64_t _x86_msr_read(unsigned int msr)
static inline u32_t read_x2apic(unsigned int reg) static inline u32_t read_x2apic(unsigned int reg)
{ {
return _x86_msr_read(MSR_X2APIC_BASE + reg); return z_x86_msr_read(MSR_X2APIC_BASE + reg);
} }
static inline void write_x2apic(unsigned int reg, u32_t val) static inline void write_x2apic(unsigned int reg, u32_t val)
{ {
_x86_msr_write(MSR_X2APIC_BASE + reg, val); z_x86_msr_write(MSR_X2APIC_BASE + reg, val);
} }
#endif #endif
extern FUNC_NORETURN void _x86_userspace_enter(k_thread_entry_t user_entry, extern FUNC_NORETURN void z_x86_userspace_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3, void *p1, void *p2, void *p3,
u32_t stack_end, u32_t stack_end,
u32_t stack_start); u32_t stack_start);
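The read_x2apic()/write_x2apic() helpers above are the typical consumers of the renamed MSR primitives; as one more hedged sketch, a generic read-modify-write would look like this (the wrapper name, MSR number and mask are placeholders, not taken from this change):

/* Illustrative only: read-modify-write of an architectural MSR using the
 * renamed accessors; 'msr' and 'mask' are whatever the caller needs.
 */
static inline void x86_msr_set_bits(unsigned int msr, u64_t mask)
{
	u64_t val = z_x86_msr_read(msr);

	z_x86_msr_write(msr, val | mask);
}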

View file

@ -251,11 +251,11 @@ struct mmu_region {
.flags = permission_flags, \ .flags = permission_flags, \
} }
#define _MMU_BOOT_REGION(id, addr, region_size, permission_flags) \ #define Z_MMU_BOOT_REGION(id, addr, region_size, permission_flags) \
__MMU_BOOT_REGION(id, addr, region_size, permission_flags) __MMU_BOOT_REGION(id, addr, region_size, permission_flags)
#define MMU_BOOT_REGION(addr, region_size, permission_flags) \ #define MMU_BOOT_REGION(addr, region_size, permission_flags) \
_MMU_BOOT_REGION(__COUNTER__, addr, region_size, permission_flags) Z_MMU_BOOT_REGION(__COUNTER__, addr, region_size, permission_flags)
/* /*
* The following defines the format of a 64-bit page directory pointer entry * The following defines the format of a 64-bit page directory pointer entry
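The extra Z_MMU_BOOT_REGION() level above exists so that __COUNTER__ is expanded to a literal before __MMU_BOOT_REGION() pastes it into a unique region identifier. A sketch of how it is meant to be used (addresses and permission flags below are placeholders, not from this diff):

/* Illustrative only: each invocation receives a distinct id from
 * __COUNTER__, so the generated boot-time mmu_region entries never
 * collide. Addresses and flags are placeholders.
 */
MMU_BOOT_REGION(0xA8000000, 0x1000, MMU_ENTRY_WRITE); /* gets one unique id */
MMU_BOOT_REGION(0xB0000000, 0x1000, MMU_ENTRY_WRITE); /* gets a different id */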

View file

@ -26,9 +26,9 @@ extern "C" {
* *
* @return The key of the interrupt that is currently being processed. * @return The key of the interrupt that is currently being processed.
*/ */
static inline int _sys_current_irq_key_get(void) static inline int z_sys_current_irq_key_get(void)
{ {
return _irq_controller_isr_vector_get(); return z_irq_controller_isr_vector_get();
} }
#ifdef __cplusplus #ifdef __cplusplus

View file

@ -59,19 +59,19 @@ void handler_f3(void *arg, int err)
printf("end f3 handler\n"); printf("end f3 handler\n");
} }
void _unhandled_vector(int vector, int err, struct xuk_entry_frame *f) void z_unhandled_vector(int vector, int err, struct xuk_entry_frame *f)
{ {
(void)f; (void)f;
_putchar = putchar; z_putchar = putchar;
printf("Unhandled vector %d (err %xh) on CPU%d\n", printf("Unhandled vector %d (err %xh) on CPU%d\n",
vector, err, (int)(long)xuk_get_f_ptr()); vector, err, (int)(long)xuk_get_f_ptr());
} }
void _isr_entry(void) void z_isr_entry(void)
{ {
} }
void *_isr_exit_restore_stack(void *interrupted) void *z_isr_exit_restore_stack(void *interrupted)
{ {
/* Somewhat hacky test of the ISR exit modes. Two ways of /* Somewhat hacky test of the ISR exit modes. Two ways of
* specifying "this stack", one of which does the full spill * specifying "this stack", one of which does the full spill
@ -129,9 +129,9 @@ void test_local_ipi(void)
}; };
} }
void _cpu_start(int cpu) void z_cpu_start(int cpu)
{ {
_putchar = putchar; z_putchar = putchar;
printf("Entering demo kernel\n"); printf("Entering demo kernel\n");
/* Make sure the FS/GS pointers work, then set F to store our /* Make sure the FS/GS pointers work, then set F to store our

View file

@ -17,7 +17,7 @@ struct _pfr {
}; };
/* Set this function pointer to something that generates output */ /* Set this function pointer to something that generates output */
static void (*_putchar)(int c); static void (*z_putchar)(int c);
static void pc(struct _pfr *r, int c) static void pc(struct _pfr *r, int c)
{ {
@ -25,7 +25,7 @@ static void pc(struct _pfr *r, int c)
if (r->idx <= r->len) if (r->idx <= r->len)
r->buf[r->idx] = c; r->buf[r->idx] = c;
} else { } else {
_putchar(c); z_putchar(c);
} }
r->idx++; r->idx++;
} }
@ -56,7 +56,7 @@ static void endrec(struct _pfr *r)
r->buf[r->idx] = 0; r->buf[r->idx] = 0;
} }
static int _vpf(struct _pfr *r, const char *f, va_list ap) static int vpf(struct _pfr *r, const char *f, va_list ap)
{ {
for (/**/; *f; f++) { for (/**/; *f; f++) {
if (*f != '%') { if (*f != '%') {
@ -109,7 +109,7 @@ static int _vpf(struct _pfr *r, const char *f, va_list ap)
#define CALL_VPF(rec) \ #define CALL_VPF(rec) \
va_list ap; \ va_list ap; \
va_start(ap, f); \ va_start(ap, f); \
int ret = _vpf(&r, f, ap); \ int ret = vpf(&r, f, ap); \
va_end(ap); \ va_end(ap); \
return ret return ret

View file

@ -9,7 +9,7 @@
#define _PORT 0x3f8 #define _PORT 0x3f8
static inline void _serout(int c) static inline void serout(int c)
{ {
while (!(ioport_in8(_PORT + 5) & 0x20)) { while (!(ioport_in8(_PORT + 5) & 0x20)) {
} }
@ -19,9 +19,9 @@ static inline void _serout(int c)
static inline void serial_putc(int c) static inline void serial_putc(int c)
{ {
if (c == '\n') { if (c == '\n') {
_serout('\r'); serout('\r');
} }
_serout(c); serout(c);
} }
static inline void serial_puts(const char *s) static inline void serial_puts(const char *s)

View file

@ -7,7 +7,7 @@
/* Super-primitive VGA text console output-only "terminal" driver */ /* Super-primitive VGA text console output-only "terminal" driver */
static inline unsigned short *_vga_row(int row) static inline unsigned short *vga_row(int row)
{ {
return ((unsigned short *)0xb8000) + 80 * row; return ((unsigned short *)0xb8000) + 80 * row;
} }
@ -18,7 +18,7 @@ static inline unsigned short *_vga_row(int row)
*/ */
static inline void vga_put(int ch, int color, int row, int col) static inline void vga_put(int ch, int color, int row, int col)
{ {
unsigned short *rp = _vga_row(row); unsigned short *rp = vga_row(row);
rp[col] = (color << 8) | ch; rp[col] = (color << 8) | ch;
} }
@ -28,11 +28,11 @@ static inline void vgacon_putc(char c)
if (_shared.vgacol == 80) { if (_shared.vgacol == 80) {
for (int r = 0; r < 24; r++) { for (int r = 0; r < 24; r++) {
for (int c = 0; c < 80; c++) { for (int c = 0; c < 80; c++) {
_vga_row(r)[c] = _vga_row(r+1)[c]; vga_row(r)[c] = vga_row(r+1)[c];
} }
} }
for (int c = 0; c < 80; c++) { for (int c = 0; c < 80; c++) {
_vga_row(24)[c] = 0x9000; vga_row(24)[c] = 0x9000;
} }
_shared.vgacol = 0; _shared.vgacol = 0;
} }

View file

@ -32,7 +32,7 @@ void z_new_thread(struct k_thread *t, k_thread_stack_t *stack,
char *base = K_THREAD_STACK_BUFFER(stack); char *base = K_THREAD_STACK_BUFFER(stack);
char *top = base + sz; char *top = base + sz;
_new_thread_init(t, base, sz, prio, opts); z_new_thread_init(t, base, sz, prio, opts);
t->switch_handle = (void *)xuk_setup_stack((long) top, t->switch_handle = (void *)xuk_setup_stack((long) top,
(void *)z_thread_entry, (void *)z_thread_entry,
@ -46,7 +46,7 @@ void k_cpu_idle(void)
__asm__ volatile("sti; hlt"); __asm__ volatile("sti; hlt");
} }
void _unhandled_vector(int vector, int err, struct xuk_entry_frame *f) void z_unhandled_vector(int vector, int err, struct xuk_entry_frame *f)
{ {
/* Yes, there are five registers missing. See notes on /* Yes, there are five registers missing. See notes on
* xuk_entry_frame/xuk_stack_frame. * xuk_entry_frame/xuk_stack_frame.
@ -62,12 +62,12 @@ void _unhandled_vector(int vector, int err, struct xuk_entry_frame *f)
z_NanoFatalErrorHandler(x86_64_except_reason, NULL); z_NanoFatalErrorHandler(x86_64_except_reason, NULL);
} }
void _isr_entry(void) void z_isr_entry(void)
{ {
z_arch_curr_cpu()->nested++; z_arch_curr_cpu()->nested++;
} }
void *_isr_exit_restore_stack(void *interrupted) void *z_isr_exit_restore_stack(void *interrupted)
{ {
bool nested = (--z_arch_curr_cpu()->nested) > 0; bool nested = (--z_arch_curr_cpu()->nested) > 0;
void *next = z_get_next_switch_handle(interrupted); void *next = z_get_next_switch_handle(interrupted);
@ -139,7 +139,7 @@ void z_arch_sched_ipi(void)
/* Called from xuk layer on actual CPU start */ /* Called from xuk layer on actual CPU start */
void _cpu_start(int cpu) void z_cpu_start(int cpu)
{ {
xuk_set_f_ptr(cpu, &_kernel.cpus[cpu]); xuk_set_f_ptr(cpu, &_kernel.cpus[cpu]);

View file

@ -163,7 +163,7 @@ void cstart(unsigned int magic, unsigned int arg)
shared_init(); shared_init();
#ifdef CONFIG_XUK_DEBUG #ifdef CONFIG_XUK_DEBUG
serial_init(); serial_init();
_putchar = putchar; z_putchar = putchar;
#endif #endif
printf("Entering stub32 on boot cpu, magic %xh stack ~%xh\n", printf("Entering stub32 on boot cpu, magic %xh stack ~%xh\n",

View file

@ -174,7 +174,7 @@ long _isr_c_top(unsigned long vecret, unsigned long rsp,
struct vhandler *h = &vector_handlers[vector]; struct vhandler *h = &vector_handlers[vector];
struct xuk_entry_frame *frame = (void *)rsp; struct xuk_entry_frame *frame = (void *)rsp;
_isr_entry(); z_isr_entry();
/* Set current priority in CR8 to the currently-serviced IRQ /* Set current priority in CR8 to the currently-serviced IRQ
* and re-enable interrupts * and re-enable interrupts
@ -189,7 +189,7 @@ long _isr_c_top(unsigned long vecret, unsigned long rsp,
if (h->fn) { if (h->fn) {
h->fn(h->arg, err); h->fn(h->arg, err);
} else { } else {
_unhandled_vector(vector, err, frame); z_unhandled_vector(vector, err, frame);
} }
/* Mask interrupts to finish processing (they'll get restored /* Mask interrupts to finish processing (they'll get restored
@ -208,7 +208,7 @@ long _isr_c_top(unsigned long vecret, unsigned long rsp,
* hook doesn't want to switch, it will return null and never * hook doesn't want to switch, it will return null and never
* save the value of the pointer. * save the value of the pointer.
*/ */
return (long)_isr_exit_restore_stack((void *)(rsp - 48)); return (long)z_isr_exit_restore_stack((void *)(rsp - 48));
} }
static long choose_isr_entry(int vector) static long choose_isr_entry(int vector)
@ -515,7 +515,7 @@ void _cstart64(int cpu_id)
} }
#ifdef CONFIG_XUK_DEBUG #ifdef CONFIG_XUK_DEBUG
_putchar = putchar; z_putchar = putchar;
#endif #endif
printf("\n==\nHello from 64 bit C code on CPU%d (stack ~%xh)\n", printf("\n==\nHello from 64 bit C code on CPU%d (stack ~%xh)\n",
cpu_id, (int)(long)&cpu_id); cpu_id, (int)(long)&cpu_id);
@ -587,8 +587,8 @@ void _cstart64(int cpu_id)
smp_init(); smp_init();
} }
printf("Calling _cpu_start on CPU %d\n", cpu_id); printf("Calling z_cpu_start on CPU %d\n", cpu_id);
_cpu_start(cpu_id); z_cpu_start(cpu_id);
} }
long xuk_setup_stack(long sp, void *fn, unsigned int eflags, long xuk_setup_stack(long sp, void *fn, unsigned int eflags,

View file

@ -151,22 +151,22 @@ void xuk_start_cpu(int cpu, unsigned int stack);
/* OS CPU startup entry point, running on the stack returned by /* OS CPU startup entry point, running on the stack returned by
* init_cpu_stack() * init_cpu_stack()
*/ */
void _cpu_start(int cpu); void z_cpu_start(int cpu);
/* Called on receipt of an unregistered interrupt/exception. Passes /* Called on receipt of an unregistered interrupt/exception. Passes
* the vector number and the CPU error code, if any. * the vector number and the CPU error code, if any.
*/ */
void _unhandled_vector(int vector, int err, struct xuk_entry_frame *f); void z_unhandled_vector(int vector, int err, struct xuk_entry_frame *f);
/* Called on ISR entry before nested interrupts are enabled so the OS /* Called on ISR entry before nested interrupts are enabled so the OS
* can arrange bookkeeping. Really should be exposed as an inline and * can arrange bookkeeping. Really should be exposed as an inline and
* not a function call; cycles on interrupt entry are precious. * not a function call; cycles on interrupt entry are precious.
*/ */
void _isr_entry(void); void z_isr_entry(void);
/* Called on ISR exit to choose a next thread to run. The argument is /* Called on ISR exit to choose a next thread to run. The argument is
* a context pointer to the thread that was interrupted. * a context pointer to the thread that was interrupted.
*/ */
void *_isr_exit_restore_stack(void *interrupted); void *z_isr_exit_restore_stack(void *interrupted);
#endif /* _XUK_H */ #endif /* _XUK_H */

View file

@ -51,7 +51,7 @@ void z_arch_irq_disable(unsigned int irq);
void z_arch_irq_enable(unsigned int irq); void z_arch_irq_enable(unsigned int irq);
/* Not a standard Zephyr function, but probably will be */ /* Not a standard Zephyr function, but probably will be */
static inline unsigned long long _arch_k_cycle_get_64(void) static inline unsigned long long z_arch_k_cycle_get_64(void)
{ {
unsigned int hi, lo; unsigned int hi, lo;
@ -65,13 +65,13 @@ static inline unsigned int z_arch_k_cycle_get_32(void)
extern u32_t z_timer_cycle_get_32(void); extern u32_t z_timer_cycle_get_32(void);
return z_timer_cycle_get_32(); return z_timer_cycle_get_32();
#else #else
return (u32_t)_arch_k_cycle_get_64(); return (u32_t)z_arch_k_cycle_get_64();
#endif #endif
} }
#define z_is_in_isr() (z_arch_curr_cpu()->nested != 0) #define z_is_in_isr() (z_arch_curr_cpu()->nested != 0)
static inline void _arch_switch(void *switch_to, void **switched_from) static inline void z_arch_switch(void *switch_to, void **switched_from)
{ {
xuk_switch(switch_to, switched_from); xuk_switch(switch_to, switched_from);
} }

View file

@ -17,7 +17,7 @@ static irq_offload_routine_t offload_routine;
static void *offload_param; static void *offload_param;
/* Called by ISR dispatcher */ /* Called by ISR dispatcher */
void _irq_do_offload(void *unused) void z_irq_do_offload(void *unused)
{ {
ARG_UNUSED(unused); ARG_UNUSED(unused);
offload_routine(offload_param); offload_routine(offload_param);
@ -26,11 +26,11 @@ void _irq_do_offload(void *unused)
void irq_offload(irq_offload_routine_t routine, void *parameter) void irq_offload(irq_offload_routine_t routine, void *parameter)
{ {
IRQ_CONNECT(CONFIG_IRQ_OFFLOAD_INTNUM, XCHAL_EXCM_LEVEL, IRQ_CONNECT(CONFIG_IRQ_OFFLOAD_INTNUM, XCHAL_EXCM_LEVEL,
_irq_do_offload, NULL, 0); z_irq_do_offload, NULL, 0);
z_arch_irq_disable(CONFIG_IRQ_OFFLOAD_INTNUM); z_arch_irq_disable(CONFIG_IRQ_OFFLOAD_INTNUM);
offload_routine = routine; offload_routine = routine;
offload_param = parameter; offload_param = parameter;
_xt_set_intset(BIT(CONFIG_IRQ_OFFLOAD_INTNUM)); z_xt_set_intset(BIT(CONFIG_IRQ_OFFLOAD_INTNUM));
/* /*
* Enable the software interrupt, in case it is disabled, so that IRQ * Enable the software interrupt, in case it is disabled, so that IRQ
* offload is serviced. * offload is serviced.
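A usage sketch of the offload path above (handler name and argument are illustrative): irq_offload() latches the routine and parameter, raises the software interrupt, and z_irq_do_offload() then runs the routine in interrupt context.

/* Illustrative only: run a routine in interrupt context via irq_offload(). */
static void offload_handler(void *arg)
{
	printk("running in ISR context, arg %p\n", arg);
}

static void trigger_offload(void)
{
	irq_offload(offload_handler, (void *)0x1234);
}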

View file

@ -14,7 +14,7 @@
#include <xtensa_config.h> #include <xtensa_config.h>
#include <kernel_internal.h> #include <kernel_internal.h>
extern void _xt_user_exit(void); extern void z_xt_user_exit(void);
/* /*
* @brief Initialize a new thread * @brief Initialize a new thread
@ -57,7 +57,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
char *cpStack; char *cpStack;
#endif #endif
_new_thread_init(thread, pStack, stackSize, priority, options); z_new_thread_init(thread, pStack, stackSize, priority, options);
#ifdef CONFIG_DEBUG #ifdef CONFIG_DEBUG
printk("\nstackPtr = %p, stackSize = %d\n", pStack, stackSize); printk("\nstackPtr = %p, stackSize = %d\n", pStack, stackSize);
@ -96,7 +96,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
pInitCtx->a1 = (u32_t)pInitCtx + XT_STK_FRMSZ; pInitCtx->a1 = (u32_t)pInitCtx + XT_STK_FRMSZ;
/* user exception exit dispatcher */ /* user exception exit dispatcher */
pInitCtx->exit = (u32_t)_xt_user_exit; pInitCtx->exit = (u32_t)z_xt_user_exit;
/* Set initial PS to int level 0, EXCM disabled, user mode. /* Set initial PS to int level 0, EXCM disabled, user mode.
* Also set entry point argument arg. * Also set entry point argument arg.

View file

@ -308,7 +308,7 @@ _zxt_timer_int:
* _zxt_tick_timer_init * _zxt_tick_timer_init
* void _zxt_tick_timer_init(void) * void _zxt_tick_timer_init(void)
* *
* Initialize timer and timer interrupt handler (_xt_tick_divisor_init() has * Initialize timer and timer interrupt handler (z_xt_tick_divisor_init() has
* already been called). * already been called).
* Callable from C (obeys ABI conventions on entry). * Callable from C (obeys ABI conventions on entry).
* *

View file

@ -69,7 +69,7 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack, size_t sz,
/* Align downward. The API as specified requires a runtime check. */ /* Align downward. The API as specified requires a runtime check. */
top = (char *)(((unsigned int)top) & ~3); top = (char *)(((unsigned int)top) & ~3);
_new_thread_init(thread, base, sz, prio, opts); z_new_thread_init(thread, base, sz, prio, opts);
thread->switch_handle = xtensa_init_stack((void *)top, entry, thread->switch_handle = xtensa_init_stack((void *)top, entry,
p1, p2, p3); p1, p2, p3);

View file

@ -283,7 +283,7 @@ _xt_context_restore:
ret ret
/* _xt_coproc_init /* z_xt_coproc_init
* *
* Initializes global co-processor management data, setting all co-processors * Initializes global co-processor management data, setting all co-processors
* to "unowned". Leaves CPENABLE as it found it (does NOT clear it). * to "unowned". Leaves CPENABLE as it found it (does NOT clear it).
@ -303,15 +303,15 @@ _xt_context_restore:
* None. * None.
* *
* Obeys ABI conventions per prototype: * Obeys ABI conventions per prototype:
* void _xt_coproc_init(void) * void z_xt_coproc_init(void)
*/ */
#if XCHAL_CP_NUM > 0 #if XCHAL_CP_NUM > 0
.global _xt_coproc_init .global z_xt_coproc_init
.type _xt_coproc_init,@function .type z_xt_coproc_init,@function
.align 4 .align 4
_xt_coproc_init: z_xt_coproc_init:
ENTRY0 ENTRY0
/* Initialize thread co-processor ownerships to 0 (unowned). */ /* Initialize thread co-processor ownerships to 0 (unowned). */

View file

@ -644,10 +644,10 @@ _xt_user_exc:
* on entry and used to return to a thread or interrupted interrupt handler. * on entry and used to return to a thread or interrupted interrupt handler.
*/ */
.global _xt_user_exit .global z_xt_user_exit
.type _xt_user_exit,@function .type z_xt_user_exit,@function
.align 4 .align 4
_xt_user_exit: z_xt_user_exit:
l32i a0, sp, XT_STK_ps /* retrieve interruptee's PS */ l32i a0, sp, XT_STK_ps /* retrieve interruptee's PS */
wsr a0, PS wsr a0, PS
l32i a0, sp, XT_STK_pc /* retrieve interruptee's PC */ l32i a0, sp, XT_STK_pc /* retrieve interruptee's PC */
@ -797,7 +797,7 @@ _xt_coproc_exc:
#if XCHAL_HAVE_WINDOWED #if XCHAL_HAVE_WINDOWED
s32e a0, sp, -16 /* for debug backtrace */ s32e a0, sp, -16 /* for debug backtrace */
#endif #endif
movi a0, _xt_user_exit /* save exit point for dispatch */ movi a0, z_xt_user_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_exit s32i a0, sp, XT_STK_exit
rsr a0, EXCCAUSE rsr a0, EXCCAUSE
@ -900,7 +900,7 @@ _xt_coproc_exc:
xchal_cpi_load_funcbody xchal_cpi_load_funcbody
/* Restore interruptee's saved registers. */ /* Restore interruptee's saved registers. */
/* Can omit rsync for wsr.CPENABLE here because _xt_user_exit does /* Can omit rsync for wsr.CPENABLE here because z_xt_user_exit does
* it. * it.
*/ */
.L_xt_coproc_done: .L_xt_coproc_done:
@ -909,7 +909,7 @@ _xt_coproc_exc:
l32i a4, sp, XT_STK_a4 l32i a4, sp, XT_STK_a4
l32i a3, sp, XT_STK_a3 l32i a3, sp, XT_STK_a3
l32i a2, sp, XT_STK_a2 l32i a2, sp, XT_STK_a2
call0 _xt_user_exit /* return via exit dispatcher */ call0 z_xt_user_exit /* return via exit dispatcher */
/* Never returns here - call0 is used as a jump (see note at top) */ /* Never returns here - call0 is used as a jump (see note at top) */
.L_check_cs: .L_check_cs:
@ -959,7 +959,7 @@ _xt_lowint1:
s32i a0, sp, XT_STK_pc s32i a0, sp, XT_STK_pc
rsr a0, EXCSAVE_1 /* save interruptee's a0 */ rsr a0, EXCSAVE_1 /* save interruptee's a0 */
s32i a0, sp, XT_STK_a0 s32i a0, sp, XT_STK_a0
movi a0, _xt_user_exit /* save exit point for dispatch */ movi a0, z_xt_user_exit /* save exit point for dispatch */
s32i a0, sp, XT_STK_exit s32i a0, sp, XT_STK_exit
/* Save rest of interrupt context and enter RTOS. */ /* Save rest of interrupt context and enter RTOS. */

View file

@ -56,7 +56,7 @@ typedef struct _kernel_arch _kernel_arch_t;
#ifdef CONFIG_USE_SWITCH #ifdef CONFIG_USE_SWITCH
void xtensa_switch(void *switch_to, void **switched_from); void xtensa_switch(void *switch_to, void **switched_from);
#define _arch_switch xtensa_switch #define z_arch_switch xtensa_switch
#endif #endif
/* stacks */ /* stacks */

View file

@ -36,7 +36,7 @@ extern void FatalErrorHandler(void);
extern void ReservedInterruptHandler(unsigned int intNo); extern void ReservedInterruptHandler(unsigned int intNo);
/* Defined in xtensa_context.S */ /* Defined in xtensa_context.S */
extern void _xt_coproc_init(void); extern void z_xt_coproc_init(void);
extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE); extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
@ -86,7 +86,7 @@ static ALWAYS_INLINE void kernel_arch_init(void)
/* Initialize co-processor management for threads. /* Initialize co-processor management for threads.
* Leave CPENABLE alone. * Leave CPENABLE alone.
*/ */
_xt_coproc_init(); z_xt_coproc_init();
#endif #endif
#ifdef CONFIG_INIT_STACKS #ifdef CONFIG_INIT_STACKS

View file

@ -24,7 +24,7 @@ extern "C" {
* *
* @return The key of the interrupt that is currently being processed. * @return The key of the interrupt that is currently being processed.
*/ */
static inline int _sys_current_irq_key_get(void) static inline int z_sys_current_irq_key_get(void)
{ {
return 0; return 0;
} }

View file

@ -50,7 +50,7 @@ extern void z_xt_ints_off(unsigned int mask);
/* /*
* Call this function to set the specified (s/w) interrupt. * Call this function to set the specified (s/w) interrupt.
*/ */
static inline void _xt_set_intset(unsigned int arg) static inline void z_xt_set_intset(unsigned int arg)
{ {
xthal_set_intset(arg); xthal_set_intset(arg);
} }

View file

@ -147,7 +147,7 @@
#if USE_INTERNAL_TIMER || (EXTERNAL_TIMER_IRQ < 0) #if USE_INTERNAL_TIMER || (EXTERNAL_TIMER_IRQ < 0)
#ifndef __ASSEMBLER__ #ifndef __ASSEMBLER__
extern unsigned int _xt_tick_divisor; extern unsigned int _xt_tick_divisor;
extern void _xt_tick_divisor_init(void); extern void z_xt_tick_divisor_init(void);
#endif #endif
#endif // Internal/External timer #endif // Internal/External timer

View file

@ -85,12 +85,12 @@ void hwm_set_sig_handler(void)
struct sigaction act; struct sigaction act;
act.sa_handler = hwm_signal_end_handler; act.sa_handler = hwm_signal_end_handler;
_SAFE_CALL(sigemptyset(&act.sa_mask)); PC_SAFE_CALL(sigemptyset(&act.sa_mask));
act.sa_flags = SA_RESETHAND; act.sa_flags = SA_RESETHAND;
_SAFE_CALL(sigaction(SIGTERM, &act, NULL)); PC_SAFE_CALL(sigaction(SIGTERM, &act, NULL));
_SAFE_CALL(sigaction(SIGINT, &act, NULL)); PC_SAFE_CALL(sigaction(SIGINT, &act, NULL));
} }

View file

@ -67,7 +67,7 @@ void z_arc_v2_irq_unit_irq_enable_set(
*/ */
static ALWAYS_INLINE static ALWAYS_INLINE
void _arc_v2_irq_unit_int_enable(int irq) void z_arc_v2_irq_unit_int_enable(int irq)
{ {
z_arc_v2_irq_unit_irq_enable_set(irq, _ARC_V2_INT_ENABLE); z_arc_v2_irq_unit_irq_enable_set(irq, _ARC_V2_INT_ENABLE);
} }
@ -81,7 +81,7 @@ void _arc_v2_irq_unit_int_enable(int irq)
*/ */
static ALWAYS_INLINE static ALWAYS_INLINE
void _arc_v2_irq_unit_int_disable(int irq) void z_arc_v2_irq_unit_int_disable(int irq)
{ {
z_arc_v2_irq_unit_irq_enable_set(irq, _ARC_V2_INT_DISABLE); z_arc_v2_irq_unit_irq_enable_set(irq, _ARC_V2_INT_DISABLE);
} }
@ -95,7 +95,7 @@ void _arc_v2_irq_unit_int_disable(int irq)
*/ */
static ALWAYS_INLINE static ALWAYS_INLINE
void _arc_v2_irq_unit_prio_set(int irq, unsigned char prio) void z_arc_v2_irq_unit_prio_set(int irq, unsigned char prio)
{ {
z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq); z_arc_v2_aux_reg_write(_ARC_V2_IRQ_SELECT, irq);
#ifdef CONFIG_ARC_HAS_SECURE #ifdef CONFIG_ARC_HAS_SECURE

View file

@ -146,9 +146,9 @@ extern "C" {
#define _ARC_V2_IRQ_PRIORITY_SECURE 0x100 #define _ARC_V2_IRQ_PRIORITY_SECURE 0x100
/* exception cause register masks */ /* exception cause register masks */
#define _ARC_V2_ECR_VECTOR(X) ((X & 0xff0000) >> 16) #define Z_ARC_V2_ECR_VECTOR(X) ((X & 0xff0000) >> 16)
#define _ARC_V2_ECR_CODE(X) ((X & 0xff00) >> 8) #define Z_ARC_V2_ECR_CODE(X) ((X & 0xff00) >> 8)
#define _ARC_V2_ECR_PARAMETER(X) (X & 0xff) #define Z_ARC_V2_ECR_PARAMETER(X) (X & 0xff)
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE
#if defined(__GNUC__) #if defined(__GNUC__)

View file

@ -84,7 +84,7 @@ static inline u32_t _nios2_read_sp(void)
/* /*
* Functions for useful processor instructions. * Functions for useful processor instructions.
*/ */
static inline void _nios2_break(void) static inline void z_nios2_break(void)
{ {
__asm__ volatile("break"); __asm__ volatile("break");
} }
@ -102,17 +102,17 @@ static inline void _nios2_dcache_addr_flush(void *addr)
__asm__ volatile ("flushda (%0)" :: "r" (addr)); __asm__ volatile ("flushda (%0)" :: "r" (addr));
} }
static inline void _nios2_dcache_flush(u32_t offset) static inline void z_nios2_dcache_flush(u32_t offset)
{ {
__asm__ volatile ("flushd (%0)" :: "r" (offset)); __asm__ volatile ("flushd (%0)" :: "r" (offset));
} }
static inline void _nios2_icache_flush(u32_t offset) static inline void z_nios2_icache_flush(u32_t offset)
{ {
__asm__ volatile ("flushi %0" :: "r" (offset)); __asm__ volatile ("flushi %0" :: "r" (offset));
} }
static inline void _nios2_pipeline_flush(void) static inline void z_nios2_pipeline_flush(void)
{ {
__asm__ volatile ("flushp"); __asm__ volatile ("flushp");
} }
@ -145,15 +145,15 @@ enum nios2_creg {
* we get errors "Control register number must be in range 0-31 for * we get errors "Control register number must be in range 0-31 for
* __builtin_rdctl" with the following code: * __builtin_rdctl" with the following code:
* *
* static inline u32_t _nios2_creg_read(enum nios2_creg reg) * static inline u32_t z_nios2_creg_read(enum nios2_creg reg)
* { * {
* return __builtin_rdctl(reg); * return __builtin_rdctl(reg);
* } * }
* *
* This compiles just fine with -Os. * This compiles just fine with -Os.
*/ */
#define _nios2_creg_read(reg) __builtin_rdctl(reg) #define z_nios2_creg_read(reg) __builtin_rdctl(reg)
#define _nios2_creg_write(reg, val) __builtin_wrctl(reg, val) #define z_nios2_creg_write(reg, val) __builtin_wrctl(reg, val)
#define z_nios2_get_register_address(base, regnum) \ #define z_nios2_get_register_address(base, regnum) \
((void *)(((u8_t *)base) + ((regnum) * (SYSTEM_BUS_WIDTH / 8)))) ((void *)(((u8_t *)base) + ((regnum) * (SYSTEM_BUS_WIDTH / 8))))
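Because z_nios2_creg_read()/z_nios2_creg_write() must see a compile-time constant register number, callers pass an enum nios2_creg value directly. A minimal sketch, assuming NIOS2_CR_STATUS and a PIE bit mask are provided by the same header (neither appears in this hunk):

/* Illustrative only: read the Nios II status register and set the
 * interrupt-enable (PIE) bit. NIOS2_CR_STATUS and NIOS2_SR_PIE_MSK are
 * assumed names, not part of this diff.
 */
static inline void nios2_enable_interrupts(void)
{
	u32_t status = z_nios2_creg_read(NIOS2_CR_STATUS);

	z_nios2_creg_write(NIOS2_CR_STATUS, status | NIOS2_SR_PIE_MSK);
}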

View file

@ -79,13 +79,13 @@ static inline void z_irq_controller_irq_config(unsigned int vector,
* @return the vector of the interrupt that is currently being processed, or * @return the vector of the interrupt that is currently being processed, or
* -1 if this can't be determined * -1 if this can't be determined
*/ */
static inline int _irq_controller_isr_vector_get(void) static inline int z_irq_controller_isr_vector_get(void)
{ {
return __irq_controller_isr_vector_get(); return __irq_controller_isr_vector_get();
} }
static inline void _irq_controller_eoi(void) static inline void z_irq_controller_eoi(void)
{ {
__irq_controller_eoi(); __irq_controller_eoi();
} }

View file

@ -407,7 +407,7 @@ static inline void z_sd_set_seg_offset(struct segment_descriptor *sd,
* @param offset offset of handler * @param offset offset of handler
* @param dpl descriptor privilege level * @param dpl descriptor privilege level
*/ */
static inline void _init_irq_gate(struct segment_descriptor *sd, static inline void z_init_irq_gate(struct segment_descriptor *sd,
u16_t seg_selector, u32_t offset, u16_t seg_selector, u32_t offset,
u32_t dpl) u32_t dpl)
{ {
@ -509,7 +509,7 @@ static inline void _set_gdt(const struct pseudo_descriptor *gdt)
* *
* @param idt Pointer to IDT pseudo descriptor. * @param idt Pointer to IDT pseudo descriptor.
*/ */
static inline void _set_idt(const struct pseudo_descriptor *idt) static inline void z_set_idt(const struct pseudo_descriptor *idt)
{ {
__asm__ __volatile__ ("lidt %0" :: "m" (*idt)); __asm__ __volatile__ ("lidt %0" :: "m" (*idt));
} }

View file

@ -31,7 +31,7 @@ extern "C" {
/* Syscall invocation macros. x86-specific machine constraints used to ensure /* Syscall invocation macros. x86-specific machine constraints used to ensure
* args land in the proper registers, see implementation of * args land in the proper registers, see implementation of
* _x86_syscall_entry_stub in userspace.S * z_x86_syscall_entry_stub in userspace.S
* *
* the entry stub clobbers EDX and ECX on IAMCU systems * the entry stub clobbers EDX and ECX on IAMCU systems
*/ */

Some files were not shown because too many files have changed in this diff.