arm: arch code naming cleanup

This patch re-namespaces global variables and functions
that are used only within the arch/arm/ code so that they
are prefixed with z_arm_.

Some instances of CamelCase have been corrected.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Authored by Andrew Boie on 2019-09-30 12:31:07 -07:00; committed by Ioannis Glaropoulos
commit 8c98a97581
51 changed files with 332 additions and 329 deletions


@@ -137,7 +137,7 @@ LOG_MODULE_DECLARE(os);
*/
#if (CONFIG_FAULT_DUMP == 1)
static void FaultShow(const z_arch_esf_t *esf, int fault)
static void fault_show(const z_arch_esf_t *esf, int fault)
{
PR_EXC("Fault! EXC #%d", fault);
@@ -156,7 +156,7 @@ static void FaultShow(const z_arch_esf_t *esf, int fault)
*
* For Dump level 0, no information needs to be generated.
*/
static void FaultShow(const z_arch_esf_t *esf, int fault)
static void fault_show(const z_arch_esf_t *esf, int fault)
{
(void)esf;
(void)fault;
@@ -205,13 +205,14 @@ u32_t z_check_thread_stack_fail(const u32_t fault_addr,
/**
*
* @brief Dump MPU fault information
* @brief Dump MemManage fault information
*
* See _FaultDump() for example.
* See z_arm_fault_dump() for example.
*
* @return error code to identify the fatal error reason
*/
static u32_t MpuFault(z_arch_esf_t *esf, int fromHardFault, bool *recoverable)
static u32_t mem_manage_fault(z_arch_esf_t *esf, int from_hard_fault,
bool *recoverable)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
u32_t mmfar = -EINVAL;
@@ -239,7 +240,7 @@ static u32_t MpuFault(z_arch_esf_t *esf, int fromHardFault, bool *recoverable)
if ((SCB->CFSR & SCB_CFSR_MMARVALID_Msk) != 0) {
PR_EXC(" MMFAR Address: 0x%x", mmfar);
if (fromHardFault) {
if (from_hard_fault) {
/* clear SCB_MMAR[VALID] to reset */
SCB->CFSR &= ~SCB_CFSR_MMARVALID_Msk;
}
@@ -328,13 +329,13 @@ static u32_t MpuFault(z_arch_esf_t *esf, int fromHardFault, bool *recoverable)
/**
*
* @brief Dump bus fault information
* @brief Dump BusFault information
*
* See _FaultDump() for example.
* See z_arm_fault_dump() for example.
*
* @return N/A
*/
static int BusFault(z_arch_esf_t *esf, int fromHardFault, bool *recoverable)
static int bus_fault(z_arch_esf_t *esf, int from_hard_fault, bool *recoverable)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
@@ -360,7 +361,7 @@ static int BusFault(z_arch_esf_t *esf, int fromHardFault, bool *recoverable)
if ((SCB->CFSR & SCB_CFSR_BFARVALID_Msk) != 0) {
PR_EXC(" BFAR Address: 0x%x", bfar);
if (fromHardFault) {
if (from_hard_fault) {
/* clear SCB_CFSR_BFAR[VALID] to reset */
SCB->CFSR &= ~SCB_CFSR_BFARVALID_Msk;
}
@@ -482,13 +483,13 @@ static int BusFault(z_arch_esf_t *esf, int fromHardFault, bool *recoverable)
/**
*
* @brief Dump usage fault information
* @brief Dump UsageFault information
*
* See _FaultDump() for example.
* See z_arm_fault_dump() for example.
*
* @return error code to identify the fatal error reason
*/
static u32_t UsageFault(const z_arch_esf_t *esf)
static u32_t usage_fault(const z_arch_esf_t *esf)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
@@ -538,13 +539,13 @@ static u32_t UsageFault(const z_arch_esf_t *esf)
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
/**
*
* @brief Dump secure fault information
* @brief Dump SecureFault information
*
* See _FaultDump() for example.
* See z_arm_fault_dump() for example.
*
* @return N/A
*/
static void SecureFault(const z_arch_esf_t *esf)
static void secure_fault(const z_arch_esf_t *esf)
{
PR_FAULT_INFO("***** SECURE FAULT *****");
@@ -579,11 +580,11 @@ static void SecureFault(const z_arch_esf_t *esf)
*
* @brief Dump debug monitor exception information
*
* See _FaultDump() for example.
* See z_arm_fault_dump() for example.
*
* @return N/A
*/
static void DebugMonitor(const z_arch_esf_t *esf)
static void debug_monitor(const z_arch_esf_t *esf)
{
ARG_UNUSED(esf);
@@ -599,11 +600,11 @@ static void DebugMonitor(const z_arch_esf_t *esf)
*
* @brief Dump hard fault information
*
* See _FaultDump() for example.
* See z_arm_fault_dump() for example.
*
* @return error code to identify the fatal error reason
*/
static u32_t HardFault(z_arch_esf_t *esf, bool *recoverable)
static u32_t hard_fault(z_arch_esf_t *esf, bool *recoverable)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
@@ -643,14 +644,14 @@ static u32_t HardFault(z_arch_esf_t *esf, bool *recoverable)
} else if ((SCB->HFSR & SCB_HFSR_FORCED_Msk) != 0) {
PR_EXC(" Fault escalation (see below)");
if (SCB_MMFSR != 0) {
reason = MpuFault(esf, 1, recoverable);
reason = mem_manage_fault(esf, 1, recoverable);
} else if (SCB_BFSR != 0) {
reason = BusFault(esf, 1, recoverable);
reason = bus_fault(esf, 1, recoverable);
} else if (SCB_UFSR != 0) {
reason = UsageFault(esf);
reason = usage_fault(esf);
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
} else if (SAU->SFSR != 0) {
SecureFault(esf);
secure_fault(esf);
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
}
}
@@ -665,11 +666,11 @@ static u32_t HardFault(z_arch_esf_t *esf, bool *recoverable)
*
* @brief Dump reserved exception information
*
* See _FaultDump() for example.
* See z_arm_fault_dump() for example.
*
* @return N/A
*/
static void ReservedException(const z_arch_esf_t *esf, int fault)
static void reserved_exception(const z_arch_esf_t *esf, int fault)
{
ARG_UNUSED(esf);
@@ -679,7 +680,7 @@ static void ReservedException(const z_arch_esf_t *esf, int fault)
}
/* Handler function for ARM fault conditions. */
static u32_t FaultHandle(z_arch_esf_t *esf, int fault, bool *recoverable)
static u32_t fault_handle(z_arch_esf_t *esf, int fault, bool *recoverable)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
@@ -687,39 +688,39 @@ static u32_t FaultHandle(z_arch_esf_t *esf, int fault, bool *recoverable)
switch (fault) {
case 3:
reason = HardFault(esf, recoverable);
reason = hard_fault(esf, recoverable);
break;
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* HardFault is used for all fault conditions on ARMv6-M. */
/* HardFault is raised for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
case 4:
reason = MpuFault(esf, 0, recoverable);
reason = mem_manage_fault(esf, 0, recoverable);
break;
case 5:
reason = BusFault(esf, 0, recoverable);
reason = bus_fault(esf, 0, recoverable);
break;
case 6:
reason = UsageFault(esf);
reason = usage_fault(esf);
break;
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
case 7:
SecureFault(esf);
secure_fault(esf);
break;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
case 12:
DebugMonitor(esf);
debug_monitor(esf);
break;
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
default:
ReservedException(esf, fault);
reserved_exception(esf, fault);
break;
}
if ((*recoverable) == false) {
/* Dump generic information about the fault. */
FaultShow(esf, fault);
fault_show(esf, fault);
}
return reason;
@@ -733,7 +734,7 @@ static u32_t FaultHandle(z_arch_esf_t *esf, int fault, bool *recoverable)
*
* @param secure_esf Pointer to the secure stack frame.
*/
static void SecureStackDump(const z_arch_esf_t *secure_esf)
static void secure_stack_dump(const z_arch_esf_t *secure_esf)
{
/*
* In case a Non-Secure exception interrupted the Secure
@@ -769,7 +770,7 @@ static void SecureStackDump(const z_arch_esf_t *secure_esf)
PR_FAULT_INFO(" S instruction address: 0x%x", sec_ret_addr);
}
#define SECURE_STACK_DUMP(esf) SecureStackDump(esf)
#define SECURE_STACK_DUMP(esf) secure_stack_dump(esf)
#else
/* We do not dump the Secure stack information for lower dump levels. */
#define SECURE_STACK_DUMP(esf)
@@ -805,7 +806,7 @@ static void SecureStackDump(const z_arch_esf_t *secure_esf)
* Note: exc_return argument shall only be used by the Fault handler if we are
* running a Secure Firmware.
*/
void _Fault(z_arch_esf_t *esf, u32_t exc_return)
void z_arm_fault(z_arch_esf_t *esf, u32_t exc_return)
{
u32_t reason = K_ERR_CPU_EXCEPTION;
int fault = SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk;
@@ -880,7 +881,7 @@ void _Fault(z_arch_esf_t *esf, u32_t exc_return)
(void) exc_return;
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
reason = FaultHandle(esf, fault, &recoverable);
reason = fault_handle(esf, fault, &recoverable);
if (recoverable) {
return;
}
@@ -900,7 +901,7 @@ _exit_fatal:
*
* @return N/A
*/
void z_FaultInit(void)
void z_arm_fault_init(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)


@@ -24,7 +24,7 @@
* @return N/A
*/
void z_IntLibInit(void)
void z_arm_int_lib_init(void)
{
int irq = 0;


@@ -23,7 +23,7 @@ LOG_MODULE_REGISTER(mpu);
* available MPU regions for dynamic programming depends on the number of the
* static MPU regions currently being programmed, and the total number of HW-
* available MPU regions. This macro is only used internally in function
* z_arch_configure_dynamic_mpu_regions(), to reserve sufficient area for the
* z_arm_configure_dynamic_mpu_regions(), to reserve sufficient area for the
* array of dynamic regions passed to the underlying driver.
*/
#if defined(CONFIG_USERSPACE)
@@ -59,7 +59,7 @@ LOG_MODULE_REGISTER(mpu);
* For some MPU architectures, such as the unmodified ARMv8-M MPU,
* the function must execute with MPU enabled.
*/
void z_arch_configure_static_mpu_regions(void)
void z_arm_configure_static_mpu_regions(void)
{
#if defined(CONFIG_COVERAGE_GCOV) && defined(CONFIG_USERSPACE)
const struct k_mem_partition gcov_region =
@@ -142,7 +142,7 @@ void z_arch_configure_static_mpu_regions(void)
* For some MPU architectures, such as the unmodified ARMv8-M MPU,
* the function must execute with MPU enabled.
*/
void z_arch_configure_dynamic_mpu_regions(struct k_thread *thread)
void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread)
{
/* Define an array of k_mem_partition objects to hold the configuration
* of the respective dynamic MPU regions to be programmed for
@@ -293,7 +293,7 @@ void z_arch_mem_domain_thread_add(struct k_thread *thread)
* This triggers re-programming of the entire dynamic
* memory map.
*/
z_arch_configure_dynamic_mpu_regions(thread);
z_arm_configure_dynamic_mpu_regions(thread);
}
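The dynamic-region reprogramming above is normally exercised through the memory-domain API. As a minimal sketch of the path that ends in z_arm_configure_dynamic_mpu_regions(); the buffer, partition, and function names below are hypothetical, and alignment requirements vary with the MPU architecture:

#include <kernel.h>

/* Hypothetical shared buffer; ARMv7-M MPUs require size-aligned regions. */
static u8_t __aligned(256) shared_buf[256];

K_MEM_PARTITION_DEFINE(shared_part, shared_buf, sizeof(shared_buf),
		       K_MEM_PARTITION_P_RW_U_RW);

static struct k_mem_partition *parts[] = { &shared_part };
static struct k_mem_domain dom;

void grant_shared_buf(k_tid_t user_thread)
{
	k_mem_domain_init(&dom, ARRAY_SIZE(parts), parts);
	/* Adding the thread triggers re-programming of the dynamic regions. */
	k_mem_domain_add_thread(&dom, user_thread);
}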
/*


@@ -18,7 +18,7 @@
_ASM_FILE_PROLOGUE
GTEXT(__reset)
GTEXT(z_arm_reset)
GTEXT(memset)
GDATA(_interrupt_stack)
#if defined(CONFIG_PLATFORM_SPECIFIC_INIT)
@@ -39,19 +39,19 @@ GTEXT(z_platform_init)
* deep trouble.
*
* We want to use the process stack pointer (PSP) instead of the MSP, since the
* MSP is to be set up to point to the one-and-only interrupt stack during later
* boot. That would not be possible if in use for running C code.
* MSP is to be set up to point to the one-and-only interrupt stack during
* later boot. That would not be possible if in use for running C code.
*
* When these steps are completed, jump to _PrepC(), which will finish setting
* up the system for running C code.
* When these steps are completed, jump to z_arm_prep_c(), which will finish
* setting up the system for running C code.
*
* @return N/A
*/
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,z_arm_reset)
/*
* The entry point is located at the __reset symbol, which
* The entry point is located at the z_arm_reset symbol, which
* is fetched by a XIP image playing the role of a bootloader, which jumps to
* it, not through the reset vector mechanism. Such bootloaders might want to
* search for a __start symbol instead, so create that alias here.
@@ -74,7 +74,7 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
#ifdef CONFIG_WDOG_INIT
/* board-specific watchdog initialization is necessary */
bl _WdogInit
bl z_arm_watchdog_init
#endif
#ifdef CONFIG_INIT_STACKS
@@ -105,7 +105,7 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
/*
* 'bl' jumps the furthest of the branch instructions that are
* supported on all platforms. So it is used when jumping to _PrepC
* supported on all platforms. So it is used when jumping to z_arm_prep_c
* (even though we do not intend to return).
*/
bl _PrepC
bl z_arm_prep_c


@@ -33,42 +33,42 @@ SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
*/
.word z_main_stack + CONFIG_MAIN_STACK_SIZE
.word __reset
.word __nmi
.word z_arm_reset
.word z_arm_nmi
.word __hard_fault
.word z_arm_hard_fault
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
.word __reserved
.word __reserved
.word __reserved
.word __reserved
.word __reserved
.word __reserved
.word __reserved
.word __svc
.word __reserved
.word z_arm_reserved
.word z_arm_reserved
.word z_arm_reserved
.word z_arm_reserved
.word z_arm_reserved
.word z_arm_reserved
.word z_arm_reserved
.word z_arm_svc
.word z_arm_reserved
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
.word __mpu_fault
.word __bus_fault
.word __usage_fault
.word z_arm_mpu_fault
.word z_arm_bus_fault
.word z_arm_usage_fault
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
.word __secure_fault
.word z_arm_secure_fault
#else
.word __reserved
.word z_arm_reserved
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
.word __reserved
.word __reserved
.word __reserved
.word __svc
.word __debug_monitor
.word z_arm_reserved
.word z_arm_reserved
.word z_arm_reserved
.word z_arm_svc
.word z_arm_debug_monitor
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
.word __reserved
.word __pendsv
.word z_arm_reserved
.word z_arm_pendsv
#if defined(CONFIG_SYS_CLOCK_EXISTS)
.word z_clock_isr
#else
.word __reserved
.word z_arm_reserved
#endif


@@ -30,27 +30,27 @@
GTEXT(__start)
GTEXT(_vector_table)
GTEXT(__reset)
GTEXT(__nmi)
GTEXT(__hard_fault)
GTEXT(z_arm_reset)
GTEXT(z_arm_nmi)
GTEXT(z_arm_hard_fault)
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
GTEXT(__svc)
GTEXT(z_arm_svc)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
GTEXT(__mpu_fault)
GTEXT(__bus_fault)
GTEXT(__usage_fault)
GTEXT(z_arm_mpu_fault)
GTEXT(z_arm_bus_fault)
GTEXT(z_arm_usage_fault)
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
GTEXT(__secure_fault)
GTEXT(z_arm_secure_fault)
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
GTEXT(__svc)
GTEXT(__debug_monitor)
GTEXT(z_arm_svc)
GTEXT(z_arm_debug_monitor)
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
GTEXT(__pendsv)
GTEXT(__reserved)
GTEXT(z_arm_pendsv)
GTEXT(z_arm_reserved)
GTEXT(_PrepC)
GTEXT(z_arm_prep_c)
GTEXT(_isr_wrapper)
#else /* _ASMLANGUAGE */


@@ -18,11 +18,11 @@
*
* This is a stub for more exception handling code to be added later.
*/
void _Fault(z_arch_esf_t *esf, u32_t exc_return)
void z_arm_fault(z_arch_esf_t *esf, u32_t exc_return)
{
z_arm_fatal_error(K_ERR_CPU_EXCEPTION, esf);
}
void z_FaultInit(void)
void z_arm_fault_init(void)
{
}


@@ -19,13 +19,13 @@
_ASM_FILE_PROLOGUE
GTEXT(__reset)
GTEXT(z_arm_reset)
GDATA(_interrupt_stack)
GDATA(_svc_stack)
GDATA(_sys_stack)
GDATA(_fiq_stack)
GDATA(_abort_stack)
GDATA(_undef_stack)
GDATA(z_arm_svc_stack)
GDATA(z_arm_sys_stack)
GDATA(z_arm_fiq_stack)
GDATA(z_arm_abort_stack)
GDATA(z_arm_undef_stack)
#define STACK_MARGIN 4
@@ -38,12 +38,12 @@ GDATA(_undef_stack)
* privileged level. At this point, the main stack pointer (MSP) is already
* pointing to a valid area in SRAM.
*
* When these steps are completed, jump to _PrepC(), which will finish setting
* up the system for running C code.
* When these steps are completed, jump to z_arm_prep_c(), which will finish
* setting up the system for running C code.
*
* @return N/A
*/
SECTION_SUBSEC_FUNC(TEXT, _reset_section, __reset)
SECTION_SUBSEC_FUNC(TEXT, _reset_section, z_arm_reset)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
mov r0, #0
mov r1, #0
@@ -65,7 +65,7 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
/* Setup FIQ stack */
msr CPSR_c, #(MODE_FIQ | I_BIT | F_BIT)
ldr sp, =(_fiq_stack + CONFIG_ARMV7_FIQ_STACK_SIZE - STACK_MARGIN)
ldr sp, =(z_arm_fiq_stack + CONFIG_ARMV7_FIQ_STACK_SIZE - STACK_MARGIN)
/* Setup IRQ stack */
msr CPSR_c, #(MODE_IRQ | I_BIT | F_BIT)
@@ -73,21 +73,21 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
/* Setup data abort stack */
msr CPSR_c, #(MODE_ABT | I_BIT | F_BIT)
ldr sp, =(_abort_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE - \
ldr sp, =(z_arm_abort_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE - \
STACK_MARGIN)
/* Setup undefined mode stack */
msr CPSR_c, #(MODE_UDF | I_BIT | F_BIT)
ldr sp, =(_undef_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE - \
ldr sp, =(z_arm_undef_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE - \
STACK_MARGIN)
/* Setup SVC mode stack */
msr CPSR_c, #(MODE_SVC | I_BIT | F_BIT)
ldr sp, =(_svc_stack + CONFIG_ARMV7_SVC_STACK_SIZE - STACK_MARGIN)
ldr sp, =(z_arm_svc_stack + CONFIG_ARMV7_SVC_STACK_SIZE - STACK_MARGIN)
/* Setup System mode stack */
msr CPSR_c, #(MODE_SYS | I_BIT | F_BIT)
ldr sp, =(_sys_stack + CONFIG_ARMV7_SYS_STACK_SIZE - STACK_MARGIN)
ldr sp, =(z_arm_sys_stack + CONFIG_ARMV7_SYS_STACK_SIZE - STACK_MARGIN)
/* Setup system control register */
mrc p15, 0, r0, c1, c0, 0 /* SCTLR */
@@ -96,7 +96,7 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
#if defined(CONFIG_WDOG_INIT)
/* board-specific watchdog initialization is necessary */
bl _WdogInit
bl z_arm_watchdog_init
#endif
b _PrepC
b z_arm_prep_c


@@ -8,19 +8,19 @@
#include <cortex_r/stack.h>
#include <string.h>
K_THREAD_STACK_DEFINE(_fiq_stack, CONFIG_ARMV7_FIQ_STACK_SIZE);
K_THREAD_STACK_DEFINE(_abort_stack, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
K_THREAD_STACK_DEFINE(_undef_stack, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
K_THREAD_STACK_DEFINE(_svc_stack, CONFIG_ARMV7_SVC_STACK_SIZE);
K_THREAD_STACK_DEFINE(_sys_stack, CONFIG_ARMV7_SYS_STACK_SIZE);
K_THREAD_STACK_DEFINE(z_arm_fiq_stack, CONFIG_ARMV7_FIQ_STACK_SIZE);
K_THREAD_STACK_DEFINE(z_arm_abort_stack, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
K_THREAD_STACK_DEFINE(z_arm_undef_stack, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
K_THREAD_STACK_DEFINE(z_arm_svc_stack, CONFIG_ARMV7_SVC_STACK_SIZE);
K_THREAD_STACK_DEFINE(z_arm_sys_stack, CONFIG_ARMV7_SYS_STACK_SIZE);
#if defined(CONFIG_INIT_STACKS)
void init_stacks(void)
void z_arm_init_stacks(void)
{
memset(_fiq_stack, 0xAA, CONFIG_ARMV7_FIQ_STACK_SIZE);
memset(_svc_stack, 0xAA, CONFIG_ARMV7_SVC_STACK_SIZE);
memset(_abort_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
memset(_undef_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
memset(z_arm_fiq_stack, 0xAA, CONFIG_ARMV7_FIQ_STACK_SIZE);
memset(z_arm_svc_stack, 0xAA, CONFIG_ARMV7_SVC_STACK_SIZE);
memset(z_arm_abort_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
memset(z_arm_undef_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
memset(&_interrupt_stack, 0xAA, CONFIG_ISR_STACK_SIZE);
}
#endif


@@ -17,11 +17,11 @@
_ASM_FILE_PROLOGUE
SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
ldr pc, =__reset /* offset 0 */
ldr pc, =__undef_instruction /* undef instruction offset 4 */
ldr pc, =__svc /* svc offset 8 */
ldr pc, =__prefetch_abort /* prefetch abort offset 0xc */
ldr pc, =__data_abort /* data abort offset 0x10 */
nop /* offset 0x14 */
ldr pc, =_isr_wrapper /* IRQ offset 0x18 */
ldr pc, =__nmi /* FIQ offset 0x1c */
ldr pc, =z_arm_reset /* offset 0 */
ldr pc, =z_arm_undef_instruction /* undef instruction offset 4 */
ldr pc, =z_arm_svc /* svc offset 8 */
ldr pc, =z_arm_prefetch_abort /* prefetch abort offset 0xc */
ldr pc, =z_arm_data_abort /* data abort offset 0x10 */
nop /* offset 0x14 */
ldr pc, =_isr_wrapper /* IRQ offset 0x18 */
ldr pc, =z_arm_nmi /* FIQ offset 0x1c */


@@ -31,16 +31,16 @@
GTEXT(__start)
GTEXT(_vector_table)
GTEXT(__nmi)
GTEXT(__undef_instruction)
GTEXT(__svc)
GTEXT(__prefetch_abort)
GTEXT(__data_abort)
GTEXT(z_arm_nmi)
GTEXT(z_arm_undef_instruction)
GTEXT(z_arm_svc)
GTEXT(z_arm_prefetch_abort)
GTEXT(z_arm_data_abort)
GTEXT(__pendsv)
GTEXT(__reserved)
GTEXT(z_arm_pendsv)
GTEXT(z_arm_reserved)
GTEXT(_PrepC)
GTEXT(z_arm_prep_c)
GTEXT(_isr_wrapper)
#else /* _ASMLANGUAGE */


@@ -20,7 +20,7 @@
_ASM_FILE_PROLOGUE
GTEXT(z_CpuIdleInit)
GTEXT(z_arm_cpu_idle_init)
GTEXT(z_arch_cpu_idle)
GTEXT(z_arch_cpu_atomic_idle)
@@ -44,10 +44,10 @@ GTEXT(z_arch_cpu_atomic_idle)
*
* C function prototype:
*
* void z_CpuIdleInit (void);
* void z_arm_cpu_idle_init(void);
*/
SECTION_FUNC(TEXT, z_CpuIdleInit)
SECTION_FUNC(TEXT, z_arm_cpu_idle_init)
#if defined(CONFIG_CPU_CORTEX_M)
ldr r1, =_SCB_SCR
movs.n r2, #_SCR_INIT_BITS


@@ -21,11 +21,11 @@
_ASM_FILE_PROLOGUE
GTEXT(z_ExcExit)
GTEXT(_IntExit)
GTEXT(z_arm_exc_exit)
GTEXT(z_arm_int_exit)
GDATA(_kernel)
#if defined(CONFIG_CPU_CORTEX_R)
GTEXT(__pendsv)
GTEXT(z_arm_pendsv)
#endif
/**
@@ -34,13 +34,13 @@ GTEXT(__pendsv)
* directly in vector table
*
* Kernel allows installing interrupt handlers (ISRs) directly into the vector
* table to get the lowest interrupt latency possible. This allows the ISR to be
* invoked directly without going through a software interrupt table. However,
* upon exiting the ISR, some kernel work must still be performed, namely
* possible context switching. While ISRs connected in the software interrupt
* table do this automatically via a wrapper, ISRs connected directly in the
* vector table must invoke _IntExit() as the *very last* action before
* returning.
* table to get the lowest interrupt latency possible. This allows the ISR to
* be invoked directly without going through a software interrupt table.
* However, upon exiting the ISR, some kernel work must still be performed,
* namely possible context switching. While ISRs connected in the software
* interrupt table do this automatically via a wrapper, ISRs connected directly
* in the vector table must invoke z_arm_int_exit() as the *very last* action
* before returning.
*
* e.g.
*
@@ -48,27 +48,29 @@ GTEXT(__pendsv)
* {
* printk("in %s\n", __FUNCTION__);
* doStuff();
* _IntExit();
* z_arm_int_exit();
* }
*
* @return N/A
*/
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_int_exit)
/* _IntExit falls through to z_ExcExit (they are aliases of each other) */
/* z_arm_int_exit falls through to z_arm_exc_exit (they are aliases of each
* other)
*/
/**
*
* @brief Kernel housekeeping when exiting exception handler installed
* directly in vector table
*
* See _IntExit().
* See z_arm_int_exit().
*
* @return N/A
*/
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_ExcExit)
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_arm_exc_exit)
#if defined(CONFIG_CPU_CORTEX_R)
push {r0, lr}
#endif
@@ -89,7 +91,7 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_ExcExit)
str r2, [r1]
#elif defined(CONFIG_CPU_CORTEX_R)
push {r0, lr}
bl __pendsv
bl z_arm_pendsv
pop {r0, lr}
#endif
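Written out as a complete handler under the new naming, the rule documented above (z_arm_int_exit() as the very last action) looks like this sketch; the handler name and its body are illustrative, not part of the commit:

extern void z_arm_int_exit(void);

static volatile int events;

/* Hypothetical ISR installed directly in the vector table. */
void my_direct_isr(void)
{
	events++;          /* service the interrupt source (illustrative) */
	z_arm_int_exit();  /* kernel housekeeping; must be the final action */
}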


@@ -17,27 +17,27 @@
_ASM_FILE_PROLOGUE
GTEXT(_Fault)
GTEXT(z_arm_fault)
GTEXT(__hard_fault)
GTEXT(z_arm_hard_fault)
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* HardFault is used for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
GTEXT(__mpu_fault)
GTEXT(__bus_fault)
GTEXT(__usage_fault)
GTEXT(z_arm_mpu_fault)
GTEXT(z_arm_bus_fault)
GTEXT(z_arm_usage_fault)
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
GTEXT(__secure_fault)
GTEXT(z_arm_secure_fault)
#endif /* CONFIG_ARM_SECURE_FIRMWARE*/
GTEXT(__debug_monitor)
GTEXT(z_arm_debug_monitor)
#elif defined(CONFIG_ARMV7_R)
GTEXT(__undef_instruction)
GTEXT(__prefetch_abort)
GTEXT(__data_abort)
GTEXT(z_arm_undef_instruction)
GTEXT(z_arm_prefetch_abort)
GTEXT(z_arm_data_abort)
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
GTEXT(__reserved)
GTEXT(z_arm_reserved)
/**
*
@@ -47,41 +47,41 @@ GTEXT(__reserved)
* monitor and reserved exceptions.
*
* Save the values of the MSP and PSP in r0 and r1 respectively, so the first
* and second parameters to the _Fault() C function that will handle the rest.
* This has to be done because at this point we do not know if the fault
* and second parameters to the z_arm_fault() C function that will handle the
* rest. This has to be done because at this point we do not know if the fault
* happened while handling an exception or not, and thus the ESF could be on
* either stack. _Fault() will find out where the ESF resides.
* either stack. z_arm_fault() will find out where the ESF resides.
*
* Provides these symbols:
*
* __hard_fault
* __mpu_fault
* __bus_fault
* __usage_fault
* __secure_fault
* __debug_monitor
* __reserved
* z_arm_hard_fault
* z_arm_mpu_fault
* z_arm_bus_fault
* z_arm_usage_fault
* z_arm_secure_fault
* z_arm_debug_monitor
* z_arm_reserved
*/
SECTION_SUBSEC_FUNC(TEXT,__fault,__hard_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_hard_fault)
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* HardFault is used for all fault conditions on ARMv6-M. */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
SECTION_SUBSEC_FUNC(TEXT,__fault,__mpu_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,__bus_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,__usage_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_mpu_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_bus_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_usage_fault)
#if defined(CONFIG_ARM_SECURE_FIRMWARE)
SECTION_SUBSEC_FUNC(TEXT,__fault,__secure_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_secure_fault)
#endif /* CONFIG_ARM_SECURE_FIRMWARE */
SECTION_SUBSEC_FUNC(TEXT,__fault,__debug_monitor)
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_debug_monitor)
#elif defined(CONFIG_ARMV7_R)
SECTION_SUBSEC_FUNC(TEXT,__fault,__undef_instruction)
SECTION_SUBSEC_FUNC(TEXT,__fault,__prefetch_abort)
SECTION_SUBSEC_FUNC(TEXT,__fault,__data_abort)
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_undef_instruction)
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_prefetch_abort)
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_data_abort)
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
SECTION_SUBSEC_FUNC(TEXT,__fault,__reserved)
SECTION_SUBSEC_FUNC(TEXT,__fault,z_arm_reserved)
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* force unlock interrupts */
@@ -137,8 +137,9 @@ _s_stack_frame_endif:
#endif /* CONFIG_ARM_SECURE_FIRMWARE || CONFIG_ARM_NONSECURE_FIRMWARE */
#elif defined(CONFIG_ARMV7_R)
/*
* Pass null for the esf to _Fault for now. A future PR will add better
* exception debug for Cortex-R that subsumes what esf provides.
* Pass null for the esf to z_arm_fault for now. A future PR will add
* better exception debug for Cortex-R that subsumes what esf
* provides.
*/
mov r0, #0
#else
@@ -158,7 +159,7 @@ _s_stack_frame_endif:
mov r1, lr
#endif /* CONFIG_ARM_SECURE_FIRMWARE || CONFIG_ARM_NONSECURE_FIRMWARE */
push {r0, lr}
bl _Fault
bl z_arm_fault
#if defined(CONFIG_CPU_CORTEX_M)
pop {r0, pc}


@@ -30,7 +30,7 @@
#include <kernel_structs.h>
#include <debug/tracing.h>
extern void __reserved(void);
extern void z_arm_reserved(void);
#if defined(CONFIG_CPU_CORTEX_M)
#define NUM_IRQS_PER_REG 32
@@ -87,7 +87,7 @@ int z_arch_irq_is_enabled(unsigned int irq)
*
* @return N/A
*/
void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
{
/* The kernel may reserve some of the highest priority levels.
* So we offset the requested priority level with the number
@@ -180,7 +180,7 @@ int z_arch_irq_is_enabled(unsigned int irq)
*
* @return N/A
*/
void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
{
struct device *dev = _sw_isr_table[0].arg;
@@ -196,14 +196,14 @@ void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
* Installed in all dynamic interrupt slots at boot time. Throws an error if
* called.
*
* See __reserved().
* See z_arm_reserved().
*
* @return N/A
*/
void z_irq_spurious(void *unused)
{
ARG_UNUSED(unused);
__reserved();
z_arm_reserved();
}
/* FIXME: IRQ direct inline functions have to be placed here and not in
@@ -320,7 +320,7 @@ int z_arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
u32_t flags)
{
z_isr_install(irq, routine, parameter);
z_irq_priority_set(irq, priority, flags);
z_arm_irq_priority_set(irq, priority, flags);
return irq;
}
#endif /* CONFIG_DYNAMIC_INTERRUPTS */


@@ -14,7 +14,7 @@
volatile irq_offload_routine_t offload_routine;
static void *offload_param;
/* Called by __svc */
/* Called by z_arm_svc */
void z_irq_do_offload(void)
{
offload_routine(offload_param);
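For context, z_irq_do_offload() runs the routine that the public irq_offload() API registers before raising the SVC. A minimal usage sketch, with a hypothetical worker routine:

#include <irq_offload.h>

/* Hypothetical routine to execute in interrupt context. */
static void offloaded_work(void *param)
{
	volatile int *flag = param;

	*flag = 1; /* runs inside the SVC-driven interrupt context */
}

void run_offloaded(void)
{
	volatile int done = 0;

	irq_offload(offloaded_work, (void *)&done);
	/* irq_offload() is synchronous, so done == 1 here */
}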


@@ -54,7 +54,7 @@ GTEXT(__vector_relay_handler)
SECTION_FUNC(vector_relay_table, __vector_relay_table)
.word z_main_stack + CONFIG_MAIN_STACK_SIZE
.word __reset
.word z_arm_reset
.word __vector_relay_handler /* nmi */
.word __vector_relay_handler /* hard fault */


@@ -24,18 +24,18 @@ _ASM_FILE_PROLOGUE
GDATA(_sw_isr_table)
GTEXT(_isr_wrapper)
GTEXT(_IntExit)
GTEXT(z_arm_int_exit)
/**
*
* @brief Wrapper around ISRs when inserted in software ISR table
*
* When inserted in the vector table, _isr_wrapper() demuxes the ISR table using
* the running interrupt number as the index, and invokes the registered ISR
* with its corresponding argument. When returning from the ISR, it determines
* if a context switch needs to happen (see documentation for __pendsv()) and
* pends the PendSV exception if so: the latter will perform the context switch
* itself.
* When inserted in the vector table, _isr_wrapper() demuxes the ISR table
* using the running interrupt number as the index, and invokes the registered
* ISR with its corresponding argument. When returning from the ISR, it
* determines if a context switch needs to happen (see documentation for
* z_arm_pendsv()) and pends the PendSV exception if so: the latter will
* perform the context switch itself.
*
* @return N/A
*/
@@ -45,9 +45,9 @@ SECTION_FUNC(TEXT, _isr_wrapper)
push {r0,lr} /* r0, lr are now the first items on the stack */
#elif defined(CONFIG_CPU_CORTEX_R)
/*
* Save away r0-r3 from previous context to the process stack since they
* are clobbered here. Also, save away lr since we may swap processes
* and return to a different thread.
* Save away r0-r3 from previous context to the process stack since
* they are clobbered here. Also, save away lr since we may swap
* processes and return to a different thread.
*/
push {r4, r5}
mov r4, r12
@@ -71,11 +71,11 @@ SECTION_FUNC(TEXT, _isr_wrapper)
#ifdef CONFIG_SYS_POWER_MANAGEMENT
/*
* All interrupts are disabled when handling idle wakeup. For tickless
* idle, this ensures that the calculation and programming of the device
* for the next timer deadline is not interrupted. For non-tickless idle,
* this ensures that the clearing of the kernel idle state is not
* interrupted. In each case, z_sys_power_save_idle_exit is called with
* interrupts disabled.
* idle, this ensures that the calculation and programming of the
* device for the next timer deadline is not interrupted. For
* non-tickless idle, this ensures that the clearing of the kernel idle
* state is not interrupted. In each case, z_sys_power_save_idle_exit
* is called with interrupts disabled.
*/
cpsid i /* PRIMASK = 1 */
@@ -177,6 +177,6 @@ _idle_state_cleared:
/* Use 'bx' instead of 'b' because 'bx' can jump further, and use
* 'bx' instead of 'blx' because exception return is done in
* _IntExit() */
ldr r1, =_IntExit
* z_arm_int_exit() */
ldr r1, =z_arm_int_exit
bx r1
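In C terms, the Cortex-M demux path that _isr_wrapper implements corresponds roughly to the sketch below; the real code is assembly, the table and entry names come from Zephyr's sw_isr_table.h, and the function name here is hypothetical:

#include <sw_isr_table.h>             /* struct _isr_table_entry, _sw_isr_table */
#include <arch/arm/cortex_m/cmsis.h>  /* __get_IPSR() */

extern void z_arm_int_exit(void);

void isr_wrapper_sketch(void)
{
	/* IPSR holds the active exception number; external IRQs start at 16. */
	u32_t irq = __get_IPSR() - 16;
	const struct _isr_table_entry *entry = &_sw_isr_table[irq];

	entry->isr(entry->arg);  /* invoke the registered handler */
	z_arm_int_exit();        /* pends PendSV if a context switch is needed */
}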


@@ -34,7 +34,7 @@ static _NmiHandler_t handler = z_SysNmiOnReset;
* @brief Default NMI handler installed when kernel is up
*
* The default handler outputs a error message and reboots the target. It is
* installed by calling z_NmiInit();
* installed by calling z_arm_nmi_init();
*
* @return N/A
*/
@@ -57,7 +57,7 @@ static void DefaultHandler(void)
* @return N/A
*/
void z_NmiInit(void)
void z_arm_nmi_init(void)
{
handler = DefaultHandler;
}
@@ -88,8 +88,8 @@ void z_NmiHandlerSet(void (*pHandler)(void))
* @return N/A
*/
void __nmi(void)
void z_arm_nmi(void)
{
handler();
z_ExcExit();
z_arm_exc_exit();
}
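z_NmiHandlerSet(), visible in the hunk above, keeps its name in this commit; with CONFIG_RUNTIME_NMI enabled, an application can still install its own handler. A minimal sketch with a hypothetical board handler:

extern void z_NmiHandlerSet(void (*pHandler)(void));

/* Hypothetical board-specific NMI response. */
static void board_nmi_handler(void)
{
	/* record the event, then let a watchdog or reset recover */
}

void board_nmi_setup(void)
{
	z_NmiHandlerSet(board_nmi_handler);
}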


@@ -155,16 +155,16 @@ extern FUNC_NORETURN void z_cstart(void);
*
* @return N/A
*/
void _PrepC(void)
void z_arm_prep_c(void)
{
relocate_vector_table();
enable_floating_point();
z_bss_zero();
z_data_copy();
#if defined(CONFIG_ARMV7_R) && defined(CONFIG_INIT_STACKS)
init_stacks();
z_arm_init_stacks();
#endif
z_IntLibInit();
z_arm_int_lib_init();
z_cstart();
CODE_UNREACHABLE;
}


@@ -30,13 +30,13 @@ extern const int _k_neg_eagain;
* the heavy lifting of context switching.
* This is the only place we have to save BASEPRI since the other paths to
* __pendsv all come from handling an interrupt, which means we know the
* z_arm_pendsv all come from handling an interrupt, which means we know the
* interrupts were not locked: in that case the BASEPRI value is 0.
*
* Given that z_arch_swap() is called to effect a cooperative context switch,
* only the caller-saved integer registers need to be saved in the thread of the
* outgoing thread. This is all performed by the hardware, which stores it in
* its exception stack frame, created when handling the __pendsv exception.
* its exception stack frame, created when handling the z_arm_pendsv exception.
*
* On ARMv6-M, the intlock key is represented by the PRIMASK register,
* as BASEPRI is not available.
@@ -62,7 +62,7 @@ int z_arch_swap(unsigned int key)
/* clear mask or enable all irqs to take a pendsv */
irq_unlock(0);
#elif defined(CONFIG_CPU_CORTEX_R)
cortex_r_svc();
z_arm_cortex_r_svc();
irq_unlock(key);
#endif


@@ -20,8 +20,8 @@
_ASM_FILE_PROLOGUE
GTEXT(__svc)
GTEXT(__pendsv)
GTEXT(z_arm_svc)
GTEXT(z_arm_pendsv)
GTEXT(z_do_kernel_oops)
GTEXT(z_arm_do_syscall)
GDATA(_k_neg_eagain)
@@ -37,11 +37,11 @@ GDATA(_kernel)
* switch contexts, it pends the PendSV exception.
*
* When PendSV is pended, the decision that a context switch must happen has
* already been taken. In other words, when __pendsv() runs, we *know* we have
* to swap *something*.
* already been taken. In other words, when z_arm_pendsv() runs, we *know* we
* have to swap *something*.
*/
SECTION_FUNC(TEXT, __pendsv)
SECTION_FUNC(TEXT, z_arm_pendsv)
#ifdef CONFIG_TRACING
/* Register the context switch */
@@ -246,7 +246,7 @@ in_fp_endif:
push {r2,lr}
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
bl z_arch_configure_dynamic_mpu_regions
bl z_arm_configure_dynamic_mpu_regions
pop {r2,lr}
#endif
@@ -343,7 +343,7 @@ _thread_irq_disabled:
*
* @return N/A
*/
SECTION_FUNC(TEXT, __svc)
SECTION_FUNC(TEXT, z_arm_svc)
/* Use EXC_RETURN state to find out if stack frame is on the
* MSP or PSP
*/
@@ -381,8 +381,8 @@ _stack_frame_endif:
mov lr, r1
#endif /* CONFIG_IRQ_OFFLOAD */
/* exception return is done in _IntExit() */
b _IntExit
/* exception return is done in z_arm_int_exit() */
b z_arm_int_exit
_oops:
push {r0, lr}
@@ -402,7 +402,7 @@ _oops:
*
* @return N/A
*/
SECTION_FUNC(TEXT, __svc)
SECTION_FUNC(TEXT, z_arm_svc)
tst lr, #0x4 /* did we come from thread mode ? */
ite eq /* if zero (equal), came from handler mode */
mrseq r0, MSP /* handler mode, stack frame is on MSP */
@@ -445,8 +445,8 @@ SECTION_FUNC(TEXT, __svc)
bl z_irq_do_offload /* call C routine which executes the offload */
pop {r0, lr}
/* exception return is done in _IntExit() */
b _IntExit
/* exception return is done in z_arm_int_exit() */
b z_arm_int_exit
#endif
_oops:
@@ -517,7 +517,7 @@ valid_syscall_id:
#endif
#elif defined(CONFIG_ARMV7_R)
SECTION_FUNC(TEXT, __svc)
SECTION_FUNC(TEXT, z_arm_svc)
/*
* Switch to system mode to store r0-r3 to the process stack pointer.
* Save r12 and the lr as we will be swapping in another process and
@@ -564,19 +564,19 @@ demux:
blx z_irq_do_offload /* call C routine which executes the offload */
pop {r0, lr}
/* exception return is done in _IntExit() */
/* exception return is done in z_arm_int_exit() */
mov r0, #RET_FROM_SVC
b _IntExit
b z_arm_int_exit
#endif
_context_switch:
/* handler mode exit, to PendSV */
push {r0, lr}
bl __pendsv
bl z_arm_pendsv
pop {r0, lr}
mov r0, #RET_FROM_SVC
b _IntExit
b z_arm_int_exit
_oops:
push {r0, lr}
@@ -585,8 +585,8 @@ _oops:
cpsie i
movs pc, lr
GTEXT(cortex_r_svc)
SECTION_FUNC(TEXT, cortex_r_svc)
GTEXT(z_arm_cortex_r_svc)
SECTION_FUNC(TEXT, z_arm_cortex_r_svc)
svc #0
bx lr


@@ -25,16 +25,17 @@ extern u8_t *z_priv_stack_find(void *obj);
* @brief Initialize a new thread from its stack space
*
* The control structure (thread) is put at the lower address of the stack. An
* initial context, to be "restored" by __pendsv(), is put at the other end of
* the stack, and thus reusable by the stack when not needed anymore.
* initial context, to be "restored" by z_arm_pendsv(), is put at the other end
* of the stack, and thus reusable by the stack when not needed anymore.
*
* The initial context is an exception stack frame (ESF) since exiting the
* PendSV exception will want to pop an ESF. Interestingly, even if the lsb of
* an instruction address to jump to must always be set since the CPU always
* runs in thumb mode, the ESF expects the real address of the instruction,
* with the lsb *not* set (instructions are always aligned on 16 bit halfwords).
* Since the compiler automatically sets the lsb of function addresses, we have
* to unset it manually before storing it in the 'pc' field of the ESF.
* with the lsb *not* set (instructions are always aligned on 16 bit
* halfwords). Since the compiler automatically sets the lsb of function
* addresses, we have to unset it manually before storing it in the 'pc' field
* of the ESF.
*
* <options> is currently unused.
*
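The lsb rule described above can be made concrete with a short sketch; the frame layout and helper below are illustrative only, not the structures this file actually uses:

#include <zephyr/types.h>

/* Illustrative hardware-order basic exception stack frame. */
struct esf_sketch {
	u32_t r0, r1, r2, r3, r12, lr, pc, xpsr;
};

static void seed_initial_context(struct esf_sketch *esf, void (*entry)(void))
{
	/* The compiler sets bit 0 of 'entry' (Thumb mode); the hardware
	 * expects the real, halfword-aligned address with the lsb cleared.
	 */
	esf->pc = (u32_t)entry & ~(u32_t)1;
	esf->xpsr = 0x01000000; /* keep the EPSR T-bit set */
}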


@@ -68,7 +68,7 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
/* Re-program dynamic memory map.
*
* Important note:
* z_arch_configure_dynamic_mpu_regions() may re-program the MPU Stack Guard
* z_arm_configure_dynamic_mpu_regions() may re-program the MPU Stack Guard
* to guard the privilege stack for overflows (if building with option
* CONFIG_MPU_STACK_GUARD). There is a risk of actually overflowing the
* stack while doing the re-programming. We minimize the risk by placing
@@ -82,7 +82,7 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
push {r0,r1,r2,r3,ip,lr}
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
bl z_arch_configure_dynamic_mpu_regions
bl z_arm_configure_dynamic_mpu_regions
pop {r0,r1,r2,r3,ip,lr}
#endif


@@ -49,7 +49,7 @@ extern volatile irq_offload_routine_t offload_routine;
*
* @return 1 if in ISR, 0 if not.
*/
static ALWAYS_INLINE bool z_IsInIsr(void)
static ALWAYS_INLINE bool z_arch_is_in_isr(void)
{
u32_t vector = __get_IPSR();
@@ -85,7 +85,7 @@ static ALWAYS_INLINE bool z_IsInIsr(void)
*
* @return N/A
*/
static ALWAYS_INLINE void z_ExcSetup(void)
static ALWAYS_INLINE void z_arm_exc_setup(void)
{
NVIC_SetPriority(PendSV_IRQn, 0xff);
@@ -137,7 +137,7 @@ static ALWAYS_INLINE void z_ExcSetup(void)
*
* @return N/A
*/
static ALWAYS_INLINE void z_clearfaults(void)
static ALWAYS_INLINE void z_arm_clear_faults(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)


@@ -37,7 +37,7 @@ extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
*
* @return N/A
*/
static ALWAYS_INLINE void z_InterruptStackSetup(void)
static ALWAYS_INLINE void z_arm_interrupt_stack_setup(void)
{
u32_t msp = (u32_t)(Z_THREAD_STACK_BUFFER(_interrupt_stack)) +
K_THREAD_STACK_SIZEOF(_interrupt_stack);


@@ -40,7 +40,7 @@ extern volatile irq_offload_routine_t offload_routine;
*
* @return 1 if in ISR, 0 if not.
*/
static ALWAYS_INLINE bool z_IsInIsr(void)
static ALWAYS_INLINE bool z_arch_is_in_isr(void)
{
unsigned int status;
@@ -59,7 +59,7 @@ static ALWAYS_INLINE bool z_IsInIsr(void)
*
* @return N/A
*/
static ALWAYS_INLINE void z_ExcSetup(void)
static ALWAYS_INLINE void z_arm_exc_setup(void)
{
}
@@ -70,11 +70,11 @@ static ALWAYS_INLINE void z_ExcSetup(void)
*
* @return N/A
*/
static ALWAYS_INLINE void z_clearfaults(void)
static ALWAYS_INLINE void z_arm_clear_faults(void)
{
}
extern void cortex_r_svc(void);
extern void z_arm_cortex_r_svc(void);
#ifdef __cplusplus
}


@@ -26,7 +26,7 @@ extern "C" {
extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
extern void init_stacks(void);
extern void z_arm_init_stacks(void);
/**
*
@@ -36,7 +36,7 @@ extern void init_stacks(void);
*
* @return N/A
*/
static ALWAYS_INLINE void z_InterruptStackSetup(void)
static ALWAYS_INLINE void z_arm_interrupt_stack_setup(void)
{
}


@@ -27,20 +27,20 @@ extern "C" {
#endif
#ifndef _ASMLANGUAGE
extern void z_FaultInit(void);
extern void z_CpuIdleInit(void);
extern void z_arm_fault_init(void);
extern void z_arm_cpu_idle_init(void);
#ifdef CONFIG_ARM_MPU
extern void z_arch_configure_static_mpu_regions(void);
extern void z_arch_configure_dynamic_mpu_regions(struct k_thread *thread);
extern void z_arm_configure_static_mpu_regions(void);
extern void z_arm_configure_dynamic_mpu_regions(struct k_thread *thread);
#endif /* CONFIG_ARM_MPU */
static ALWAYS_INLINE void z_arch_kernel_init(void)
{
z_InterruptStackSetup();
z_ExcSetup();
z_FaultInit();
z_CpuIdleInit();
z_clearfaults();
z_arm_interrupt_stack_setup();
z_arm_exc_setup();
z_arm_fault_init();
z_arm_cpu_idle_init();
z_arm_clear_faults();
}
static ALWAYS_INLINE void
@@ -68,7 +68,7 @@ z_arch_switch_to_main_thread(struct k_thread *main_thread,
*
* This function is invoked once, upon system initialization.
*/
z_arch_configure_static_mpu_regions();
z_arm_configure_static_mpu_regions();
#endif
/* get high address of the stack, i.e. its start (stack grows down) */
@@ -91,7 +91,7 @@ z_arch_switch_to_main_thread(struct k_thread *main_thread,
* If stack protection is enabled, make sure to set it
* before jumping to thread entry function
*/
z_arch_configure_dynamic_mpu_regions(main_thread);
z_arm_configure_dynamic_mpu_regions(main_thread);
#endif
#if defined(CONFIG_BUILTIN_STACK_GUARD)
@@ -143,8 +143,6 @@ z_arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
extern void z_arch_cpu_atomic_idle(unsigned int key);
#define z_arch_is_in_isr() z_IsInIsr()
extern FUNC_NORETURN void z_arm_userspace_enter(k_thread_entry_t user_entry,
void *p1, void *p2, void *p3,
u32_t stack_end,


@@ -8,7 +8,7 @@
#include <spinlock.h>
#include <arch/arm/cortex_m/cmsis.h>
void z_ExcExit(void);
void z_arm_exc_exit(void);
#define COUNTER_MAX 0x00ffffff
#define TIMER_STOPPED 0xff000000
@@ -96,7 +96,7 @@ void z_clock_isr(void *arg)
overflow_cyc = 0U;
z_clock_announce(TICKLESS ? dticks : 1);
z_ExcExit();
z_arm_exc_exit();
}
int z_clock_driver_init(struct device *device)


@@ -36,7 +36,7 @@
#define _EXC_IRQ_DEFAULT_PRIO Z_EXC_PRIO(_IRQ_PRIO_OFFSET)
#ifdef _ASMLANGUAGE
GTEXT(z_ExcExit);
GTEXT(z_arm_exc_exit);
#else
#include <zephyr/types.h>
@@ -64,7 +64,7 @@ struct __esf {
typedef struct __esf z_arch_esf_t;
extern void z_ExcExit(void);
extern void z_arm_exc_exit(void);
#ifdef __cplusplus
}


@@ -23,7 +23,7 @@ extern "C" {
#endif
#ifdef _ASMLANGUAGE
GTEXT(_IntExit);
GTEXT(z_arm_int_exit);
GTEXT(z_arch_irq_enable)
GTEXT(z_arch_irq_disable)
GTEXT(z_arch_irq_is_enabled)
@@ -32,14 +32,14 @@ extern void z_arch_irq_enable(unsigned int irq);
extern void z_arch_irq_disable(unsigned int irq);
extern int z_arch_irq_is_enabled(unsigned int irq);
extern void _IntExit(void);
extern void z_arm_int_exit(void);
#if defined(CONFIG_ARMV7_R)
static ALWAYS_INLINE void z_IntLibInit(void)
static ALWAYS_INLINE void z_arm_int_lib_init(void)
{
}
#else
extern void z_IntLibInit(void);
extern void z_arm_int_lib_init(void);
#endif
/* macros convert value of it's argument to a string */
@@ -51,8 +51,8 @@ extern void z_IntLibInit(void);
#define CONCAT(x, y) DO_CONCAT(x, y)
/* internal routine documented in C file, needed by IRQ_CONNECT() macro */
extern void z_irq_priority_set(unsigned int irq, unsigned int prio,
u32_t flags);
extern void z_arm_irq_priority_set(unsigned int irq, unsigned int prio,
u32_t flags);
/* Flags for use with IRQ_CONNECT() */
@@ -90,7 +90,7 @@ extern void z_irq_priority_set(unsigned int irq, unsigned int prio,
#define Z_ARCH_IRQ_CONNECT(irq_p, priority_p, isr_p, isr_param_p, flags_p) \
({ \
Z_ISR_DECLARE(irq_p, 0, isr_p, isr_param_p); \
z_irq_priority_set(irq_p, priority_p, flags_p); \
z_arm_irq_priority_set(irq_p, priority_p, flags_p); \
irq_p; \
})
@@ -104,7 +104,7 @@ extern void z_irq_priority_set(unsigned int irq, unsigned int prio,
#define Z_ARCH_IRQ_DIRECT_CONNECT(irq_p, priority_p, isr_p, flags_p) \
({ \
Z_ISR_DECLARE(irq_p, ISR_FLAG_DIRECT, isr_p, NULL); \
z_irq_priority_set(irq_p, priority_p, flags_p); \
z_arm_irq_priority_set(irq_p, priority_p, flags_p); \
irq_p; \
})
@@ -122,7 +122,7 @@ extern void z_arch_isr_direct_header(void);
#define Z_ARCH_ISR_DIRECT_FOOTER(swap) z_arch_isr_direct_footer(swap)
/* arch/arm/core/exc_exit.S */
extern void _IntExit(void);
extern void z_arm_int_exit(void);
#ifdef CONFIG_TRACING
extern void sys_trace_isr_exit(void);
@@ -135,7 +135,7 @@ static inline void z_arch_isr_direct_footer(int maybe_swap)
sys_trace_isr_exit();
#endif
if (maybe_swap) {
_IntExit();
z_arm_int_exit();
}
}
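For reference, z_arm_irq_priority_set() is normally reached through the IRQ_CONNECT() macro shown above; a typical driver-side use, with a hypothetical IRQ line and handler, looks like:

#include <kernel.h>

#define MY_DEV_IRQ  24 /* hypothetical interrupt line */
#define MY_DEV_PRIO 2  /* hypothetical priority */

static void my_dev_isr(void *arg)
{
	ARG_UNUSED(arg);
	/* service the device */
}

void my_dev_config(void)
{
	IRQ_CONNECT(MY_DEV_IRQ, MY_DEV_PRIO, my_dev_isr, NULL, 0);
	irq_enable(MY_DEV_IRQ);
}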


@@ -15,8 +15,8 @@
#ifndef _ASMLANGUAGE
#ifdef CONFIG_RUNTIME_NMI
extern void z_NmiInit(void);
#define NMI_INIT() z_NmiInit()
extern void z_arm_nmi_init(void);
#define NMI_INIT() z_arm_nmi_init()
#else
#define NMI_INIT()
#endif


@@ -20,8 +20,8 @@
#include <logging/log.h>
#ifdef CONFIG_RUNTIME_NMI
extern void z_NmiInit(void);
#define NMI_INIT() z_NmiInit()
extern void z_arm_nmi_init(void);
#define NMI_INIT() z_arm_nmi_init()
#else
#define NMI_INIT()
#endif


@@ -20,8 +20,8 @@
#include <logging/log.h>
#ifdef CONFIG_RUNTIME_NMI
extern void z_NmiInit(void);
#define NMI_INIT() z_NmiInit()
extern void z_arm_nmi_init(void);
#define NMI_INIT() z_arm_nmi_init()
#else
#define NMI_INIT()
#endif


@@ -19,8 +19,8 @@
#include <logging/log.h>
#ifdef CONFIG_RUNTIME_NMI
extern void z_NmiInit(void);
#define NMI_INIT() z_NmiInit()
extern void z_arm_nmi_init(void);
#define NMI_INIT() z_arm_nmi_init()
#else
#define NMI_INIT()
#endif


@@ -106,7 +106,7 @@ const __imx_boot_ivt_section ivt image_vector_table = {
* @return N/A
*
*/
static ALWAYS_INLINE void clkInit(void)
static ALWAYS_INLINE void clock_init(void)
{
/* Boot ROM did initialize the XTAL, here we only sets external XTAL
* OSC freq
@@ -280,7 +280,7 @@ static int imxrt_init(struct device *arg)
}
/* Initialize system clock */
clkInit();
clock_init();
/*
* install default handler that simply resets the CPU


@@ -86,7 +86,7 @@ static const sim_clock_config_t simConfig = {
* @return N/A
*
*/
static ALWAYS_INLINE void clkInit(void)
static ALWAYS_INLINE void clock_init(void)
{
CLOCK_SetSimSafeDivs();
@@ -153,7 +153,7 @@ static int fsl_frdm_k22f_init(struct device *arg)
#endif /* !CONFIG_ARM_MPU */
/* Initialize PLL/system clock to 120 MHz */
clkInit();
clock_init();
/*
* install default handler that simply resets the CPU


@@ -17,7 +17,7 @@
_ASM_FILE_PROLOGUE
GTEXT(_WdogInit)
GTEXT(z_arm_watchdog_init)
/* watchdog register offsets */
#define WDOG_SCTRL_HI_OFFSET 0x0
@@ -36,7 +36,7 @@ GTEXT(_WdogInit)
* @return N/A
*/
SECTION_FUNC(TEXT,_WdogInit)
SECTION_FUNC(TEXT,z_arm_watchdog_init)
/*
* NOTE: DO NOT SINGLE STEP THROUGH THIS FUNCTION!!!
* There are timing requirements for the execution of the unlock process.


@@ -87,7 +87,7 @@ static const sim_clock_config_t simConfig = {
* @return N/A
*
*/
static ALWAYS_INLINE void clkInit(void)
static ALWAYS_INLINE void clock_init(void)
{
CLOCK_SetSimSafeDivs();
@@ -149,7 +149,7 @@ static int fsl_frdm_k64f_init(struct device *arg)
#endif /* !CONFIG_ARM_MPU */
/* Initialize PLL/system clock to 120 MHz */
clkInit();
clock_init();
/*
* install default handler that simply resets the CPU


@@ -17,7 +17,7 @@
_ASM_FILE_PROLOGUE
GTEXT(_WdogInit)
GTEXT(z_arm_watchdog_init)
/* watchdog register offsets */
#define WDOG_SCTRL_HI_OFFSET 0x0
@@ -36,7 +36,7 @@ GTEXT(_WdogInit)
* @return N/A
*/
SECTION_FUNC(TEXT,_WdogInit)
SECTION_FUNC(TEXT,z_arm_watchdog_init)
/*
* NOTE: DO NOT SINGLE STEP THROUGH THIS FUNCTION!!!
* There are timing requirements for the execution of the unlock process.


@@ -17,7 +17,7 @@
_ASM_FILE_PROLOGUE
GTEXT(_WdogInit)
GTEXT(z_arm_watchdog_init)
/* watchdog register offsets */
#define WDOG_SCTRL_HI_OFFSET 0x0
@@ -36,7 +36,7 @@ GTEXT(_WdogInit)
* @return N/A
*/
SECTION_FUNC(TEXT,_WdogInit)
SECTION_FUNC(TEXT,z_arm_watchdog_init)
/*
* NOTE: DO NOT SINGLE STEP THROUGH THIS FUNCTION!!!
* There are timing requirements for the execution of the unlock process.


@@ -251,7 +251,7 @@ static int ke1xf_init(struct device *arg)
return 0;
}
void _WdogInit(void)
void z_arm_watchdog_init(void)
{
/*
* NOTE: DO NOT SINGLE STEP THROUGH THIS FUNCTION!!! Watchdog


@@ -18,7 +18,7 @@
* Variables
******************************************************************************/
static ALWAYS_INLINE void clkInit(void)
static ALWAYS_INLINE void clock_init(void)
{
/*
* Core clock: 48MHz
@@ -85,7 +85,7 @@ static int kl2x_init(struct device *arg)
SIM->COPC = 0;
/* Initialize system clock to 48 MHz */
clkInit();
clock_init();
/*
* install default handler that simply resets the CPU


@@ -115,7 +115,7 @@ static void set_modem_clock(void)
* @return N/A
*
*/
static ALWAYS_INLINE void clkInit(void)
static ALWAYS_INLINE void clock_init(void)
{
CLOCK_SetSimSafeDivs();
@@ -158,7 +158,7 @@ static int kw2xd_init(struct device *arg)
PMC->REGSC |= PMC_REGSC_ACKISO_MASK;
/* Initialize PLL/system clock to 48 MHz */
clkInit();
clock_init();
/*
* install default handler that simply resets the CPU


@@ -51,7 +51,7 @@ static void CLOCK_SYS_FllStableDelay(void)
}
}
static ALWAYS_INLINE void clkInit(void)
static ALWAYS_INLINE void clock_init(void)
{
CLOCK_SetSimSafeDivs();
@@ -84,7 +84,7 @@ static int kwx_init(struct device *arg)
SIM->COPC = 0;
/* Initialize system clock to 40 MHz */
clkInit();
clock_init();
/*
* install default handler that simply resets the CPU


@@ -17,7 +17,7 @@
_ASM_FILE_PROLOGUE
GTEXT(_WdogInit)
GTEXT(z_arm_watchdog_init)
/* watchdog register offsets */
#define WDOG_SCTRL_HI_OFFSET 0x0
@@ -36,7 +36,7 @@ GTEXT(_WdogInit)
* @return N/A
*/
SECTION_FUNC(TEXT,_WdogInit)
SECTION_FUNC(TEXT,z_arm_watchdog_init)
/*
* NOTE: DO NOT SINGLE STEP THROUGH THIS FUNCTION!!!
* There are timing requirements for the execution of the unlock process.


@@ -33,7 +33,7 @@
*
*/
static ALWAYS_INLINE void clkInit(void)
static ALWAYS_INLINE void clock_init(void)
{
#ifdef CONFIG_SOC_LPC54114_M4
@@ -88,7 +88,7 @@ static int nxp_lpc54114_init(struct device *arg)
oldLevel = irq_lock();
/* Initialize FRO/system clock to 48 MHz */
clkInit();
clock_init();
/*
* install default handler that simply resets the CPU if configured in


@@ -33,7 +33,7 @@
*
*/
static ALWAYS_INLINE void clkInit(void)
static ALWAYS_INLINE void clock_init(void)
{
#ifdef CONFIG_SOC_LPC55S69_CPU0
/*!< Set up the clock sources */
@@ -82,10 +82,10 @@ static int nxp_lpc55s69_init(struct device *arg)
/* disable interrupts */
oldLevel = irq_lock();
z_clearfaults();
z_arm_clear_faults();
/* Initialize FRO/system clock to 48 MHz */
clkInit();
clock_init();
/*
* install default handler that simply resets the CPU if configured in


@@ -40,7 +40,7 @@ static const CMU_LFXOInit_TypeDef lfxoInit = CMU_LFXOINIT_DEFAULT;
* @return N/A
*
*/
static ALWAYS_INLINE void clkInit(void)
static ALWAYS_INLINE void clock_init(void)
{
#ifdef CONFIG_CMU_HFCLK_HFXO
if (CMU_ClockSelectGet(cmuClock_HF) != cmuSelect_HFXO) {
@@ -124,7 +124,7 @@ static int silabs_exx32_init(struct device *arg)
#endif
/* Initialize system clock according to CONFIG_CMU settings */
clkInit();
clock_init();
/*
* install default handler that simply resets the CPU


@@ -69,7 +69,7 @@ void isr0(void)
{
printk("%s ran!\n", __func__);
k_sem_give(&sem[0]);
_IntExit();
z_arm_int_exit();
}
/**
@@ -83,7 +83,7 @@ void isr1(void)
{
printk("%s ran!\n", __func__);
k_sem_give(&sem[1]);
_IntExit();
z_arm_int_exit();
}
/**
@@ -97,7 +97,7 @@ void isr2(void)
{
printk("%s ran!\n", __func__);
k_sem_give(&sem[2]);
_IntExit();
z_arm_int_exit();
}
/**
@@ -125,7 +125,7 @@ void test_arm_irq_vector_table(void)
for (int ii = 0; ii < 3; ii++) {
irq_enable(_ISR_OFFSET + ii);
z_irq_priority_set(_ISR_OFFSET + ii, 0, 0);
z_arm_irq_priority_set(_ISR_OFFSET + ii, 0, 0);
k_sem_init(&sem[ii], 0, UINT_MAX);
}