arch: arm: core: aarch32: enable ARMv7-R/Cortex-R code for ARMv7-A/Cortex-A

Modify #ifdefs so that any code that is compiled if CONFIG_ARMV7_R is
set is also compiled if CONFIG_ARMV7_A is set.
Modify #ifdefs so that any code that is compiled if CONFIG_CPU_CORTEX_R
is set is also compiled if CONFIG_CPU_AARCH32_CORTEX_A is set.
Modify source dir inclusion in CMakeLists.txt accordingly.
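
As an illustrative sketch of the guard pattern (a generic example, not a
verbatim excerpt from any one file in this commit):

  /* before: compiled for ARMv7-R / Cortex-R only */
  #if defined(CONFIG_ARMV7_R)
  /* code shared by Cortex-R and Cortex-A */
  #endif

  /* after: also compiled for ARMv7-A / Cortex-A */
  #if defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
  /* code shared by Cortex-R and Cortex-A */
  #endif

The CONFIG_CPU_CORTEX_R guards gain defined(CONFIG_CPU_AARCH32_CORTEX_A)
in the same way.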

Brief file descriptions have been updated to include Cortex-A wherever
previously only Cortex-M and Cortex-R were mentioned.

Signed-off-by: Immo Birnbaum <Immo.Birnbaum@weidmueller.com>
Immo Birnbaum 2021-07-15 10:49:19 +02:00 committed by Carles Cufí
commit c6141c49c1
13 changed files with 42 additions and 34 deletions

@@ -30,5 +30,6 @@ add_subdirectory_ifdef(CONFIG_ARM_MPU mpu)
add_subdirectory_ifdef(CONFIG_ARM_AARCH32_MMU mmu)
add_subdirectory_ifdef(CONFIG_CPU_CORTEX_R cortex_a_r)
+add_subdirectory_ifdef(CONFIG_CPU_AARCH32_CORTEX_A cortex_a_r)
zephyr_linker_sources(ROM_START SORT_KEY 0x0vectors vector_table.ld)

@@ -6,7 +6,7 @@
/**
* @file
-* @brief ARM Cortex-M and Cortex-R power management
+* @brief ARM Cortex-A, Cortex-M and Cortex-R power management
*
*/
@@ -135,7 +135,8 @@ SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
/* r0: interrupt mask from caller */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
-|| defined(CONFIG_ARMV7_R)
+|| defined(CONFIG_ARMV7_R) \
+|| defined(CONFIG_ARMV7_A)
/* No BASEPRI, call wfe directly
* (SEVONPEND is set in z_arm_cpu_idle_init())
*/

@@ -6,7 +6,7 @@
/**
* @file
-* @brief ARM Cortex-M and Cortex-R interrupt management
+* @brief ARM Cortex-A, Cortex-M and Cortex-R interrupt management
*
*
* Interrupt management: enabling/disabling and dynamic ISR
@@ -18,7 +18,8 @@
#include <arch/cpu.h>
#if defined(CONFIG_CPU_CORTEX_M)
#include <arch/arm/aarch32/cortex_m/cmsis.h>
-#elif defined(CONFIG_CPU_CORTEX_A) || defined(CONFIG_CPU_CORTEX_R)
+#elif defined(CONFIG_CPU_AARCH32_CORTEX_A) \
+|| defined(CONFIG_CPU_CORTEX_R)
#include <drivers/interrupt_controller/gic.h>
#endif
#include <sys/__assert.h>
@@ -92,7 +93,8 @@ void z_arm_irq_priority_set(unsigned int irq, unsigned int prio, uint32_t flags)
NVIC_SetPriority((IRQn_Type)irq, prio);
}
-#elif defined(CONFIG_CPU_CORTEX_A) || defined(CONFIG_CPU_CORTEX_R)
+#elif defined(CONFIG_CPU_AARCH32_CORTEX_A) \
+|| defined(CONFIG_CPU_CORTEX_R)
/*
* For Cortex-A and Cortex-R cores, the default interrupt controller is the ARM
* Generic Interrupt Controller (GIC) and therefore the architecture interrupt
@@ -162,7 +164,8 @@ void z_irq_spurious(const void *unused)
void _arch_isr_direct_pm(void)
{
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
-|| defined(CONFIG_ARMV7_R)
+|| defined(CONFIG_ARMV7_R) \
+|| defined(CONFIG_ARMV7_A)
unsigned int key;
/* irq_lock() does what we want for this CPU */
@@ -185,7 +188,8 @@ void _arch_isr_direct_pm(void)
}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
-|| defined(CONFIG_ARMV7_R)
+|| defined(CONFIG_ARMV7_R) \
+|| defined(CONFIG_ARMV7_A)
irq_unlock(key);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
__asm__ volatile("cpsie i" : : : "memory");

@@ -7,7 +7,7 @@
/**
* @file
-* @brief ARM Cortex-M and Cortex-R wrapper for ISRs with parameter
+* @brief ARM Cortex-A, Cortex-M and Cortex-R wrapper for ISRs with parameter
*
* Wrapper installed in vector table for handling dynamic interrupts that accept
* a parameter.
@@ -48,7 +48,7 @@ SECTION_FUNC(TEXT, _isr_wrapper)
#if defined(CONFIG_CPU_CORTEX_M)
push {r0,lr} /* r0, lr are now the first items on the stack */
-#elif defined(CONFIG_CPU_CORTEX_R)
+#elif defined(CONFIG_CPU_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
#if defined(CONFIG_USERSPACE)
/* See comment below about svc stack usage */
@@ -164,7 +164,7 @@ _idle_state_cleared:
/* clear kernel idle state */
strne r1, [r2, #_kernel_offset_to_idle]
blne z_pm_save_idle_exit
-#elif defined(CONFIG_ARMV7_R)
+#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
beq _idle_state_cleared
movs r1, #0
/* clear kernel idle state */
@@ -191,7 +191,7 @@ _idle_state_cleared:
sub r0, r0, #16 /* get IRQ number */
lsl r0, r0, #3 /* table is 8-byte wide */
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
-#elif defined(CONFIG_CPU_CORTEX_R)
+#elif defined(CONFIG_CPU_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
/* Get active IRQ number from the interrupt controller */
#if !defined(CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER)
bl arm_gic_get_active
@@ -233,7 +233,7 @@ _idle_state_cleared:
ldm r1!,{r0,r3} /* arg in r0, ISR in r3 */
blx r3 /* call ISR */
-#if defined(CONFIG_CPU_CORTEX_R)
+#if defined(CONFIG_CPU_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
spurious_continue:
/* Signal end-of-interrupt */
pop {r0, r1}
@@ -242,7 +242,7 @@ spurious_continue:
#else
bl z_soc_irq_eoi
#endif /* !CONFIG_ARM_CUSTOM_INTERRUPT_CONTROLLER */
-#endif /* CONFIG_CPU_CORTEX_R */
+#endif /* CONFIG_CPU_CORTEX_R || CONFIG_CPU_AARCH32_CORTEX_A */
#ifdef CONFIG_TRACING_ISR
bl sys_trace_isr_exit
@@ -253,7 +253,7 @@ spurious_continue:
mov lr, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
pop {r0, lr}
-#elif defined(CONFIG_ARMV7_R)
+#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
/*
* r0 and lr_irq were saved on the process stack since a swap could
* happen. exc_exit will handle getting those values back

@@ -20,7 +20,7 @@
#include <kernel_internal.h>
#include <linker/linker-defs.h>
-#if defined(CONFIG_ARMV7_R)
+#if defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
#include <aarch32/cortex_a_r/stack.h>
#endif
@@ -166,6 +166,7 @@ static inline void z_arm_floating_point_init(void)
#endif /* CONFIG_CPU_HAS_FPU */
extern FUNC_NORETURN void z_cstart(void);
/**
*
* @brief Prepare to and run C code
@@ -182,7 +183,7 @@ void z_arm_prep_c(void)
#endif
z_bss_zero();
z_data_copy();
-#if defined(CONFIG_ARMV7_R) && defined(CONFIG_INIT_STACKS)
+#if ((defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)) && defined(CONFIG_INIT_STACKS))
z_arm_init_stacks();
#endif
z_arm_interrupt_init();

@@ -42,7 +42,7 @@ int arch_swap(unsigned int key)
/* clear mask or enable all irqs to take a pendsv */
irq_unlock(0);
-#elif defined(CONFIG_CPU_CORTEX_R)
+#elif defined(CONFIG_CPU_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
z_arm_cortex_r_svc();
irq_unlock(key);
#endif

@@ -11,7 +11,7 @@
* @brief Thread context switching for ARM Cortex-M and Cortex-R
*
* This module implements the routines necessary for thread context switching
-* on ARM Cortex-M and Cortex-R CPUs.
+* on ARM Cortex-A, Cortex-M and Cortex-R CPUs.
*/
#include <toolchain.h>
@@ -120,7 +120,7 @@ out_fp_endif:
* regardless of whether the thread has an active FP context.
*/
#endif /* CONFIG_FPU_SHARING */
-#elif defined(CONFIG_ARMV7_R)
+#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
/* Store rest of process context */
cps #MODE_SYS
stm r0, {r4-r11, sp}
@@ -136,7 +136,7 @@ out_fp_endif:
movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
msr BASEPRI_MAX, r0
isb /* Make the effect of disabling interrupts be realized immediately */
-#elif defined(CONFIG_ARMV7_R)
+#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
/*
* Interrupts are still disabled from arch_swap so empty clause
* here to avoid the preprocessor error below
@@ -344,7 +344,7 @@ in_fp_endif:
/* load callee-saved + psp from thread */
add r0, r2, #_thread_offset_to_callee_saved
ldmia r0, {v1-v8, ip}
-#elif defined(CONFIG_ARMV7_R)
+#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
_thread_irq_disabled:
/* load _kernel into r1 and current k_thread into r2 */
ldr r1, =_kernel
@@ -603,7 +603,7 @@ valid_syscall_id:
bx lr
#endif /* CONFIG_USERSPACE */
-#elif defined(CONFIG_ARMV7_R)
+#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
/**
*

@@ -6,10 +6,10 @@
/**
* @file
-* @brief New thread creation for ARM Cortex-M and Cortex-R
+* @brief New thread creation for ARM Cortex-A, Cortex-M and Cortex-R
*
-* Core thread related primitives for the ARM Cortex-M and Cortex-R
-* processor architecture.
+* Core thread related primitives for the ARM Cortex-A, Cortex-M and
+* Cortex-R processor architecture.
*/
#include <kernel.h>
@@ -562,7 +562,8 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
"movs r1, #0\n\t"
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
-|| defined(CONFIG_ARMV7_R)
+|| defined(CONFIG_ARMV7_R) \
+|| defined(CONFIG_ARMV7_A)
"cpsie i\n\t" /* __enable_irq() */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
"cpsie if\n\t" /* __enable_irq(); __enable_fault_irq() */

@@ -30,7 +30,7 @@ extern void z_arm_init_stacks(void);
*
* @brief Setup interrupt stack
*
-* On Cortex-R, the interrupt stack is set up by reset.S
+* On Cortex-A and Cortex-R, the interrupt stack is set up by reset.S
*
* @return N/A
*/

@@ -27,7 +27,7 @@
#if defined(CONFIG_CPU_CORTEX_M)
#include <aarch32/cortex_m/stack.h>
#include <aarch32/cortex_m/exc.h>
-#elif defined(CONFIG_CPU_CORTEX_R)
+#elif defined(CONFIG_CPU_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
#include <aarch32/cortex_a_r/stack.h>
#include <aarch32/cortex_a_r/exc.h>
#endif

@@ -37,7 +37,7 @@
#include <arch/arm/aarch32/cortex_m/cpu.h>
#include <arch/arm/aarch32/cortex_m/memory_map.h>
#include <arch/common/sys_io.h>
-#elif defined(CONFIG_CPU_CORTEX_R)
+#elif defined(CONFIG_CPU_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
#include <arch/arm/aarch32/cortex_a_r/cpu.h>
#include <arch/arm/aarch32/cortex_a_r/sys_io.h>
#include <arch/arm/aarch32/cortex_a_r/timer.h>
@@ -71,7 +71,7 @@ extern "C" {
*/
#if defined(CONFIG_USERSPACE)
#define Z_THREAD_MIN_STACK_ALIGN CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
-#elif defined(CONFIG_ARM_MMU)
+#elif defined(CONFIG_ARM_AARCH32_MMU)
#define Z_THREAD_MIN_STACK_ALIGN CONFIG_ARM_MMU_REGION_MIN_ALIGN_AND_SIZE
#else
#define Z_THREAD_MIN_STACK_ALIGN ARCH_STACK_PTR_ALIGN

@@ -22,7 +22,7 @@
#include <arch/arm/aarch32/exc.h>
#include <irq.h>
-#if defined(CONFIG_CPU_CORTEX_R)
+#if defined(CONFIG_CPU_CORTEX_R) || defined(CONFIG_CPU_AARCH32_CORTEX_A)
#include <arch/arm/aarch32/cortex_a_r/cpu.h>
#endif
@@ -61,7 +61,7 @@ static ALWAYS_INLINE unsigned int arch_irq_lock(void)
: "=r"(key), "=r"(tmp)
: "i"(_EXC_IRQ_DEFAULT_PRIO)
: "memory");
-#elif defined(CONFIG_ARMV7_R)
+#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
__asm__ volatile(
"mrs %0, cpsr;"
"and %0, #" TOSTR(I_BIT) ";"
@@ -96,7 +96,7 @@ static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
"msr BASEPRI, %0;"
"isb;"
: : "r"(key) : "memory");
-#elif defined(CONFIG_ARMV7_R)
+#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
if (key != 0U) {
return;
}

@@ -53,7 +53,7 @@ do { \
: [reason] "i" (reason_p), [id] "i" (_SVC_CALL_RUNTIME_EXCEPT) \
: "memory"); \
} while (false)
-#elif defined(CONFIG_ARMV7_R)
+#elif defined(CONFIG_ARMV7_R) || defined(CONFIG_ARMV7_A)
/*
* In order to support using svc for an exception while running in an
* isr, stack $lr_svc before calling svc. While exiting the isr,