From c30a71df9515aad8fe70cb03578fc4779e08f3fb Mon Sep 17 00:00:00 2001 From: Bradley Bolen Date: Mon, 25 Jun 2018 09:15:14 -0400 Subject: [PATCH] arch: arm: Add Cortex-R support This adds initial Cortex-R support for interrupts and context switching. Signed-off-by: Bradley Bolen --- CODEOWNERS | 2 + arch/arm/core/CMakeLists.txt | 2 + arch/arm/core/Kconfig | 91 ++++ arch/arm/core/cortex_m/Kconfig | 70 --- arch/arm/core/cortex_r/CMakeLists.txt | 11 + arch/arm/core/cortex_r/Kconfig | 82 +++ arch/arm/core/cortex_r/fault.c | 28 + arch/arm/core/cortex_r/reboot.c | 28 + arch/arm/core/cortex_r/reset.S | 102 ++++ arch/arm/core/cortex_r/stacks.c | 26 + arch/arm/core/cortex_r/vector_table.S | 27 + arch/arm/core/cortex_r/vector_table.h | 52 ++ arch/arm/core/cpu_idle.S | 10 +- arch/arm/core/exc_exit.S | 39 ++ arch/arm/core/fault_s.S | 19 + arch/arm/core/irq_manage.c | 80 ++- arch/arm/core/isr_wrapper.S | 47 +- arch/arm/core/offsets/offsets.c | 4 + arch/arm/core/prep_c.c | 11 +- arch/arm/core/swap.c | 5 + arch/arm/core/swap_helper.S | 107 +++- arch/arm/core/thread.c | 7 + arch/arm/include/cortex_r/exc.h | 86 +++ arch/arm/include/cortex_r/stack.h | 49 ++ arch/arm/include/kernel_arch_data.h | 3 + arch/arm/include/kernel_arch_func.h | 6 +- arch/arm/include/kernel_arch_thread.h | 6 + dts/arm/armv7-r.dtsi | 34 ++ include/arch/arm/arch.h | 2 + include/arch/arm/asm_inline_gcc.h | 11 + include/arch/arm/cortex_r/cpu.h | 32 ++ .../cortex_r/scripts/app_data_alignment.ld | 9 + include/arch/arm/cortex_r/scripts/linker.ld | 493 ++++++++++++++++++ include/arch/arm/error.h | 2 + include/arch/arm/irq.h | 8 + include/arch/arm/syscall.h | 1 + .../testsuite/include/test_asm_inline_gcc.h | 6 + 37 files changed, 1516 insertions(+), 82 deletions(-) create mode 100644 arch/arm/core/cortex_r/CMakeLists.txt create mode 100644 arch/arm/core/cortex_r/Kconfig create mode 100644 arch/arm/core/cortex_r/fault.c create mode 100644 arch/arm/core/cortex_r/reboot.c create mode 100644 arch/arm/core/cortex_r/reset.S 
create mode 100644 arch/arm/core/cortex_r/stacks.c create mode 100644 arch/arm/core/cortex_r/vector_table.S create mode 100644 arch/arm/core/cortex_r/vector_table.h create mode 100644 arch/arm/include/cortex_r/exc.h create mode 100644 arch/arm/include/cortex_r/stack.h create mode 100644 dts/arm/armv7-r.dtsi create mode 100644 include/arch/arm/cortex_r/cpu.h create mode 100644 include/arch/arm/cortex_r/scripts/app_data_alignment.ld create mode 100644 include/arch/arm/cortex_r/scripts/linker.ld diff --git a/CODEOWNERS b/CODEOWNERS index 932148aecba..315413d9823 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -18,6 +18,7 @@ /arch/arm/ @MaureenHelm @galak @ioannisg /arch/arm/core/cortex_m/cmse/ @ioannisg /arch/arm/include/cortex_m/cmse.h @ioannisg +/arch/arm/core/cortex_r/ @MaureenHelm @galak @ioannisg @bbolen /arch/common/ @andrewboie @ioannisg @andyross /arch/x86_64/ @andyross /soc/arc/snps_*/ @vonhust @ruuddw @@ -183,6 +184,7 @@ /dts/riscv/rv32m1* @MaureenHelm /dts/riscv/riscv32-fe310.dtsi @nategraff-sifive /dts/riscv/riscv32-litex-vexriscv.dtsi @mateusz-holenko @kgugala @pgielda +/dts/arm/armv7-r.dtsi @bbolen /dts/xtensa/xtensa.dtsi @ydamigos /dts/bindings/ @galak /dts/bindings/can/ @alexanderwachter diff --git a/arch/arm/core/CMakeLists.txt b/arch/arm/core/CMakeLists.txt index 4e407e002d2..7ea2f9fd299 100644 --- a/arch/arm/core/CMakeLists.txt +++ b/arch/arm/core/CMakeLists.txt @@ -33,3 +33,5 @@ add_subdirectory_ifdef(CONFIG_ARM_MPU cortex_m/mpu) add_subdirectory_ifdef(CONFIG_CPU_CORTEX_M_HAS_CMSE cortex_m/cmse) add_subdirectory_ifdef(CONFIG_ARM_SECURE_FIRMWARE cortex_m/tz) add_subdirectory_ifdef(CONFIG_ARM_NONSECURE_FIRMWARE cortex_m/tz) + +add_subdirectory_ifdef(CONFIG_CPU_CORTEX_R cortex_r) diff --git a/arch/arm/core/Kconfig b/arch/arm/core/Kconfig index eec107f7212..89d030bbab3 100644 --- a/arch/arm/core/Kconfig +++ b/arch/arm/core/Kconfig @@ -29,6 +29,96 @@ config CPU_CORTEX_M help This option signifies the use of a CPU of the Cortex-M family. 
+config CPU_CORTEX_R + bool + select CPU_CORTEX + select HAS_FLASH_LOAD_OFFSET + help + This option signifies the use of a CPU of the Cortex-R family. + +config ISA_THUMB2 + bool + help + From: http://www.arm.com/products/processors/technologies/instruction-set-architectures.php + + Thumb-2 technology is the instruction set underlying the ARM Cortex + architecture which provides enhanced levels of performance, energy + efficiency, and code density for a wide range of embedded + applications. + + Thumb-2 technology builds on the success of Thumb, the innovative + high code density instruction set for ARM microprocessor cores, to + increase the power of the ARM microprocessor core available to + developers of low cost, high performance systems. + + The technology is backwards compatible with existing ARM and Thumb + solutions, while significantly extending the features available to + the Thumb instruction set. This allows more of the application to + benefit from the best in class code density of Thumb. + + For performance optimized code Thumb-2 technology uses 31 percent + less memory to reduce system cost, while providing up to 38 percent + higher performance than existing high density code, which can be used + to prolong battery-life or to enrich the product feature set. Thumb-2 + technology is featured in the processor, and in all ARMv7 + architecture-based processors. + +config ISA_ARM + bool + help + From: https://developer.arm.com/products/architecture/instruction-sets/a32-and-t32-instruction-sets + + A32 instructions, known as Arm instructions in pre-Armv8 architectures, + are 32 bits wide, and are aligned on 4-byte boundaries. A32 instructions + are supported by both A-profile and R-profile architectures. + + A32 was traditionally used in applications requiring the highest + performance, or for handling hardware exceptions such as interrupts and + processor start-up. 
Much of its functionality was subsumed into T32 with + the introduction of Thumb-2 technology. + +config DATA_ENDIANNESS_LITTLE + bool + default y if CPU_CORTEX + help + This is driven by the processor implementation, since it is fixed in + hardware. The board should set this value to 'n' if the data is + implemented as big endian. + +config STACK_ALIGN_DOUBLE_WORD + bool "Align stacks on double-words (8 octets)" + default y + help + This is needed to conform to AAPCS, the procedure call standard for + the ARM. It wastes stack space. The option also enforces alignment + of stack upon exception entry on Cortex-M3 and Cortex-M4 (ARMv7-M). + Note that for ARMv6-M, ARMv8-M, and Cortex-M7 MCUs stack alignment + on exception entry is enabled by default and it is not configurable. + +config RUNTIME_NMI + bool "Attach an NMI handler at runtime" + select REBOOT + help + The kernel provides a simple NMI handler that simply hangs in a tight + loop if triggered. This fills the requirement that there must be an + NMI handler installed when the CPU boots. If a custom handler is + needed, enable this option and attach it via _NmiHandlerSet(). + +config FAULT_DUMP + int "Fault dump level" + default 2 + range 0 2 + help + Different levels for display information when a fault occurs. + + 2: The default. Display specific and verbose information. Consumes + the most memory (long strings). + + 1: Display general and short information. Consumes less memory + (short strings). + + 0: Off. 
+ config BUILTIN_STACK_GUARD bool "Thread Stack Guards based on built-in ARM stack limit checking" depends on CPU_CORTEX_M_HAS_SPLIM @@ -185,6 +275,7 @@ endchoice endmenu source "arch/arm/core/cortex_m/Kconfig" +source "arch/arm/core/cortex_r/Kconfig" source "arch/arm/core/cortex_m/mpu/Kconfig" diff --git a/arch/arm/core/cortex_m/Kconfig b/arch/arm/core/cortex_m/Kconfig index 17ebd61bf56..bd3516ecd07 100644 --- a/arch/arm/core/cortex_m/Kconfig +++ b/arch/arm/core/cortex_m/Kconfig @@ -75,34 +75,6 @@ config CPU_CORTEX_M7 if CPU_CORTEX_M -config ISA_THUMB2 - bool - # Omit prompt to signify "hidden" option - help - From: http://www.arm.com/products/processors/technologies/instruction-set-architectures.php - - Thumb-2 technology is the instruction set underlying the ARM Cortex - architecture which provides enhanced levels of performance, energy - efficiency, and code density for a wide range of embedded - applications. - - Thumb-2 technology builds on the success of Thumb, the innovative - high code density instruction set for ARM microprocessor cores, to - increase the power of the ARM microprocessor core available to - developers of low cost, high performance systems. - - The technology is backwards compatible with existing ARM and Thumb - solutions, while significantly extending the features available to - the Thumb instructions set. This allows more of the application to - benefit from the best in class code density of Thumb. - - For performance optimized code Thumb-2 technology uses 31 percent - less memory to reduce system cost, while providing up to 38 percent - higher performance than existing high density code, which can be used - to prolong battery-life or to enrich the product feature set. Thumb-2 - technology is featured in the processor, and in all ARMv7 - architecture-based processors. 
- config CPU_CORTEX_M_HAS_SYSTICK bool # Omit prompt to signify "hidden" option @@ -275,48 +247,6 @@ config LDREX_STREX_AVAILABLE bool default y -config DATA_ENDIANNESS_LITTLE - bool - default y - help - This is driven by the processor implementation, since it is fixed in - hardware. The board should set this value to 'n' if the data is - implemented as big endian. - -config STACK_ALIGN_DOUBLE_WORD - bool "Align stacks on double-words (8 octets)" - default y - help - This is needed to conform to AAPCS, the procedure call standard for - the ARM. It wastes stack space. The option also enforces alignment - of stack upon exception entry on Cortex-M3 and Cortex-M4 (ARMv7-M). - Note that for ARMv6-M, ARMv8-M, and Cortex-M7 MCUs stack alignment - on exception entry is enabled by default and it is not configurable. - -config RUNTIME_NMI - bool "Attach an NMI handler at runtime" - select REBOOT - help - The kernel provides a simple NMI handler that simply hangs in a tight - loop if triggered. This fills the requirement that there must be an - NMI handler installed when the CPU boots. If a custom handler is - needed, enable this option and attach it via _NmiHandlerSet(). - -config FAULT_DUMP - int "Fault dump level" - default 2 - range 0 2 - help - Different levels for display information when a fault occurs. - - 2: The default. Display specific and verbose information. Consumes - the most memory (long strings). - - 1: Display general and short information. Consumes less memory - (short strings). - - 0: Off. 
- config XIP default y diff --git a/arch/arm/core/cortex_r/CMakeLists.txt b/arch/arm/core/cortex_r/CMakeLists.txt new file mode 100644 index 00000000000..a1154a3efb9 --- /dev/null +++ b/arch/arm/core/cortex_r/CMakeLists.txt @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: Apache-2.0 + +zephyr_library() + +zephyr_library_sources( + vector_table.S + reset.S + fault.c + reboot.c + stacks.c + ) diff --git a/arch/arm/core/cortex_r/Kconfig b/arch/arm/core/cortex_r/Kconfig new file mode 100644 index 00000000000..45ad42db023 --- /dev/null +++ b/arch/arm/core/cortex_r/Kconfig @@ -0,0 +1,82 @@ +# Kconfig - ARM Cortex-R platform configuration options + +# +# Copyright (c) 2018 Marvell +# Copyright (c) 2018 Lexmark International, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# NOTE: We have the specific core implementations first and outside of the +# if CPU_CORTEX_R block so that SoCs can select which core they are using +# without having to select all the options related to that core. Everything +# else is captured inside the if CPU_CORTEX_R block so they are not exposed +# if one selects a different ARM Cortex Family (Cortex-A or Cortex-M) + + +if CPU_CORTEX_R + +config ARMV7_R + bool + select ATOMIC_OPERATIONS_BUILTIN + select ISA_ARM + help + This option signifies the use of an ARMv7-R processor + implementation. + + From https://developer.arm.com/products/architecture/cpu-architecture/r-profile: + The Armv7-R architecture implements a traditional Arm architecture with + multiple modes and supports a Protected Memory System Architecture + (PMSA) based on a Memory Protection Unit (MPU). It supports the Arm (32) + and Thumb (T32) instruction sets. + +config ARMV7_R_FP + bool + depends on ARMV7_R + help + This option signifies the use of an ARMv7-R processor + implementation supporting the Floating-Point Extension. 
+ +config ARMV7_EXCEPTION_STACK_SIZE + int "Undefined Instruction and Abort stack size (in bytes)" + default 256 + help + This option specifies the size of the stack used by the undefined + instruction and data abort exception handlers. + +config ARMV7_FIQ_STACK_SIZE + int "FIQ stack size (in bytes)" + default 256 + help + This option specifies the size of the stack used by the FIQ handler. + +config ARMV7_SVC_STACK_SIZE + int "SVC stack size (in bytes)" + default 512 + help + This option specifies the size of the stack used by the SVC handler. + +config ARMV7_SYS_STACK_SIZE + int "SYS stack size (in bytes)" + default 1024 + help + This option specifies the size of the stack used by the system mode. + +menu "ARM Cortex-R options" + depends on CPU_CORTEX_R + +config RUNTIME_NMI + default y + +config LDREX_STREX_AVAILABLE + default y + +config GEN_ISR_TABLES + default y + +config GEN_IRQ_VECTOR_TABLE + default n + +endmenu + +endif # CPU_CORTEX_R diff --git a/arch/arm/core/cortex_r/fault.c b/arch/arm/core/cortex_r/fault.c new file mode 100644 index 00000000000..7ae32eb3b02 --- /dev/null +++ b/arch/arm/core/cortex_r/fault.c @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018 Lexmark International, Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +/** + * + * @brief Fault handler + * + * This routine is called when fatal error conditions are detected by hardware + * and is responsible only for reporting the error. Once reported, it then + * invokes z_arm_fatal_error(), which is + * responsible for implementing the error handling policy. + * + * This is a stub for more exception handling code to be added later. 
+ */ +void _Fault(z_arch_esf_t *esf, u32_t exc_return) +{ + z_arm_fatal_error(K_ERR_CPU_EXCEPTION, esf); +} + +void z_FaultInit(void) +{ +} diff --git a/arch/arm/core/cortex_r/reboot.c b/arch/arm/core/cortex_r/reboot.c new file mode 100644 index 00000000000..1d38faf421a --- /dev/null +++ b/arch/arm/core/cortex_r/reboot.c @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2013-2014 Wind River Systems, Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief ARM Cortex-R System Control Block interface + */ + +#include +#include +#include + +/** + * + * @brief Reset the system + * + * This routine resets the processor. + * + * @return N/A + */ + +void __weak sys_arch_reboot(int type) +{ + ARG_UNUSED(type); +} diff --git a/arch/arm/core/cortex_r/reset.S b/arch/arm/core/cortex_r/reset.S new file mode 100644 index 00000000000..51388c9f11d --- /dev/null +++ b/arch/arm/core/cortex_r/reset.S @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2013-2014 Wind River Systems, Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief Reset handler + * + * Reset handler that prepares the system for running C code. + */ + +#include +#include +#include +#include +#include "vector_table.h" + +_ASM_FILE_PROLOGUE + +GTEXT(__reset) +GDATA(_interrupt_stack) +GDATA(_svc_stack) +GDATA(_sys_stack) +GDATA(_fiq_stack) +GDATA(_abort_stack) +GDATA(_undef_stack) + +#define STACK_MARGIN 4 + + +/** + * + * @brief Reset vector + * + * Ran when the system comes out of reset. The processor is in thread mode with + * privileged level. At this point, the main stack pointer (MSP) is already + * pointing to a valid area in SRAM. + * + * When these steps are completed, jump to _PrepC(), which will finish setting + * up the system for running C code. 
+ * + * @return N/A + */ +SECTION_SUBSEC_FUNC(TEXT, _reset_section, __reset) +SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start) + mov r0, #0 + mov r1, #0 + mov r2, #0 + mov r3, #0 + mov r4, #0 + mov r5, #0 + mov r6, #0 + mov r7, #0 + mov r8, #0 + mov r9, #0 + mov r10, #0 + mov r11, #0 + mov r12, #0 + mov r14, #0 + + /* lock interrupts: will get unlocked when switch to main task */ + cpsid if + + /* Setup FIQ stack */ + msr CPSR_c, #(MODE_FIQ | I_BIT | F_BIT) + ldr sp, =(_fiq_stack + CONFIG_ARMV7_FIQ_STACK_SIZE - STACK_MARGIN) + + /* Setup IRQ stack */ + msr CPSR_c, #(MODE_IRQ | I_BIT | F_BIT) + ldr sp, =(_interrupt_stack + CONFIG_ISR_STACK_SIZE - STACK_MARGIN) + + /* Setup data abort stack */ + msr CPSR_c, #(MODE_ABT | I_BIT | F_BIT) + ldr sp, =(_abort_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE - \ + STACK_MARGIN) + + /* Setup undefined mode stack */ + msr CPSR_c, #(MODE_UDF | I_BIT | F_BIT) + ldr sp, =(_undef_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE - \ + STACK_MARGIN) + + /* Setup SVC mode stack */ + msr CPSR_c, #(MODE_SVC | I_BIT | F_BIT) + ldr sp, =(_svc_stack + CONFIG_ARMV7_SVC_STACK_SIZE - STACK_MARGIN) + + /* Setup System mode stack */ + msr CPSR_c, #(MODE_SYS | I_BIT | F_BIT) + ldr sp, =(_sys_stack + CONFIG_ARMV7_SYS_STACK_SIZE - STACK_MARGIN) + + /* Setup system control register */ + mrc p15, 0, r0, c1, c0, 0 /* SCTLR */ + bic r0, r0, #HIVECS /* Exception vectors from 0-0x1c */ + mcr p15, 0, r0, c1, c0, 0 + +#if defined(CONFIG_WDOG_INIT) + /* board-specific watchdog initialization is necessary */ + bl _WdogInit +#endif + + b _PrepC diff --git a/arch/arm/core/cortex_r/stacks.c b/arch/arm/core/cortex_r/stacks.c new file mode 100644 index 00000000000..445b246dfe0 --- /dev/null +++ b/arch/arm/core/cortex_r/stacks.c @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2018 Lexmark International, Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +K_THREAD_STACK_DEFINE(_fiq_stack, CONFIG_ARMV7_FIQ_STACK_SIZE); +K_THREAD_STACK_DEFINE(_abort_stack, CONFIG_ARMV7_EXCEPTION_STACK_SIZE); +K_THREAD_STACK_DEFINE(_undef_stack, CONFIG_ARMV7_EXCEPTION_STACK_SIZE); +K_THREAD_STACK_DEFINE(_svc_stack, CONFIG_ARMV7_SVC_STACK_SIZE); +K_THREAD_STACK_DEFINE(_sys_stack, CONFIG_ARMV7_SYS_STACK_SIZE); + +#if defined(CONFIG_INIT_STACKS) +void init_stacks(void) +{ + memset(_fiq_stack, 0xAA, CONFIG_ARMV7_FIQ_STACK_SIZE); + memset(_svc_stack, 0xAA, CONFIG_ARMV7_SVC_STACK_SIZE); + memset(_abort_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE); + memset(_undef_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE); + memset(&_interrupt_stack, 0xAA, CONFIG_ISR_STACK_SIZE); +} +#endif diff --git a/arch/arm/core/cortex_r/vector_table.S b/arch/arm/core/cortex_r/vector_table.S new file mode 100644 index 00000000000..71ed41949c9 --- /dev/null +++ b/arch/arm/core/cortex_r/vector_table.S @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2018 Marvell + * Copyright (c) 2018 Lexmark International, Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief Populated vector table in ROM + */ + +#include +#include +#include "vector_table.h" + +_ASM_FILE_PROLOGUE + +SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table) + ldr pc, =__reset /* offset 0 */ + ldr pc, =__undef_instruction /* undef instruction offset 4 */ + ldr pc, =__svc /* svc offset 8 */ + ldr pc, =__prefetch_abort /* prefetch abort offset 0xc */ + ldr pc, =__data_abort /* data abort offset 0x10 */ + nop /* offset 0x14 */ + ldr pc, =_isr_wrapper /* IRQ offset 0x18 */ + ldr pc, =__nmi /* FIQ offset 0x1c */ diff --git a/arch/arm/core/cortex_r/vector_table.h b/arch/arm/core/cortex_r/vector_table.h new file mode 100644 index 00000000000..f10e0f82dd9 --- /dev/null +++ b/arch/arm/core/cortex_r/vector_table.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2018 Marvell + * Copyright (c) 2018 Lexmark International, Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief Definitions for the boot vector table + * + * + * Definitions for the boot vector table. + * + * System exception handler names all have the same format: + * + * __ + * + * No other symbol has the same format, so they are easy to spot. 
+ */ + +#ifndef _VECTOR_TABLE__H_ +#define _VECTOR_TABLE__H_ + +#ifdef _ASMLANGUAGE + +#include +#include +#include + +GTEXT(__start) +GTEXT(_vector_table) + +GTEXT(__nmi) +GTEXT(__undef_instruction) +GTEXT(__svc) +GTEXT(__prefetch_abort) +GTEXT(__data_abort) + +GTEXT(__pendsv) +GTEXT(__reserved) + +GTEXT(_PrepC) +GTEXT(_isr_wrapper) + +#else + +extern void *_vector_table[]; + +#endif /* _ASMLANGUAGE */ + +#endif /* _VECTOR_TABLE__H_ */ diff --git a/arch/arm/core/cpu_idle.S b/arch/arm/core/cpu_idle.S index 30279cb7133..bcd359210b9 100644 --- a/arch/arm/core/cpu_idle.S +++ b/arch/arm/core/cpu_idle.S @@ -24,12 +24,14 @@ GTEXT(z_CpuIdleInit) GTEXT(k_cpu_idle) GTEXT(k_cpu_atomic_idle) +#if defined(CONFIG_CPU_CORTEX_M) #define _SCB_SCR 0xE000ED10 #define _SCB_SCR_SEVONPEND (1 << 4) #define _SCB_SCR_SLEEPDEEP (1 << 2) #define _SCB_SCR_SLEEPONEXIT (1 << 1) #define _SCR_INIT_BITS _SCB_SCR_SEVONPEND +#endif /** * @@ -46,9 +48,11 @@ GTEXT(k_cpu_atomic_idle) */ SECTION_FUNC(TEXT, z_CpuIdleInit) +#if defined(CONFIG_CPU_CORTEX_M) ldr r1, =_SCB_SCR movs.n r2, #_SCR_INIT_BITS str r2, [r1] +#endif bx lr /** @@ -79,7 +83,8 @@ SECTION_FUNC(TEXT, k_cpu_idle) #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_TRACING */ -#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) +#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \ + || defined(CONFIG_ARMV7_R) cpsie i #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) /* clear BASEPRI so wfi is awakened by incoming interrupts */ @@ -140,7 +145,8 @@ SECTION_FUNC(TEXT, k_cpu_atomic_idle) /* r0: interrupt mask from caller */ -#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) +#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \ + || defined(CONFIG_ARMV7_R) /* No BASEPRI, call wfe directly (SEVONPEND set in z_CpuIdleInit()) */ wfe diff --git a/arch/arm/core/exc_exit.S b/arch/arm/core/exc_exit.S index d074ce622df..5247103359c 100644 --- a/arch/arm/core/exc_exit.S +++ b/arch/arm/core/exc_exit.S @@ -24,6 +24,9 @@ _ASM_FILE_PROLOGUE GTEXT(z_ExcExit) GTEXT(_IntExit) 
GDATA(_kernel) +#if defined(CONFIG_CPU_CORTEX_R) +GTEXT(__pendsv) +#endif /** * @@ -66,6 +69,9 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit) */ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_ExcExit) +#if defined(CONFIG_CPU_CORTEX_R) + push {r0, lr} +#endif #ifdef CONFIG_PREEMPT_ENABLED ldr r0, =_kernel @@ -76,10 +82,16 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_ExcExit) cmp r0, r1 beq _EXIT_EXC +#if defined(CONFIG_CPU_CORTEX_M) /* context switch required, pend the PendSV exception */ ldr r1, =_SCS_ICSR ldr r2, =_SCS_ICSR_PENDSV str r2, [r1] +#elif defined(CONFIG_CPU_CORTEX_R) + push {r0, lr} + bl __pendsv + pop {r0, lr} +#endif _ExcExitWithGdbStub: @@ -96,4 +108,31 @@ _EXIT_EXC: pop {r0, lr} #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_STACK_SENTINEL */ + +#if defined(CONFIG_CPU_CORTEX_M) bx lr +#elif defined(CONFIG_CPU_CORTEX_R) + /* + * r0-r3 are either the values from the thread before it was switched out + * or they are the args to _new_thread for a new thread + */ + pop {r0, lr} + push {r4, r5} + + cmp r0, #RET_FROM_SVC + cps #MODE_SYS + ldmia sp!, {r0-r5} + beq _svc_exit + + cps #MODE_IRQ + b _exc_exit + +_svc_exit: + cps #MODE_SVC + +_exc_exit: + mov r12, r4 + mov lr, r5 + pop {r4, r5} + movs pc, lr +#endif diff --git a/arch/arm/core/fault_s.S b/arch/arm/core/fault_s.S index 472fd01da7f..785f49d01a7 100644 --- a/arch/arm/core/fault_s.S +++ b/arch/arm/core/fault_s.S @@ -30,6 +30,10 @@ GTEXT(__usage_fault) GTEXT(__secure_fault) #endif /* CONFIG_ARM_SECURE_FIRMWARE*/ GTEXT(__debug_monitor) +#elif defined(CONFIG_ARMV7_R) +GTEXT(__undef_instruction) +GTEXT(__prefetch_abort) +GTEXT(__data_abort) #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ @@ -70,6 +74,10 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__usage_fault) SECTION_SUBSEC_FUNC(TEXT,__fault,__secure_fault) #endif /* CONFIG_ARM_SECURE_FIRMWARE */ SECTION_SUBSEC_FUNC(TEXT,__fault,__debug_monitor) +#elif defined(CONFIG_ARMV7_R) 
+SECTION_SUBSEC_FUNC(TEXT,__fault,__undef_instruction) +SECTION_SUBSEC_FUNC(TEXT,__fault,__prefetch_abort) +SECTION_SUBSEC_FUNC(TEXT,__fault,__data_abort) #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ @@ -127,6 +135,12 @@ _s_stack_frame_msp: mrs r0, MSP _s_stack_frame_endif: #endif /* CONFIG_ARM_SECURE_FIRMWARE || CONFIG_ARM_NONSECURE_FIRMWARE */ +#elif defined(CONFIG_ARMV7_R) + /* + * Pass null for the esf to _Fault for now. A future PR will add better + * exception debug for Cortex-R that subsumes what esf provides. + */ + mov r0, #0 #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ @@ -146,6 +160,11 @@ _s_stack_frame_endif: push {r0, lr} bl _Fault +#if defined(CONFIG_CPU_CORTEX_M) pop {r0, pc} +#elif defined(CONFIG_CPU_CORTEX_R) + pop {r0, lr} + subs pc, lr, #8 +#endif .end diff --git a/arch/arm/core/irq_manage.c b/arch/arm/core/irq_manage.c index dfbea870127..a4ebcc5faee 100644 --- a/arch/arm/core/irq_manage.c +++ b/arch/arm/core/irq_manage.c @@ -16,7 +16,12 @@ #include #include +#if defined(CONFIG_CPU_CORTEX_M) #include +#elif defined(CONFIG_CPU_CORTEX_R) +#include +#include +#endif #include #include #include @@ -27,6 +32,7 @@ extern void __reserved(void); +#if defined(CONFIG_CPU_CORTEX_M) #define NUM_IRQS_PER_REG 32 #define REG_FROM_IRQ(irq) (irq / NUM_IRQS_PER_REG) #define BIT_FROM_IRQ(irq) (irq % NUM_IRQS_PER_REG) @@ -115,6 +121,74 @@ void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags) NVIC_SetPriority((IRQn_Type)irq, prio); } +#elif defined(CONFIG_CPU_CORTEX_R) + +/** + * + * @brief Enable an interrupt line + * + * Enable the interrupt. After this call, the CPU will receive interrupts for + * the specified . + * + * @return N/A + */ +void z_arch_irq_enable(unsigned int irq) +{ + struct device *dev = _sw_isr_table[0].arg; + + irq_enable_next_level(dev, (irq >> 8) - 1); +} + +/** + * + * @brief Disable an interrupt line + * + * Disable an interrupt line. 
After this call, the CPU will stop receiving + * interrupts for the specified . + * + * @return N/A + */ +void z_arch_irq_disable(unsigned int irq) +{ + struct device *dev = _sw_isr_table[0].arg; + + irq_disable_next_level(dev, (irq >> 8) - 1); +} + +/** + * @brief Return IRQ enable state + * + * @param irq IRQ line + * @return interrupt enable state, true or false + */ +int z_arch_irq_is_enabled(unsigned int irq) +{ + struct device *dev = _sw_isr_table[0].arg; + + return irq_is_enabled_next_level(dev); +} + +/** + * @internal + * + * @brief Set an interrupt's priority + * + * The priority is verified if ASSERT_ON is enabled. The maximum number + * of priority levels is a little complex, as there are some hardware + * priority levels which are reserved: three for various types of exceptions, + * and possibly one additional to support zero latency interrupts. + * + * @return N/A + */ +void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags) +{ + struct device *dev = _sw_isr_table[0].arg; + + irq_set_priority_next_level(dev, (irq >> 8) - 1, prio, flags); +} + +#endif + /** * * @brief Spurious interrupt handler @@ -144,7 +218,8 @@ void z_irq_spurious(void *unused) #ifdef CONFIG_SYS_POWER_MANAGEMENT void _arch_isr_direct_pm(void) { -#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) +#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \ + || defined(CONFIG_ARMV7_R) unsigned int key; /* irq_lock() does what we wan for this CPU */ @@ -166,7 +241,8 @@ void _arch_isr_direct_pm(void) z_sys_power_save_idle_exit(idle_val); } -#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) +#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \ + || defined(CONFIG_ARMV7_R) irq_unlock(key); #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) __asm__ volatile("cpsie i" : : : "memory"); diff --git a/arch/arm/core/isr_wrapper.S b/arch/arm/core/isr_wrapper.S index 026b2ea6eab..5b05e8ad6c2 100644 --- a/arch/arm/core/isr_wrapper.S +++ b/arch/arm/core/isr_wrapper.S @@ -41,7 +41,24 @@ GTEXT(_IntExit) */ 
SECTION_FUNC(TEXT, _isr_wrapper) +#if defined(CONFIG_CPU_CORTEX_M) push {r0,lr} /* r0, lr are now the first items on the stack */ +#elif defined(CONFIG_CPU_CORTEX_R) + /* + * Save away r0-r3 from previous context to the process stack since they + * are clobbered here. Also, save away lr since we may swap processes + * and return to a different thread. + */ + push {r4, r5} + mov r4, r12 + sub r5, lr, #4 + + cps #MODE_SYS + stmdb sp!, {r0-r5} + cps #MODE_IRQ + + pop {r4, r5} +#endif #ifdef CONFIG_EXECUTION_BENCHMARKING bl read_timer_start_of_isr @@ -82,6 +99,13 @@ _idle_state_cleared: /* clear kernel idle state */ strne r1, [r2, #_kernel_offset_to_idle] blne z_sys_power_save_idle_exit +#elif defined(CONFIG_ARMV7_R) + beq _idle_state_cleared + movs r1, #0 + /* clear kernel idle state */ + str r1, [r2, #_kernel_offset_to_idle] + bl z_sys_power_save_idle_exit +_idle_state_cleared: #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ @@ -89,7 +113,9 @@ _idle_state_cleared: cpsie i /* re-enable interrupts (PRIMASK = 0) */ #endif +#if defined(CONFIG_CPU_CORTEX_M) mrs r0, IPSR /* get exception number */ +#endif #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) ldr r1, =16 subs r0, r1 /* get IRQ number */ @@ -97,6 +123,12 @@ _idle_state_cleared: #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) sub r0, r0, #16 /* get IRQ number */ lsl r0, r0, #3 /* table is 8-byte wide */ +#elif defined(CONFIG_ARMV7_R) + /* + * Cortex-R only has one IRQ line so the main handler will be at + * offset 0 of the table. + */ + mov r0, #0 #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ @@ -128,12 +160,23 @@ _idle_state_cleared: mov lr, r3 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) pop {r0, lr} +#elif defined(CONFIG_ARMV7_R) + /* + * r0,lr were saved on the process stack since a swap could + * happen. 
exc_exit will handle getting those values back + * from the process stack to return to the correct location + * so there is no need to do anything here. + */ #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ +#if defined(CONFIG_CPU_CORTEX_R) + mov r0, #RET_FROM_IRQ +#endif + /* Use 'bx' instead of 'b' because 'bx' can jump further, and use * 'bx' instead of 'blx' because exception return is done in * _IntExit() */ - ldr r0, =_IntExit - bx r0 + ldr r1, =_IntExit + bx r1 diff --git a/arch/arm/core/offsets/offsets.c b/arch/arm/core/offsets/offsets.c index a7747c1796d..b58945c07b6 100644 --- a/arch/arm/core/offsets/offsets.c +++ b/arch/arm/core/offsets/offsets.c @@ -65,6 +65,10 @@ GEN_OFFSET_SYM(_callee_saved_t, v6); GEN_OFFSET_SYM(_callee_saved_t, v7); GEN_OFFSET_SYM(_callee_saved_t, v8); GEN_OFFSET_SYM(_callee_saved_t, psp); +#if defined(CONFIG_CPU_CORTEX_R) +GEN_OFFSET_SYM(_callee_saved_t, spsr); +GEN_OFFSET_SYM(_callee_saved_t, lr); +#endif /* size of the entire preempt registers structure */ diff --git a/arch/arm/core/prep_c.c b/arch/arm/core/prep_c.c index 83e97d0817c..7154d53aab2 100644 --- a/arch/arm/core/prep_c.c +++ b/arch/arm/core/prep_c.c @@ -21,7 +21,12 @@ #include #include #include +#include +#if defined(CONFIG_CPU_CORTEX_M) #include +#elif defined(CONFIG_ARMV7_R) +#include +#endif #if defined(__GNUC__) /* @@ -150,9 +155,6 @@ extern FUNC_NORETURN void z_cstart(void); * * @return N/A */ - -extern void z_IntLibInit(void); - #ifdef CONFIG_BOOT_TIME_MEASUREMENT extern u64_t __start_time_stamp; #endif @@ -162,6 +164,9 @@ void _PrepC(void) enable_floating_point(); z_bss_zero(); z_data_copy(); +#if defined(CONFIG_ARMV7_R) && defined(CONFIG_INIT_STACKS) + init_stacks(); +#endif #ifdef CONFIG_BOOT_TIME_MEASUREMENT __start_time_stamp = 0U; #endif diff --git a/arch/arm/core/swap.c b/arch/arm/core/swap.c index 26792c3620b..7759b1017fb 100644 --- a/arch/arm/core/swap.c +++ b/arch/arm/core/swap.c @@ -55,11 +55,16 @@ int __swap(int key) 
_current->arch.basepri = key; _current->arch.swap_return_value = _k_neg_eagain; +#if defined(CONFIG_CPU_CORTEX_M) /* set pending bit to make sure we will take a PendSV exception */ SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk; /* clear mask or enable all irqs to take a pendsv */ irq_unlock(0); +#elif defined(CONFIG_CPU_CORTEX_R) + cortex_r_svc(); + irq_unlock(key); +#endif /* Context switch is performed here. Returning implies the * thread has been context-switched-in again. diff --git a/arch/arm/core/swap_helper.S b/arch/arm/core/swap_helper.S index 52549a710e5..636d95dc8c4 100644 --- a/arch/arm/core/swap_helper.S +++ b/arch/arm/core/swap_helper.S @@ -64,7 +64,9 @@ SECTION_FUNC(TEXT, __pendsv) add r0, r2 /* save callee-saved + psp in thread */ +#if defined(CONFIG_CPU_CORTEX_M) mrs ip, PSP +#endif #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) /* Store current r4-r7 */ @@ -99,6 +101,10 @@ out_fp_active: out_fp_endif: str r0, [r2, #_thread_offset_to_mode] #endif /* CONFIG_FP_SHARING */ +#elif defined(CONFIG_ARMV7_R) + /* Store rest of process context */ + mrs r12, SPSR + stm r0, {r4-r12,sp,lr}^ #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ @@ -110,6 +116,11 @@ out_fp_endif: movs.n r0, #_EXC_IRQ_DEFAULT_PRIO msr BASEPRI, r0 isb /* Make the effect of disabling interrupts be realized immediately */ +#elif defined(CONFIG_ARMV7_R) + /* + * Interrupts are still disabled from __swap so empty clause + * here to avoid the preprocessor error below + */ #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ @@ -121,8 +132,10 @@ out_fp_endif: * to pend PendSV have been taken with the current kernel * state and this is what we're handling currently. 
*/ +#if defined(CONFIG_CPU_CORTEX_M) ldr v4, =_SCS_ICSR ldr v3, =_SCS_ICSR_UNPENDSV +#endif /* _kernel is still in r1 */ @@ -141,7 +154,9 @@ out_fp_endif: */ /* _SCS_ICSR is still in v4 and _SCS_ICSR_UNPENDSV in v3 */ +#if defined(CONFIG_CPU_CORTEX_M) str v3, [v4, #0] +#endif /* Restore previous interrupt disable state (irq_lock key) */ #if (defined(CONFIG_CPU_CORTEX_M0PLUS) || defined(CONFIG_CPU_CORTEX_M0)) && \ @@ -158,7 +173,7 @@ out_fp_endif: str r3, [r4] #else ldr r0, [r2, #_thread_offset_to_basepri] - movs.n r3, #0 + movs r3, #0 str r3, [r2, #_thread_offset_to_basepri] #endif @@ -254,6 +269,19 @@ in_fp_endif: /* load callee-saved + psp from thread */ add r0, r2, #_thread_offset_to_callee_saved ldmia r0, {v1-v8, ip} +#elif defined(CONFIG_ARMV7_R) +_thread_irq_disabled: + /* load _kernel into r1 and current k_thread into r2 */ + ldr r1, =_kernel + ldr r2, [r1, #_kernel_offset_to_current] + + /* addr of callee-saved regs in thread in r0 */ + ldr r0, =_thread_offset_to_callee_saved + add r0, r2 + + /* restore r4-r12 for incoming thread, plus system sp and lr */ + ldm r0, {r4-r12,sp,lr}^ + msr SPSR_fsxc, r12 #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ @@ -264,7 +292,9 @@ in_fp_endif: msr PSPLIM, r0 #endif /* CONFIG_BUILTIN_STACK_GUARD */ +#if defined(CONFIG_CPU_CORTEX_M) msr PSP, ip +#endif #ifdef CONFIG_BUILTIN_STACK_GUARD /* r2 contains k_thread */ @@ -486,7 +516,80 @@ valid_syscall_id: bx lr #endif +#elif defined(CONFIG_ARMV7_R) +SECTION_FUNC(TEXT, __svc) + /* + * Switch to system mode to store r0-r3 to the process stack pointer. + * Save r12 and the lr as we will be swapping in another process and + * returning to a different location. 
+ */ + push {r4, r5} + mov r4, r12 + mov r5, lr + + cps #MODE_SYS + stmdb sp!, {r0-r5} + cps #MODE_SVC + + pop {r4, r5} + + /* Get SVC number */ + mrs r0, spsr + tst r0, #0x20 + + ldreq r1, [lr, #-4] + biceq r1, #0xff000000 + beq demux + + ldr r1, [lr, #-2] + bic r1, #0xff00 + + /* + * grab service call number: + * 0: context switch + * 1: irq_offload (if configured) + * 2: kernel panic or oops (software generated fatal exception) + * Planned implementation of system calls for memory protection will + * expand this case. + */ +demux: + cmp r1, #_SVC_CALL_CONTEXT_SWITCH + beq _context_switch + + cmp r1, #_SVC_CALL_RUNTIME_EXCEPT + beq _oops + +#if CONFIG_IRQ_OFFLOAD + push {r0, lr} + blx z_irq_do_offload /* call C routine which executes the offload */ + pop {r0, lr} + + /* exception return is done in _IntExit() */ + mov r0, #RET_FROM_SVC + b _IntExit +#endif + +_context_switch: + /* handler mode exit, to PendSV */ + push {r0, lr} + bl __pendsv + pop {r0, lr} + + mov r0, #RET_FROM_SVC + b _IntExit + +_oops: + push {r0, lr} + blx z_do_kernel_oops + pop {r0, lr} + cpsie i + movs pc, lr + +GTEXT(cortex_r_svc) +SECTION_FUNC(TEXT, cortex_r_svc) + svc #0 + bx lr + #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ - diff --git a/arch/arm/core/thread.c b/arch/arm/core/thread.c index 742482abea5..42812a62366 100644 --- a/arch/arm/core/thread.c +++ b/arch/arm/core/thread.c @@ -138,8 +138,10 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack, pInitCtx->basic.pc = (u32_t)z_thread_entry; #endif +#if defined(CONFIG_CPU_CORTEX_M) /* force ARM mode by clearing LSB of address */ pInitCtx->basic.pc &= 0xfffffffe; +#endif pInitCtx->basic.a1 = (u32_t)pEntry; pInitCtx->basic.a2 = (u32_t)parameter1; @@ -149,6 +151,11 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack, 0x01000000UL; /* clear all, thumb bit is 1, even if RO */ thread->callee_saved.psp = (u32_t)pInitCtx; +#if defined(CONFIG_CPU_CORTEX_R) + 
pInitCtx->basic.lr = (u32_t)pInitCtx->basic.pc; + thread->callee_saved.spsr = A_BIT | T_BIT | MODE_SYS; + thread->callee_saved.lr = (u32_t)pInitCtx->basic.pc; +#endif thread->arch.basepri = 0; #if defined(CONFIG_USERSPACE) || defined(CONFIG_FP_SHARING) diff --git a/arch/arm/include/cortex_r/exc.h b/arch/arm/include/cortex_r/exc.h new file mode 100644 index 00000000000..be1a06e061a --- /dev/null +++ b/arch/arm/include/cortex_r/exc.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2018 Lexmark International, Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief Exception/interrupt context helpers for Cortex-R CPUs + * + * Exception/interrupt context helpers. + */ + +#ifndef _ARM_CORTEXR_ISR__H_ +#define _ARM_CORTEXR_ISR__H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _ASMLANGUAGE + +/* nothing */ + +#else + +#include + +#ifdef CONFIG_IRQ_OFFLOAD +extern volatile irq_offload_routine_t offload_routine; +#endif + +/** + * + * @brief Find out if running in an ISR context + * + * Check the CPSR mode bits to see if we are in IRQ or FIQ mode + * + * @return 1 if in ISR, 0 if not. + */ +static ALWAYS_INLINE bool z_IsInIsr(void) +{ + unsigned int status; + + __asm__ volatile( + " mrs %0, cpsr" + : "=r" (status) : : "memory", "cc"); + status &= MODE_MASK; + + return (status == MODE_FIQ) || (status == MODE_IRQ); +} + +/** + * @brief Setup system exceptions + * + * Enable fault exceptions. 
+ *
+ * @return N/A
+ */
+static ALWAYS_INLINE void z_ExcSetup(void)
+{
+}
+
+/**
+ * @brief Clear Fault exceptions
+ *
+ * Clear out exceptions for Mem, Bus, Usage and Hard Faults
+ *
+ * @return N/A
+ */
+static ALWAYS_INLINE void z_clearfaults(void)
+{
+}
+
+extern void cortex_r_svc(void);
+
+#endif /* _ASMLANGUAGE */
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _ARM_CORTEXR_ISR__H_ */
diff --git a/arch/arm/include/cortex_r/stack.h b/arch/arm/include/cortex_r/stack.h
new file mode 100644
index 00000000000..200da41be93
--- /dev/null
+++ b/arch/arm/include/cortex_r/stack.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2018 Lexmark International, Inc.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/**
+ * @file
+ * @brief Stack helpers for Cortex-R CPUs
+ *
+ * Stack helper functions.
+ */
+
+#ifndef _ARM_CORTEXR_STACK__H_
+#define _ARM_CORTEXR_STACK__H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef _ASMLANGUAGE
+
+/* nothing */
+
+#else
+
+extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
+
+extern void init_stacks(void);
+
+/**
+ *
+ * @brief Setup interrupt stack
+ *
+ * On Cortex-R, the interrupt stack is set up by reset.S
+ *
+ * @return N/A
+ */
+static ALWAYS_INLINE void z_InterruptStackSetup(void)
+{
+}
+
+#endif /* _ASMLANGUAGE */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _ARM_CORTEXR_STACK__H_ */
diff --git a/arch/arm/include/kernel_arch_data.h b/arch/arm/include/kernel_arch_data.h
index f025bcd1747..37d822fa37c 100644
--- a/arch/arm/include/kernel_arch_data.h
+++ b/arch/arm/include/kernel_arch_data.h
@@ -52,6 +52,9 @@ typedef struct __basic_sf _basic_sf_t;
 #ifdef CONFIG_CPU_CORTEX_M
 #include <cortex_m/stack.h>
 #include <cortex_m/exc.h>
+#elif defined(CONFIG_CPU_CORTEX_R)
+#include <cortex_r/stack.h>
+#include <cortex_r/exc.h>
 #endif
 
 #ifndef _ASMLANGUAGE
diff --git a/arch/arm/include/kernel_arch_func.h b/arch/arm/include/kernel_arch_func.h
index b6e63f73aa2..26ed8ac6d43 100644
--- a/arch/arm/include/kernel_arch_func.h
+++ b/arch/arm/include/kernel_arch_func.h
@@ -109,8 +109,12 @@
z_arch_switch_to_main_thread(struct k_thread *main_thread, */ __asm__ volatile ( "mov r0, %0 \n\t" /* Store _main in R0 */ +#if defined(CONFIG_CPU_CORTEX_M) "msr PSP, %1 \n\t" /* __set_PSP(start_of_main_stack) */ -#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) +#endif + +#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \ + || defined(CONFIG_ARMV7_R) "cpsie i \n\t" /* __enable_irq() */ #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) "cpsie if \n\t" /* __enable_irq(); __enable_fault_irq() */ diff --git a/arch/arm/include/kernel_arch_thread.h b/arch/arm/include/kernel_arch_thread.h index f8691e85b56..a359e8f816f 100644 --- a/arch/arm/include/kernel_arch_thread.h +++ b/arch/arm/include/kernel_arch_thread.h @@ -31,7 +31,13 @@ struct _callee_saved { u32_t v6; /* r9 */ u32_t v7; /* r10 */ u32_t v8; /* r11 */ +#if defined(CONFIG_CPU_CORTEX_R) + u32_t spsr;/* r12 */ u32_t psp; /* r13 */ + u32_t lr; /* r14 */ +#else + u32_t psp; /* r13 */ +#endif }; typedef struct _callee_saved _callee_saved_t; diff --git a/dts/arm/armv7-r.dtsi b/dts/arm/armv7-r.dtsi new file mode 100644 index 00000000000..30ce1e237e1 --- /dev/null +++ b/dts/arm/armv7-r.dtsi @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018 Lexmark International, Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ +#include "skeleton.dtsi" + +/ { + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + device_type = "cpu"; + compatible = "Cortex-R"; + reg = <0>; + }; + + core_intc: core_intc@0 { + compatible = "armv7-r,core-intc"; + reg = <0x00 0x4>; + interrupt-controller; + #interrupt-cells = <2>; + }; + }; + + soc { + #address-cells = <1>; + #size-cells = <1>; + compatible = "simple-bus"; + ranges; + }; +}; + diff --git a/include/arch/arm/arch.h b/include/arch/arm/arch.h index bbce87d8b48..34a9ee64613 100644 --- a/include/arch/arm/arch.h +++ b/include/arch/arm/arch.h @@ -35,6 +35,8 @@ #ifdef CONFIG_CPU_CORTEX_M #include #include +#elif defined(CONFIG_CPU_CORTEX_R) +#include #endif #ifdef __cplusplus diff --git a/include/arch/arm/asm_inline_gcc.h b/include/arch/arm/asm_inline_gcc.h index 148444846cb..122104e29bc 100644 --- a/include/arch/arm/asm_inline_gcc.h +++ b/include/arch/arm/asm_inline_gcc.h @@ -90,6 +90,12 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void) : "=r"(key), "=r"(tmp) : "i"(_EXC_IRQ_DEFAULT_PRIO) : "memory"); +#elif defined(CONFIG_ARMV7_R) + __asm__ volatile("mrs %0, cpsr;" + "cpsid i" + : "=r" (key) + : + : "memory", "cc"); #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ @@ -132,6 +138,11 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key) "msr BASEPRI, %0;" "isb;" : : "r"(key) : "memory"); +#elif defined(CONFIG_ARMV7_R) + __asm__ volatile("msr cpsr_c, %0" + : + : "r" (key) + : "memory", "cc"); #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ diff --git a/include/arch/arm/cortex_r/cpu.h b/include/arch/arm/cortex_r/cpu.h new file mode 100644 index 00000000000..9ce788a511b --- /dev/null +++ b/include/arch/arm/cortex_r/cpu.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018 Lexmark International, Inc. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _CORTEX_R_CPU_H +#define _CORTEX_R_CPU_H + +#define MODE_USR 0x10 +#define MODE_FIQ 0x11 +#define MODE_IRQ 0x12 +#define MODE_SVC 0x13 +#define MODE_ABT 0x17 +#define MODE_UDF 0x1b +#define MODE_SYS 0x1f +#define MODE_MASK 0x1f + +#define A_BIT (1 << 8) +#define I_BIT (1 << 7) +#define F_BIT (1 << 6) +#define T_BIT (1 << 5) + +#define HIVECS (1 << 13) + +#define RET_FROM_SVC 0 +#define RET_FROM_IRQ 1 + +#define __ISB() __asm__ volatile ("isb sy" : : : "memory") +#define __DMB() __asm__ volatile ("dmb sy" : : : "memory") + +#endif diff --git a/include/arch/arm/cortex_r/scripts/app_data_alignment.ld b/include/arch/arm/cortex_r/scripts/app_data_alignment.ld new file mode 100644 index 00000000000..a9257f14ee8 --- /dev/null +++ b/include/arch/arm/cortex_r/scripts/app_data_alignment.ld @@ -0,0 +1,9 @@ +/* + * Copyright (c) 2017 Linaro Limited. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* Set initial alignment to the 32 byte minimum for all MPUs */ +_app_data_align = 32; +. = ALIGN(32); diff --git a/include/arch/arm/cortex_r/scripts/linker.ld b/include/arch/arm/cortex_r/scripts/linker.ld new file mode 100644 index 00000000000..e4a8e9fdaa7 --- /dev/null +++ b/include/arch/arm/cortex_r/scripts/linker.ld @@ -0,0 +1,493 @@ +/* + * Copyright (c) 2013-2014 Wind River Systems, Inc. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * @file + * @brief Linker command/script file + * + * Linker script for the Cortex-R platforms. 
+ */ + +#define _LINKER +#define _ASMLANGUAGE + +#include +#include +#include + +#include +#include + +/* physical address of RAM */ +#ifdef CONFIG_XIP + #define ROMABLE_REGION FLASH + #define RAMABLE_REGION SRAM +#else + #define ROMABLE_REGION SRAM + #define RAMABLE_REGION SRAM +#endif + +#if defined(CONFIG_XIP) + #define _DATA_IN_ROM __data_rom_start +#else + #define _DATA_IN_ROM +#endif + +#if !defined(SKIP_TO_KINETIS_FLASH_CONFIG) + #define SKIP_TO_KINETIS_FLASH_CONFIG +#endif + +#if !defined(CONFIG_XIP) && (CONFIG_FLASH_SIZE == 0) +#define ROM_ADDR RAM_ADDR +#else +#define ROM_ADDR (CONFIG_FLASH_BASE_ADDRESS + CONFIG_FLASH_LOAD_OFFSET) +#endif + +#ifdef CONFIG_TI_CCFG_PRESENT + #define CCFG_SIZE 88 + #define ROM_SIZE (CONFIG_FLASH_SIZE*1K - CONFIG_FLASH_LOAD_OFFSET - \ + CCFG_SIZE) + #define CCFG_ADDR (ROM_ADDR + ROM_SIZE) +#else +#if CONFIG_FLASH_LOAD_SIZE > 0 + #define ROM_SIZE CONFIG_FLASH_LOAD_SIZE +#else + #define ROM_SIZE (CONFIG_FLASH_SIZE*1K - CONFIG_FLASH_LOAD_OFFSET) +#endif +#endif + +#if defined(CONFIG_XIP) + #if defined(CONFIG_IS_BOOTLOADER) + #define RAM_SIZE (CONFIG_BOOTLOADER_SRAM_SIZE * 1K) + #define RAM_ADDR (CONFIG_SRAM_BASE_ADDRESS + \ + (CONFIG_SRAM_SIZE * 1K - RAM_SIZE)) + #else + #define RAM_SIZE (CONFIG_SRAM_SIZE * 1K) + #define RAM_ADDR CONFIG_SRAM_BASE_ADDRESS + #endif +#else + #define RAM_SIZE (CONFIG_SRAM_SIZE * 1K - CONFIG_BOOTLOADER_SRAM_SIZE * 1K) + #define RAM_ADDR CONFIG_SRAM_BASE_ADDRESS +#endif + +/* Set alignment to CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE + * to make linker section alignment comply with MPU granularity. + */ +#if defined(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE) +_region_min_align = CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE; +#else +/* If building without MPU support, use default 4-byte alignment. */ +_region_min_align = 4; +#endif + +#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT) +#define MPU_ALIGN(region_size) \ + . = ALIGN(_region_min_align); \ + . 
= ALIGN( 1 << LOG2CEIL(region_size)) +#else +#define MPU_ALIGN(region_size) \ + . = ALIGN(_region_min_align) +#endif + +MEMORY + { + FLASH (rx) : ORIGIN = ROM_ADDR, LENGTH = ROM_SIZE +#ifdef CONFIG_TI_CCFG_PRESENT + FLASH_CCFG (rwx): ORIGIN = CCFG_ADDR, LENGTH = CCFG_SIZE +#endif +#ifdef DT_CCM_BASE_ADDRESS + CCM (rw) : ORIGIN = DT_CCM_BASE_ADDRESS, LENGTH = DT_CCM_SIZE * 1K +#endif + SRAM (wx) : ORIGIN = RAM_ADDR, LENGTH = RAM_SIZE +#ifdef CONFIG_BT_STM32_IPM + SRAM1 (rw) : ORIGIN = RAM1_ADDR, LENGTH = RAM1_SIZE + SRAM2 (rw) : ORIGIN = RAM2_ADDR, LENGTH = RAM2_SIZE +#endif + /* Used by and documented in include/linker/intlist.ld */ + IDT_LIST (wx) : ORIGIN = (RAM_ADDR + RAM_SIZE), LENGTH = 2K + } + +ENTRY(CONFIG_KERNEL_ENTRY) + +SECTIONS + { + +#include + + /* + * .plt and .iplt are here according to 'arm-zephyr-elf-ld --verbose', + * before text section. + */ + SECTION_PROLOGUE(.plt,,) + { + *(.plt) + } + + SECTION_PROLOGUE(.iplt,,) + { + *(.iplt) + } + + GROUP_START(ROMABLE_REGION) + + _image_rom_start = ROM_ADDR; + + SECTION_PROLOGUE(_TEXT_SECTION_NAME,,) + { +#ifdef CONFIG_CC3220SF_DEBUG + /* Add CC3220SF flash header to disable flash verification */ + . = 0x0; + KEEP(*(.dbghdr)) + KEEP(*(".dbghdr.*")) +#endif + +#ifdef CONFIG_NXP_IMX_RT_BOOT_HEADER + KEEP(*(.boot_hdr.conf)) + . = CONFIG_IMAGE_VECTOR_TABLE_OFFSET; + KEEP(*(.boot_hdr.ivt)) + KEEP(*(.boot_hdr.data)) +#ifdef CONFIG_DEVICE_CONFIGURATION_DATA + KEEP(*(.boot_hdr.dcd_data)) +#endif +#endif + + . 
= CONFIG_TEXT_SECTION_OFFSET; + +#if defined(CONFIG_SW_VECTOR_RELAY) + KEEP(*(.vector_relay_table)) + KEEP(*(".vector_relay_table.*")) + KEEP(*(.vector_relay_handler)) + KEEP(*(".vector_relay_handler.*")) +#endif + + _vector_start = .; + KEEP(*(.exc_vector_table)) + KEEP(*(".exc_vector_table.*")) + + KEEP(*(IRQ_VECTOR_TABLE)) + + KEEP(*(.vectors)) + + KEEP(*(.openocd_dbg)) + KEEP(*(".openocd_dbg.*")) + + /* Kinetis has to write 16 bytes at 0x400 */ + SKIP_TO_KINETIS_FLASH_CONFIG + KEEP(*(.kinetis_flash_config)) + KEEP(*(".kinetis_flash_config.*")) + + _vector_end = .; + } GROUP_LINK_IN(ROMABLE_REGION) + +#ifdef CONFIG_CODE_DATA_RELOCATION + +#include + +#endif /* CONFIG_CODE_DATA_RELOCATION */ + + SECTION_PROLOGUE(_TEXT_SECTION_NAME_2,,) + { + _image_text_start = .; + *(.text) + *(".text.*") + *(.gnu.linkonce.t.*) + + /* + * These are here according to 'arm-zephyr-elf-ld --verbose', + * after .gnu.linkonce.t.* + */ + *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx) + +#include +#include + + } GROUP_LINK_IN(ROMABLE_REGION) + + _image_text_end = .; + +#if defined (CONFIG_CPLUSPLUS) + SECTION_PROLOGUE(.ARM.extab,,) + { + /* + * .ARM.extab section containing exception unwinding information. + */ + *(.ARM.extab* .gnu.linkonce.armextab.*) + } GROUP_LINK_IN(ROMABLE_REGION) +#endif + + SECTION_PROLOGUE(.ARM.exidx,,) + { + /* + * This section, related to stack and exception unwinding, is placed + * explicitly to prevent it from being shared between multiple regions. + * It must be defined for gcc to support 64-bit math and avoid + * section overlap. + */ + __exidx_start = .; +#if defined (__GCC_LINKER_CMD__) + *(.ARM.exidx* gnu.linkonce.armexidx.*) +#endif + __exidx_end = .; + } GROUP_LINK_IN(ROMABLE_REGION) + + _image_rodata_start = .; + +#include + + SECTION_PROLOGUE(_RODATA_SECTION_NAME,,) + { + *(.rodata) + *(".rodata.*") + *(.gnu.linkonce.r.*) + +/* Located in generated directory. This file is populated by the + * zephyr_linker_sources() Cmake function. 
+ */ +#include +#ifdef CONFIG_SOC_RODATA_LD +#include +#endif + +#ifdef CONFIG_CUSTOM_RODATA_LD +/* Located in project source directory */ +#include +#endif + +#include +#include + + /* + * For XIP images, in order to avoid the situation when __data_rom_start + * is 32-bit aligned, but the actual data is placed right after rodata + * section, which may not end exactly at 32-bit border, pad rodata + * section, so __data_rom_start points at data and it is 32-bit aligned. + * + * On non-XIP images this may enlarge image size up to 3 bytes. This + * generally is not an issue, since modern ROM and FLASH memory is + * usually 4k aligned. + */ + . = ALIGN(4); + } GROUP_LINK_IN(ROMABLE_REGION) + + _image_rodata_end = .; + MPU_ALIGN(_image_rodata_end -_image_rom_start); + _image_rom_end = .; + + GROUP_END(ROMABLE_REGION) + +/* Some TI SoCs have a special configuration footer, at the end of flash. */ +#ifdef CONFIG_TI_CCFG_PRESENT + SECTION_PROLOGUE(.ti_ccfg,,) + { + KEEP(*(TI_CCFG)) + } > FLASH_CCFG +#endif + + /* + * These are here according to 'arm-zephyr-elf-ld --verbose', + * before data section. + */ + SECTION_PROLOGUE(.got,,) + { + *(.got.plt) + *(.igot.plt) + *(.got) + *(.igot) + } + + GROUP_START(RAMABLE_REGION) + + . = RAM_ADDR; + /* Align the start of image SRAM with the + * minimum granularity required by MPU. + */ + . = ALIGN(_region_min_align); + _image_ram_start = .; + +#if defined(CONFIG_SOC_SERIES_STM32F0X) && !defined(CONFIG_IS_BOOTLOADER) + /* Must be first in ramable region */ + SECTION_PROLOGUE(.st_stm32f0x_vt,(NOLOAD),) + { + _ram_vector_start = .; + . += _vector_end - _vector_start; + _ram_vector_end = .; + } GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION) +#endif + +/* Located in generated directory. This file is populated by the + * zephyr_linker_sources() Cmake function. + */ +#include + +#if defined(CONFIG_USERSPACE) +#define APP_SHARED_ALIGN . 
= ALIGN(_region_min_align); +#define SMEM_PARTITION_ALIGN MPU_ALIGN + +#include + + _app_smem_size = _app_smem_end - _app_smem_start; + _app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME); +#endif /* CONFIG_USERSPACE */ + + SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),) + { + /* + * For performance, BSS section is assumed to be 4 byte aligned and + * a multiple of 4 bytes + */ + . = ALIGN(4); + __bss_start = .; + __kernel_ram_start = .; + + *(.bss) + *(".bss.*") + *(COMMON) + *(".kernel_bss.*") + +#ifdef CONFIG_CODE_DATA_RELOCATION +#include +#endif + + /* + * As memory is cleared in words only, it is simpler to ensure the BSS + * section ends on a 4 byte boundary. This wastes a maximum of 3 bytes. + */ + __bss_end = ALIGN(4); + } GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION) + + SECTION_PROLOGUE(_NOINIT_SECTION_NAME,(NOLOAD),) + { + /* + * This section is used for non-initialized objects that + * will not be cleared during the boot process. + */ + *(.noinit) + *(".noinit.*") + *(".kernel_noinit.*") + +/* Located in generated directory. This file is populated by the + * zephyr_linker_sources() Cmake function. + */ +#include +#ifdef CONFIG_SOC_NOINIT_LD +#include +#endif + + } GROUP_LINK_IN(RAMABLE_REGION) + + SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,) + { + __data_ram_start = .; + *(.data) + *(".data.*") + *(".kernel.*") + +/* Located in generated directory. This file is populated by the + * zephyr_linker_sources() Cmake function. 
+ */ +#include +#ifdef CONFIG_SOC_RWDATA_LD +#include +#endif + +#ifdef CONFIG_CUSTOM_RWDATA_LD +/* Located in project source directory */ +#include +#endif + +#ifdef CONFIG_CODE_DATA_RELOCATION +#include +#endif + + } GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION) + + __data_rom_start = LOADADDR(_DATA_SECTION_NAME); + +#include +#include +#include + +#include + + __data_ram_end = .; + + + /* Define linker symbols */ + + _image_ram_end = .; + _end = .; /* end of image */ + + __kernel_ram_end = RAM_ADDR + RAM_SIZE; + __kernel_ram_size = __kernel_ram_end - __kernel_ram_start; + + GROUP_END(RAMABLE_REGION) + +#ifdef CONFIG_CUSTOM_SECTIONS_LD +/* Located in project source directory */ +#include +#endif + +/* Located in generated directory. This file is populated by the + * zephyr_linker_sources() Cmake function. + */ +#include + +#include + + SECTION_PROLOGUE(.ARM.attributes, 0,) + { + KEEP(*(.ARM.attributes)) + KEEP(*(.gnu.attributes)) + } + +#if defined(CONFIG_ARM_FIRMWARE_HAS_SECURE_ENTRY_FUNCS) +#if CONFIG_ARM_NSC_REGION_BASE_ADDRESS != 0 + #define NSC_ALIGN . = ABSOLUTE(CONFIG_ARM_NSC_REGION_BASE_ADDRESS) +#elif defined(CONFIG_CPU_HAS_NRF_IDAU) + /* The nRF9160 needs the NSC region to be at the end of a 32 kB region. */ + #define NSC_ALIGN . = ALIGN(0x8000) - (1 << LOG2CEIL(__sg_size)) +#else + #define NSC_ALIGN . = ALIGN(4) +#endif + +#ifdef CONFIG_CPU_HAS_NRF_IDAU + #define NSC_ALIGN_END . = ALIGN(0x8000) +#else + #define NSC_ALIGN_END . = ALIGN(4) +#endif + +SECTION_PROLOGUE(.gnu.sgstubs,,) +{ + NSC_ALIGN; + __sg_start = .; + /* No input section necessary, since the Secure Entry Veneers are + automatically placed after the .gnu.sgstubs output section. */ +} GROUP_LINK_IN(ROMABLE_REGION) +__sg_end = .; +__sg_size = __sg_end - __sg_start; +NSC_ALIGN_END; +__nsc_size = . 
- __sg_start; + +#ifdef CONFIG_CPU_HAS_NRF_IDAU + ASSERT(1 << LOG2CEIL(0x8000 - (__sg_start % 0x8000)) + == (0x8000 - (__sg_start % 0x8000)) + && (0x8000 - (__sg_start % 0x8000)) >= 32 + && (0x8000 - (__sg_start % 0x8000)) <= 4096, + "The Non-Secure Callable region size must be a power of 2 \ +between 32 and 4096 bytes.") +#endif +#endif /* CONFIG_ARM_FIRMWARE_HAS_SECURE_ENTRY_FUNCS */ + +/* Must be last in romable region */ +SECTION_PROLOGUE(.last_section,(NOLOAD),) +{ +} GROUP_LINK_IN(ROMABLE_REGION) + +/* To provide the image size as a const expression, + * calculate this value here. */ +_flash_used = LOADADDR(.last_section) - _image_rom_start; + + } diff --git a/include/arch/arm/error.h b/include/arch/arm/error.h index ad563671415..35a66eabbe8 100644 --- a/include/arch/arm/error.h +++ b/include/arch/arm/error.h @@ -53,6 +53,8 @@ extern "C" { : "memory"); \ CODE_UNREACHABLE; \ } while (false) +#elif defined(CONFIG_ARMV7_R) +/* Pick up the default definition in kernel.h for now */ #else #error Unknown ARM architecture #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ diff --git a/include/arch/arm/irq.h b/include/arch/arm/irq.h index c365e23611d..6eded9e5795 100644 --- a/include/arch/arm/irq.h +++ b/include/arch/arm/irq.h @@ -34,6 +34,14 @@ extern int z_arch_irq_is_enabled(unsigned int irq); extern void _IntExit(void); +#if defined(CONFIG_ARMV7_R) +static ALWAYS_INLINE void z_IntLibInit(void) +{ +} +#else +extern void z_IntLibInit(void); +#endif + /* macros convert value of it's argument to a string */ #define DO_TOSTR(s) #s #define TOSTR(s) DO_TOSTR(s) diff --git a/include/arch/arm/syscall.h b/include/arch/arm/syscall.h index d774a4a1c2b..b6ce4c0dc0e 100644 --- a/include/arch/arm/syscall.h +++ b/include/arch/arm/syscall.h @@ -16,6 +16,7 @@ #ifndef ZEPHYR_INCLUDE_ARCH_ARM_SYSCALL_H_ #define ZEPHYR_INCLUDE_ARCH_ARM_SYSCALL_H_ +#define _SVC_CALL_CONTEXT_SWITCH 0 #define _SVC_CALL_IRQ_OFFLOAD 1 #define _SVC_CALL_RUNTIME_EXCEPT 2 #define _SVC_CALL_SYSTEM_CALL 3 diff --git 
a/subsys/testsuite/include/test_asm_inline_gcc.h b/subsys/testsuite/include/test_asm_inline_gcc.h index 518952eebd3..28d8e3f0a24 100644 --- a/subsys/testsuite/include/test_asm_inline_gcc.h +++ b/subsys/testsuite/include/test_asm_inline_gcc.h @@ -36,6 +36,12 @@ static inline void timestamp_serialize(void) /* isb is available in all Cortex-M */ __ISB(); } +#elif defined(CONFIG_CPU_CORTEX_R) +#include +static inline void timestamp_serialize(void) +{ + __ISB(); +} #elif defined(CONFIG_CPU_ARCV2) #define timestamp_serialize() #elif defined(CONFIG_ARCH_POSIX)