arch: arm: Add Cortex-R support

This adds initial Cortex-R support for interrupts and context switching.

Signed-off-by: Bradley Bolen <bbolen@lexmark.com>
This commit is contained in:
Bradley Bolen 2018-06-25 09:15:14 -04:00 committed by Ioannis Glaropoulos
commit c30a71df95
37 changed files with 1516 additions and 82 deletions

View file

@ -18,6 +18,7 @@
/arch/arm/ @MaureenHelm @galak @ioannisg /arch/arm/ @MaureenHelm @galak @ioannisg
/arch/arm/core/cortex_m/cmse/ @ioannisg /arch/arm/core/cortex_m/cmse/ @ioannisg
/arch/arm/include/cortex_m/cmse.h @ioannisg /arch/arm/include/cortex_m/cmse.h @ioannisg
/arch/arm/core/cortex_r/ @MaureenHelm @galak @ioannisg @bbolen
/arch/common/ @andrewboie @ioannisg @andyross /arch/common/ @andrewboie @ioannisg @andyross
/arch/x86_64/ @andyross /arch/x86_64/ @andyross
/soc/arc/snps_*/ @vonhust @ruuddw /soc/arc/snps_*/ @vonhust @ruuddw
@ -183,6 +184,7 @@
/dts/riscv/rv32m1* @MaureenHelm /dts/riscv/rv32m1* @MaureenHelm
/dts/riscv/riscv32-fe310.dtsi @nategraff-sifive /dts/riscv/riscv32-fe310.dtsi @nategraff-sifive
/dts/riscv/riscv32-litex-vexriscv.dtsi @mateusz-holenko @kgugala @pgielda /dts/riscv/riscv32-litex-vexriscv.dtsi @mateusz-holenko @kgugala @pgielda
/dts/arm/armv7-r.dtsi @bbolen
/dts/xtensa/xtensa.dtsi @ydamigos /dts/xtensa/xtensa.dtsi @ydamigos
/dts/bindings/ @galak /dts/bindings/ @galak
/dts/bindings/can/ @alexanderwachter /dts/bindings/can/ @alexanderwachter

View file

@ -33,3 +33,5 @@ add_subdirectory_ifdef(CONFIG_ARM_MPU cortex_m/mpu)
add_subdirectory_ifdef(CONFIG_CPU_CORTEX_M_HAS_CMSE cortex_m/cmse) add_subdirectory_ifdef(CONFIG_CPU_CORTEX_M_HAS_CMSE cortex_m/cmse)
add_subdirectory_ifdef(CONFIG_ARM_SECURE_FIRMWARE cortex_m/tz) add_subdirectory_ifdef(CONFIG_ARM_SECURE_FIRMWARE cortex_m/tz)
add_subdirectory_ifdef(CONFIG_ARM_NONSECURE_FIRMWARE cortex_m/tz) add_subdirectory_ifdef(CONFIG_ARM_NONSECURE_FIRMWARE cortex_m/tz)
add_subdirectory_ifdef(CONFIG_CPU_CORTEX_R cortex_r)

View file

@ -29,6 +29,96 @@ config CPU_CORTEX_M
help help
This option signifies the use of a CPU of the Cortex-M family. This option signifies the use of a CPU of the Cortex-M family.
config CPU_CORTEX_R
bool
select CPU_CORTEX
select HAS_FLASH_LOAD_OFFSET
help
This option signifies the use of a CPU of the Cortex-R family.
config ISA_THUMB2
bool
help
From: http://www.arm.com/products/processors/technologies/instruction-set-architectures.php
Thumb-2 technology is the instruction set underlying the ARM Cortex
architecture which provides enhanced levels of performance, energy
efficiency, and code density for a wide range of embedded
applications.
Thumb-2 technology builds on the success of Thumb, the innovative
high code density instruction set for ARM microprocessor cores, to
increase the power of the ARM microprocessor core available to
developers of low cost, high performance systems.
The technology is backwards compatible with existing ARM and Thumb
solutions, while significantly extending the features available to
the Thumb instruction set. This allows more of the application to
benefit from the best in class code density of Thumb.
For performance optimized code Thumb-2 technology uses 31 percent
less memory to reduce system cost, while providing up to 38 percent
higher performance than existing high density code, which can be used
to prolong battery-life or to enrich the product feature set. Thumb-2
technology is featured in the processor, and in all ARMv7
architecture-based processors.
config ISA_ARM
bool
help
From: https://developer.arm.com/products/architecture/instruction-sets/a32-and-t32-instruction-sets
A32 instructions, known as Arm instructions in pre-Armv8 architectures,
are 32 bits wide, and are aligned on 4-byte boundaries. A32 instructions
are supported by both A-profile and R-profile architectures.
A32 was traditionally used in applications requiring the highest
performance, or for handling hardware exceptions such as interrupts and
processor start-up. Much of its functionality was subsumed into T32 with
the introduction of Thumb-2 technology.
config DATA_ENDIANNESS_LITTLE
bool
default y if CPU_CORTEX
help
This is driven by the processor implementation, since it is fixed in
hardware. The board should set this value to 'n' if the data is
implemented as big endian.
config STACK_ALIGN_DOUBLE_WORD
bool "Align stacks on double-words (8 octets)"
default y
help
This is needed to conform to AAPCS, the procedure call standard for
the ARM. It wastes stack space. The option also enforces alignment
of stack upon exception entry on Cortex-M3 and Cortex-M4 (ARMv7-M).
Note that for ARMv6-M, ARMv8-M, and Cortex-M7 MCUs stack alignment
on exception entry is enabled by default and it is not configurable.
config RUNTIME_NMI
bool "Attach an NMI handler at runtime"
select REBOOT
help
The kernel provides a simple NMI handler that simply hangs in a tight
loop if triggered. This fills the requirement that there must be an
NMI handler installed when the CPU boots. If a custom handler is
needed, enable this option and attach it via _NmiHandlerSet().
config FAULT_DUMP
int "Fault dump level"
default 2
range 0 2
help
Different levels for display information when a fault occurs.
2: The default. Display specific and verbose information. Consumes
the most memory (long strings).
1: Display general and short information. Consumes less memory
(short strings).
0: Off.
config BUILTIN_STACK_GUARD config BUILTIN_STACK_GUARD
bool "Thread Stack Guards based on built-in ARM stack limit checking" bool "Thread Stack Guards based on built-in ARM stack limit checking"
depends on CPU_CORTEX_M_HAS_SPLIM depends on CPU_CORTEX_M_HAS_SPLIM
@ -185,6 +275,7 @@ endchoice
endmenu endmenu
source "arch/arm/core/cortex_m/Kconfig" source "arch/arm/core/cortex_m/Kconfig"
source "arch/arm/core/cortex_r/Kconfig"
source "arch/arm/core/cortex_m/mpu/Kconfig" source "arch/arm/core/cortex_m/mpu/Kconfig"

View file

@ -75,34 +75,6 @@ config CPU_CORTEX_M7
if CPU_CORTEX_M if CPU_CORTEX_M
config ISA_THUMB2
bool
# Omit prompt to signify "hidden" option
help
From: http://www.arm.com/products/processors/technologies/instruction-set-architectures.php
Thumb-2 technology is the instruction set underlying the ARM Cortex
architecture which provides enhanced levels of performance, energy
efficiency, and code density for a wide range of embedded
applications.
Thumb-2 technology builds on the success of Thumb, the innovative
high code density instruction set for ARM microprocessor cores, to
increase the power of the ARM microprocessor core available to
developers of low cost, high performance systems.
The technology is backwards compatible with existing ARM and Thumb
solutions, while significantly extending the features available to
the Thumb instruction set. This allows more of the application to
benefit from the best in class code density of Thumb.
For performance optimized code Thumb-2 technology uses 31 percent
less memory to reduce system cost, while providing up to 38 percent
higher performance than existing high density code, which can be used
to prolong battery-life or to enrich the product feature set. Thumb-2
technology is featured in the processor, and in all ARMv7
architecture-based processors.
config CPU_CORTEX_M_HAS_SYSTICK config CPU_CORTEX_M_HAS_SYSTICK
bool bool
# Omit prompt to signify "hidden" option # Omit prompt to signify "hidden" option
@ -275,48 +247,6 @@ config LDREX_STREX_AVAILABLE
bool bool
default y default y
config DATA_ENDIANNESS_LITTLE
bool
default y
help
This is driven by the processor implementation, since it is fixed in
hardware. The board should set this value to 'n' if the data is
implemented as big endian.
config STACK_ALIGN_DOUBLE_WORD
bool "Align stacks on double-words (8 octets)"
default y
help
This is needed to conform to AAPCS, the procedure call standard for
the ARM. It wastes stack space. The option also enforces alignment
of stack upon exception entry on Cortex-M3 and Cortex-M4 (ARMv7-M).
Note that for ARMv6-M, ARMv8-M, and Cortex-M7 MCUs stack alignment
on exception entry is enabled by default and it is not configurable.
config RUNTIME_NMI
bool "Attach an NMI handler at runtime"
select REBOOT
help
The kernel provides a simple NMI handler that simply hangs in a tight
loop if triggered. This fills the requirement that there must be an
NMI handler installed when the CPU boots. If a custom handler is
needed, enable this option and attach it via _NmiHandlerSet().
config FAULT_DUMP
int "Fault dump level"
default 2
range 0 2
help
Different levels for display information when a fault occurs.
2: The default. Display specific and verbose information. Consumes
the most memory (long strings).
1: Display general and short information. Consumes less memory
(short strings).
0: Off.
config XIP config XIP
default y default y

View file

@ -0,0 +1,11 @@
# SPDX-License-Identifier: Apache-2.0
zephyr_library()
zephyr_library_sources(
vector_table.S
reset.S
fault.c
reboot.c
stacks.c
)

View file

@ -0,0 +1,82 @@
# Kconfig - ARM Cortex-R platform configuration options
#
# Copyright (c) 2018 Marvell
# Copyright (c) 2018 Lexmark International, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# NOTE: We have the specific core implementations first and outside of the
# if CPU_CORTEX_R block so that SoCs can select which core they are using
# without having to select all the options related to that core. Everything
# else is captured inside the if CPU_CORTEX_R block so they are not exposed
# if one selects a different ARM Cortex Family (Cortex-A or Cortex-M)
if CPU_CORTEX_R
config ARMV7_R
bool
select ATOMIC_OPERATIONS_BUILTIN
select ISA_ARM
help
This option signifies the use of an ARMv7-R processor
implementation.
From https://developer.arm.com/products/architecture/cpu-architecture/r-profile:
The Armv7-R architecture implements a traditional Arm architecture with
multiple modes and supports a Protected Memory System Architecture
(PMSA) based on a Memory Protection Unit (MPU). It supports the Arm (32)
and Thumb (T32) instruction sets.
config ARMV7_R_FP
bool
depends on ARMV7_R
help
This option signifies the use of an ARMv7-R processor
implementation supporting the Floating-Point Extension.
config ARMV7_EXCEPTION_STACK_SIZE
int "Undefined Instruction and Abort stack size (in bytes)"
default 256
help
This option specifies the size of the stack used by the undefined
instruction and data abort exception handlers.
config ARMV7_FIQ_STACK_SIZE
int "FIQ stack size (in bytes)"
default 256
help
This option specifies the size of the stack used by the FIQ handler.
config ARMV7_SVC_STACK_SIZE
int "SVC stack size (in bytes)"
default 512
help
This option specifies the size of the stack used by the SVC handler.
config ARMV7_SYS_STACK_SIZE
int "SYS stack size (in bytes)"
default 1024
help
This option specifies the size of the stack used by the system mode.
menu "ARM Cortex-R options"
depends on CPU_CORTEX_R
config RUNTIME_NMI
default y
config LDREX_STREX_AVAILABLE
default y
config GEN_ISR_TABLES
default y
config GEN_IRQ_VECTOR_TABLE
default n
endmenu
endif # CPU_CORTEX_R

View file

@ -0,0 +1,28 @@
/*
* Copyright (c) 2018 Lexmark International, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <kernel_structs.h>
/**
 *
 * @brief Fault handler
 *
 * This routine is called when fatal error conditions are detected by hardware
 * and is responsible only for reporting the error. It invokes
 * z_arm_fatal_error() with K_ERR_CPU_EXCEPTION, which applies the system's
 * fatal error handling policy.
 *
 * This is a stub for more exception handling code to be added later.
 *
 * @param esf Exception stack frame (may be NULL until Cortex-R populates it)
 * @param exc_return Exception return value (not yet decoded by this stub)
 */
void _Fault(z_arch_esf_t *esf, u32_t exc_return)
{
	/* Not decoded yet; explicitly mark unused so the stub builds
	 * cleanly with -Wunused-parameter until richer Cortex-R fault
	 * reporting is added.
	 */
	ARG_UNUSED(exc_return);

	z_arm_fatal_error(K_ERR_CPU_EXCEPTION, esf);
}
/* Fault-handling initialization hook. Nothing to configure on Cortex-R
 * yet; presumably kept so common arch init code has a uniform entry
 * point to call — confirm against callers.
 */
void z_FaultInit(void)
{
}

View file

@ -0,0 +1,28 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief ARM Cortex-R System Control Block interface
*/
#include <kernel.h>
#include <arch/cpu.h>
#include <misc/util.h>
/**
 *
 * @brief Reset the system
 *
 * This routine resets the processor.
 *
 * Declared __weak so that a SoC or board can supply a real reset
 * implementation; this default is a deliberate no-op stub.
 *
 * @param type reboot type (ignored by this stub)
 *
 * @return N/A
 */
void __weak sys_arch_reboot(int type)
{
	ARG_UNUSED(type);
}

View file

@ -0,0 +1,102 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Reset handler
*
* Reset handler that prepares the system for running C code.
*/
#include <toolchain.h>
#include <linker/sections.h>
#include <arch/cpu.h>
#include <offsets_short.h>
#include "vector_table.h"
_ASM_FILE_PROLOGUE
GTEXT(__reset)
GDATA(_interrupt_stack)
GDATA(_svc_stack)
GDATA(_sys_stack)
GDATA(_fiq_stack)
GDATA(_abort_stack)
GDATA(_undef_stack)
#define STACK_MARGIN 4
/**
*
* @brief Reset vector
*
* Ran when the system comes out of reset. The processor is in thread mode with
* privileged level. At this point, the main stack pointer (MSP) is already
* pointing to a valid area in SRAM.
*
* When these steps are completed, jump to _PrepC(), which will finish setting
* up the system for running C code.
*
* @return N/A
*/
SECTION_SUBSEC_FUNC(TEXT, _reset_section, __reset)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
mov r0, #0
mov r1, #0
mov r2, #0
mov r3, #0
mov r4, #0
mov r5, #0
mov r6, #0
mov r7, #0
mov r8, #0
mov r9, #0
mov r10, #0
mov r11, #0
mov r12, #0
mov r14, #0
/* lock interrupts: will get unlocked when switch to main task */
cpsid if
/* Setup FIQ stack */
msr CPSR_c, #(MODE_FIQ | I_BIT | F_BIT)
ldr sp, =(_fiq_stack + CONFIG_ARMV7_FIQ_STACK_SIZE - STACK_MARGIN)
/* Setup IRQ stack */
msr CPSR_c, #(MODE_IRQ | I_BIT | F_BIT)
ldr sp, =(_interrupt_stack + CONFIG_ISR_STACK_SIZE - STACK_MARGIN)
/* Setup data abort stack */
msr CPSR_c, #(MODE_ABT | I_BIT | F_BIT)
ldr sp, =(_abort_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE - \
STACK_MARGIN)
/* Setup undefined mode stack */
msr CPSR_c, #(MODE_UDF | I_BIT | F_BIT)
ldr sp, =(_undef_stack + CONFIG_ARMV7_EXCEPTION_STACK_SIZE - \
STACK_MARGIN)
/* Setup SVC mode stack */
msr CPSR_c, #(MODE_SVC | I_BIT | F_BIT)
ldr sp, =(_svc_stack + CONFIG_ARMV7_SVC_STACK_SIZE - STACK_MARGIN)
/* Setup System mode stack */
msr CPSR_c, #(MODE_SYS | I_BIT | F_BIT)
ldr sp, =(_sys_stack + CONFIG_ARMV7_SYS_STACK_SIZE - STACK_MARGIN)
/* Setup system control register */
mrc p15, 0, r0, c1, c0, 0 /* SCTLR */
bic r0, r0, #HIVECS /* Exception vectors from 0-0x1c */
mcr p15, 0, r0, c1, c0, 0
#if defined(CONFIG_WDOG_INIT)
/* board-specific watchdog initialization is necessary */
bl _WdogInit
#endif
b _PrepC

View file

@ -0,0 +1,26 @@
/*
* Copyright (c) 2018 Lexmark International, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <kernel.h>
#include <cortex_r/stack.h>
#include <string.h>
K_THREAD_STACK_DEFINE(_fiq_stack, CONFIG_ARMV7_FIQ_STACK_SIZE);
K_THREAD_STACK_DEFINE(_abort_stack, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
K_THREAD_STACK_DEFINE(_undef_stack, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
K_THREAD_STACK_DEFINE(_svc_stack, CONFIG_ARMV7_SVC_STACK_SIZE);
K_THREAD_STACK_DEFINE(_sys_stack, CONFIG_ARMV7_SYS_STACK_SIZE);
#if defined(CONFIG_INIT_STACKS)
/* Pre-fill the exception-mode and interrupt stacks with the 0xAA
 * pattern so runtime stack-usage analysis (CONFIG_INIT_STACKS) can
 * detect the high-water mark of each stack.
 *
 * NOTE(review): the memset lengths are the raw Kconfig sizes; if
 * K_THREAD_STACK_DEFINE reserves extra guard/alignment space the
 * actual buffer size may differ — confirm whether
 * K_THREAD_STACK_SIZEOF should be used here instead.
 */
void init_stacks(void)
{
	memset(_fiq_stack, 0xAA, CONFIG_ARMV7_FIQ_STACK_SIZE);
	memset(_svc_stack, 0xAA, CONFIG_ARMV7_SVC_STACK_SIZE);
	memset(_abort_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
	memset(_undef_stack, 0xAA, CONFIG_ARMV7_EXCEPTION_STACK_SIZE);
	memset(&_interrupt_stack, 0xAA, CONFIG_ISR_STACK_SIZE);
}
#endif

View file

@ -0,0 +1,27 @@
/*
* Copyright (c) 2018 Marvell
* Copyright (c) 2018 Lexmark International, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Populated vector table in ROM
*/
#include <toolchain.h>
#include <linker/sections.h>
#include "vector_table.h"
_ASM_FILE_PROLOGUE
SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,_vector_table)
ldr pc, =__reset /* offset 0 */
ldr pc, =__undef_instruction /* undef instruction offset 4 */
ldr pc, =__svc /* svc offset 8 */
ldr pc, =__prefetch_abort /* prefetch abort offset 0xc */
ldr pc, =__data_abort /* data abort offset 0x10 */
nop /* offset 0x14 */
ldr pc, =_isr_wrapper /* IRQ offset 0x18 */
ldr pc, =__nmi /* FIQ offset 0x1c */

View file

@ -0,0 +1,52 @@
/*
* Copyright (c) 2018 Marvell
* Copyright (c) 2018 Lexmark International, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Definitions for the boot vector table
*
*
* Definitions for the boot vector table.
*
* System exception handler names all have the same format:
*
* __<exception name with underscores>
*
* No other symbol has the same format, so they are easy to spot.
*/
#ifndef _VECTOR_TABLE__H_
#define _VECTOR_TABLE__H_
#ifdef _ASMLANGUAGE
#include <toolchain.h>
#include <linker/sections.h>
#include <misc/util.h>
GTEXT(__start)
GTEXT(_vector_table)
GTEXT(__nmi)
GTEXT(__undef_instruction)
GTEXT(__svc)
GTEXT(__prefetch_abort)
GTEXT(__data_abort)
GTEXT(__pendsv)
GTEXT(__reserved)
GTEXT(_PrepC)
GTEXT(_isr_wrapper)
#else
extern void *_vector_table[];
#endif /* _ASMLANGUAGE */
#endif /* _VECTOR_TABLE__H_ */

View file

@ -24,12 +24,14 @@ GTEXT(z_CpuIdleInit)
GTEXT(k_cpu_idle) GTEXT(k_cpu_idle)
GTEXT(k_cpu_atomic_idle) GTEXT(k_cpu_atomic_idle)
#if defined(CONFIG_CPU_CORTEX_M)
#define _SCB_SCR 0xE000ED10 #define _SCB_SCR 0xE000ED10
#define _SCB_SCR_SEVONPEND (1 << 4) #define _SCB_SCR_SEVONPEND (1 << 4)
#define _SCB_SCR_SLEEPDEEP (1 << 2) #define _SCB_SCR_SLEEPDEEP (1 << 2)
#define _SCB_SCR_SLEEPONEXIT (1 << 1) #define _SCB_SCR_SLEEPONEXIT (1 << 1)
#define _SCR_INIT_BITS _SCB_SCR_SEVONPEND #define _SCR_INIT_BITS _SCB_SCR_SEVONPEND
#endif
/** /**
* *
@ -46,9 +48,11 @@ GTEXT(k_cpu_atomic_idle)
*/ */
SECTION_FUNC(TEXT, z_CpuIdleInit) SECTION_FUNC(TEXT, z_CpuIdleInit)
#if defined(CONFIG_CPU_CORTEX_M)
ldr r1, =_SCB_SCR ldr r1, =_SCB_SCR
movs.n r2, #_SCR_INIT_BITS movs.n r2, #_SCR_INIT_BITS
str r2, [r1] str r2, [r1]
#endif
bx lr bx lr
/** /**
@ -79,7 +83,8 @@ SECTION_FUNC(TEXT, k_cpu_idle)
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_TRACING */ #endif /* CONFIG_TRACING */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
|| defined(CONFIG_ARMV7_R)
cpsie i cpsie i
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* clear BASEPRI so wfi is awakened by incoming interrupts */ /* clear BASEPRI so wfi is awakened by incoming interrupts */
@ -140,7 +145,8 @@ SECTION_FUNC(TEXT, k_cpu_atomic_idle)
/* r0: interrupt mask from caller */ /* r0: interrupt mask from caller */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
|| defined(CONFIG_ARMV7_R)
/* No BASEPRI, call wfe directly (SEVONPEND set in z_CpuIdleInit()) */ /* No BASEPRI, call wfe directly (SEVONPEND set in z_CpuIdleInit()) */
wfe wfe

View file

@ -24,6 +24,9 @@ _ASM_FILE_PROLOGUE
GTEXT(z_ExcExit) GTEXT(z_ExcExit)
GTEXT(_IntExit) GTEXT(_IntExit)
GDATA(_kernel) GDATA(_kernel)
#if defined(CONFIG_CPU_CORTEX_R)
GTEXT(__pendsv)
#endif
/** /**
* *
@ -66,6 +69,9 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _IntExit)
*/ */
SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_ExcExit) SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_ExcExit)
#if defined(CONFIG_CPU_CORTEX_R)
push {r0, lr}
#endif
#ifdef CONFIG_PREEMPT_ENABLED #ifdef CONFIG_PREEMPT_ENABLED
ldr r0, =_kernel ldr r0, =_kernel
@ -76,10 +82,16 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_ExcExit)
cmp r0, r1 cmp r0, r1
beq _EXIT_EXC beq _EXIT_EXC
#if defined(CONFIG_CPU_CORTEX_M)
/* context switch required, pend the PendSV exception */ /* context switch required, pend the PendSV exception */
ldr r1, =_SCS_ICSR ldr r1, =_SCS_ICSR
ldr r2, =_SCS_ICSR_PENDSV ldr r2, =_SCS_ICSR_PENDSV
str r2, [r1] str r2, [r1]
#elif defined(CONFIG_CPU_CORTEX_R)
push {r0, lr}
bl __pendsv
pop {r0, lr}
#endif
_ExcExitWithGdbStub: _ExcExitWithGdbStub:
@ -96,4 +108,31 @@ _EXIT_EXC:
pop {r0, lr} pop {r0, lr}
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif /* CONFIG_STACK_SENTINEL */ #endif /* CONFIG_STACK_SENTINEL */
#if defined(CONFIG_CPU_CORTEX_M)
bx lr bx lr
#elif defined(CONFIG_CPU_CORTEX_R)
/*
* r0-r3 are either the values from the thread before it was switched out
* or they are the args to _new_thread for a new thread
*/
pop {r0, lr}
push {r4, r5}
cmp r0, #RET_FROM_SVC
cps #MODE_SYS
ldmia sp!, {r0-r5}
beq _svc_exit
cps #MODE_IRQ
b _exc_exit
_svc_exit:
cps #MODE_SVC
_exc_exit:
mov r12, r4
mov lr, r5
pop {r4, r5}
movs pc, lr
#endif

View file

@ -30,6 +30,10 @@ GTEXT(__usage_fault)
GTEXT(__secure_fault) GTEXT(__secure_fault)
#endif /* CONFIG_ARM_SECURE_FIRMWARE*/ #endif /* CONFIG_ARM_SECURE_FIRMWARE*/
GTEXT(__debug_monitor) GTEXT(__debug_monitor)
#elif defined(CONFIG_ARMV7_R)
GTEXT(__undef_instruction)
GTEXT(__prefetch_abort)
GTEXT(__data_abort)
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
@ -70,6 +74,10 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__usage_fault)
SECTION_SUBSEC_FUNC(TEXT,__fault,__secure_fault) SECTION_SUBSEC_FUNC(TEXT,__fault,__secure_fault)
#endif /* CONFIG_ARM_SECURE_FIRMWARE */ #endif /* CONFIG_ARM_SECURE_FIRMWARE */
SECTION_SUBSEC_FUNC(TEXT,__fault,__debug_monitor) SECTION_SUBSEC_FUNC(TEXT,__fault,__debug_monitor)
#elif defined(CONFIG_ARMV7_R)
SECTION_SUBSEC_FUNC(TEXT,__fault,__undef_instruction)
SECTION_SUBSEC_FUNC(TEXT,__fault,__prefetch_abort)
SECTION_SUBSEC_FUNC(TEXT,__fault,__data_abort)
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
@ -127,6 +135,12 @@ _s_stack_frame_msp:
mrs r0, MSP mrs r0, MSP
_s_stack_frame_endif: _s_stack_frame_endif:
#endif /* CONFIG_ARM_SECURE_FIRMWARE || CONFIG_ARM_NONSECURE_FIRMWARE */ #endif /* CONFIG_ARM_SECURE_FIRMWARE || CONFIG_ARM_NONSECURE_FIRMWARE */
#elif defined(CONFIG_ARMV7_R)
/*
* Pass null for the esf to _Fault for now. A future PR will add better
* exception debug for Cortex-R that subsumes what esf provides.
*/
mov r0, #0
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
@ -146,6 +160,11 @@ _s_stack_frame_endif:
push {r0, lr} push {r0, lr}
bl _Fault bl _Fault
#if defined(CONFIG_CPU_CORTEX_M)
pop {r0, pc} pop {r0, pc}
#elif defined(CONFIG_CPU_CORTEX_R)
pop {r0, lr}
subs pc, lr, #8
#endif
.end .end

View file

@ -16,7 +16,12 @@
#include <kernel.h> #include <kernel.h>
#include <arch/cpu.h> #include <arch/cpu.h>
#if defined(CONFIG_CPU_CORTEX_M)
#include <arch/arm/cortex_m/cmsis.h> #include <arch/arm/cortex_m/cmsis.h>
#elif defined(CONFIG_CPU_CORTEX_R)
#include <device.h>
#include <irq_nextlevel.h>
#endif
#include <sys/__assert.h> #include <sys/__assert.h>
#include <toolchain.h> #include <toolchain.h>
#include <linker/sections.h> #include <linker/sections.h>
@ -27,6 +32,7 @@
extern void __reserved(void); extern void __reserved(void);
#if defined(CONFIG_CPU_CORTEX_M)
#define NUM_IRQS_PER_REG 32 #define NUM_IRQS_PER_REG 32
#define REG_FROM_IRQ(irq) (irq / NUM_IRQS_PER_REG) #define REG_FROM_IRQ(irq) (irq / NUM_IRQS_PER_REG)
#define BIT_FROM_IRQ(irq) (irq % NUM_IRQS_PER_REG) #define BIT_FROM_IRQ(irq) (irq % NUM_IRQS_PER_REG)
@ -115,6 +121,74 @@ void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
NVIC_SetPriority((IRQn_Type)irq, prio); NVIC_SetPriority((IRQn_Type)irq, prio);
} }
#elif defined(CONFIG_CPU_CORTEX_R)
/**
*
* @brief Enable an interrupt line
*
* Enable the interrupt. After this call, the CPU will receive interrupts for
* the specified <irq>.
*
* @return N/A
*/
void z_arch_irq_enable(unsigned int irq)
{
struct device *dev = _sw_isr_table[0].arg;
irq_enable_next_level(dev, (irq >> 8) - 1);
}
/**
*
* @brief Disable an interrupt line
*
* Disable an interrupt line. After this call, the CPU will stop receiving
* interrupts for the specified <irq>.
*
* @return N/A
*/
void z_arch_irq_disable(unsigned int irq)
{
struct device *dev = _sw_isr_table[0].arg;
irq_disable_next_level(dev, (irq >> 8) - 1);
}
/**
* @brief Return IRQ enable state
*
* @param irq IRQ line
* @return interrupt enable state, true or false
*/
int z_arch_irq_is_enabled(unsigned int irq)
{
struct device *dev = _sw_isr_table[0].arg;
return irq_is_enabled_next_level(dev);
}
/**
* @internal
*
* @brief Set an interrupt's priority
*
* The priority is verified if ASSERT_ON is enabled. The maximum number
* of priority levels is a little complex, as there are some hardware
* priority levels which are reserved: three for various types of exceptions,
* and possibly one additional to support zero latency interrupts.
*
* @return N/A
*/
void z_irq_priority_set(unsigned int irq, unsigned int prio, u32_t flags)
{
struct device *dev = _sw_isr_table[0].arg;
irq_set_priority_next_level(dev, (irq >> 8) - 1, prio, flags);
}
#endif
/** /**
* *
* @brief Spurious interrupt handler * @brief Spurious interrupt handler
@ -144,7 +218,8 @@ void z_irq_spurious(void *unused)
#ifdef CONFIG_SYS_POWER_MANAGEMENT #ifdef CONFIG_SYS_POWER_MANAGEMENT
void _arch_isr_direct_pm(void) void _arch_isr_direct_pm(void)
{ {
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
|| defined(CONFIG_ARMV7_R)
unsigned int key; unsigned int key;
/* irq_lock() does what we want for this CPU */ /* irq_lock() does what we want for this CPU */
@ -166,7 +241,8 @@ void _arch_isr_direct_pm(void)
z_sys_power_save_idle_exit(idle_val); z_sys_power_save_idle_exit(idle_val);
} }
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
|| defined(CONFIG_ARMV7_R)
irq_unlock(key); irq_unlock(key);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
__asm__ volatile("cpsie i" : : : "memory"); __asm__ volatile("cpsie i" : : : "memory");

View file

@ -41,7 +41,24 @@ GTEXT(_IntExit)
*/ */
SECTION_FUNC(TEXT, _isr_wrapper) SECTION_FUNC(TEXT, _isr_wrapper)
#if defined(CONFIG_CPU_CORTEX_M)
push {r0,lr} /* r0, lr are now the first items on the stack */ push {r0,lr} /* r0, lr are now the first items on the stack */
#elif defined(CONFIG_CPU_CORTEX_R)
/*
* Save away r0-r3 from previous context to the process stack since they
* are clobbered here. Also, save away lr since we may swap processes
* and return to a different thread.
*/
push {r4, r5}
mov r4, r12
sub r5, lr, #4
cps #MODE_SYS
stmdb sp!, {r0-r5}
cps #MODE_IRQ
pop {r4, r5}
#endif
#ifdef CONFIG_EXECUTION_BENCHMARKING #ifdef CONFIG_EXECUTION_BENCHMARKING
bl read_timer_start_of_isr bl read_timer_start_of_isr
@ -82,6 +99,13 @@ _idle_state_cleared:
/* clear kernel idle state */ /* clear kernel idle state */
strne r1, [r2, #_kernel_offset_to_idle] strne r1, [r2, #_kernel_offset_to_idle]
blne z_sys_power_save_idle_exit blne z_sys_power_save_idle_exit
#elif defined(CONFIG_ARMV7_R)
beq _idle_state_cleared
movs r1, #0
/* clear kernel idle state */
str r1, [r2, #_kernel_offset_to_idle]
bl z_sys_power_save_idle_exit
_idle_state_cleared:
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
@ -89,7 +113,9 @@ _idle_state_cleared:
cpsie i /* re-enable interrupts (PRIMASK = 0) */ cpsie i /* re-enable interrupts (PRIMASK = 0) */
#endif #endif
#if defined(CONFIG_CPU_CORTEX_M)
mrs r0, IPSR /* get exception number */ mrs r0, IPSR /* get exception number */
#endif
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
ldr r1, =16 ldr r1, =16
subs r0, r1 /* get IRQ number */ subs r0, r1 /* get IRQ number */
@ -97,6 +123,12 @@ _idle_state_cleared:
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
sub r0, r0, #16 /* get IRQ number */ sub r0, r0, #16 /* get IRQ number */
lsl r0, r0, #3 /* table is 8-byte wide */ lsl r0, r0, #3 /* table is 8-byte wide */
#elif defined(CONFIG_ARMV7_R)
/*
* Cortex-R only has one IRQ line so the main handler will be at
* offset 0 of the table.
*/
mov r0, #0
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
@ -128,12 +160,23 @@ _idle_state_cleared:
mov lr, r3 mov lr, r3
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
pop {r0, lr} pop {r0, lr}
#elif defined(CONFIG_ARMV7_R)
/*
* r0,lr were saved on the process stack since a swap could
* happen. exc_exit will handle getting those values back
* from the process stack to return to the correct location
* so there is no need to do anything here.
*/
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#if defined(CONFIG_CPU_CORTEX_R)
mov r0, #RET_FROM_IRQ
#endif
/* Use 'bx' instead of 'b' because 'bx' can jump further, and use /* Use 'bx' instead of 'b' because 'bx' can jump further, and use
* 'bx' instead of 'blx' because exception return is done in * 'bx' instead of 'blx' because exception return is done in
* _IntExit() */ * _IntExit() */
ldr r0, =_IntExit ldr r1, =_IntExit
bx r0 bx r1

View file

@ -65,6 +65,10 @@ GEN_OFFSET_SYM(_callee_saved_t, v6);
GEN_OFFSET_SYM(_callee_saved_t, v7); GEN_OFFSET_SYM(_callee_saved_t, v7);
GEN_OFFSET_SYM(_callee_saved_t, v8); GEN_OFFSET_SYM(_callee_saved_t, v8);
GEN_OFFSET_SYM(_callee_saved_t, psp); GEN_OFFSET_SYM(_callee_saved_t, psp);
#if defined(CONFIG_CPU_CORTEX_R)
GEN_OFFSET_SYM(_callee_saved_t, spsr);
GEN_OFFSET_SYM(_callee_saved_t, lr);
#endif
/* size of the entire preempt registers structure */ /* size of the entire preempt registers structure */

View file

@ -21,7 +21,12 @@
#include <toolchain.h> #include <toolchain.h>
#include <linker/linker-defs.h> #include <linker/linker-defs.h>
#include <kernel_internal.h> #include <kernel_internal.h>
#include <arch/cpu.h>
#if defined(CONFIG_CPU_CORTEX_M)
#include <arch/arm/cortex_m/cmsis.h> #include <arch/arm/cortex_m/cmsis.h>
#elif defined(CONFIG_ARMV7_R)
#include <cortex_r/stack.h>
#endif
#if defined(__GNUC__) #if defined(__GNUC__)
/* /*
@ -150,9 +155,6 @@ extern FUNC_NORETURN void z_cstart(void);
* *
* @return N/A * @return N/A
*/ */
extern void z_IntLibInit(void);
#ifdef CONFIG_BOOT_TIME_MEASUREMENT #ifdef CONFIG_BOOT_TIME_MEASUREMENT
extern u64_t __start_time_stamp; extern u64_t __start_time_stamp;
#endif #endif
@ -162,6 +164,9 @@ void _PrepC(void)
enable_floating_point(); enable_floating_point();
z_bss_zero(); z_bss_zero();
z_data_copy(); z_data_copy();
#if defined(CONFIG_ARMV7_R) && defined(CONFIG_INIT_STACKS)
init_stacks();
#endif
#ifdef CONFIG_BOOT_TIME_MEASUREMENT #ifdef CONFIG_BOOT_TIME_MEASUREMENT
__start_time_stamp = 0U; __start_time_stamp = 0U;
#endif #endif

View file

@ -55,11 +55,16 @@ int __swap(int key)
_current->arch.basepri = key; _current->arch.basepri = key;
_current->arch.swap_return_value = _k_neg_eagain; _current->arch.swap_return_value = _k_neg_eagain;
#if defined(CONFIG_CPU_CORTEX_M)
/* set pending bit to make sure we will take a PendSV exception */ /* set pending bit to make sure we will take a PendSV exception */
SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk; SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
/* clear mask or enable all irqs to take a pendsv */ /* clear mask or enable all irqs to take a pendsv */
irq_unlock(0); irq_unlock(0);
#elif defined(CONFIG_CPU_CORTEX_R)
cortex_r_svc();
irq_unlock(key);
#endif
/* Context switch is performed here. Returning implies the /* Context switch is performed here. Returning implies the
* thread has been context-switched-in again. * thread has been context-switched-in again.

View file

@ -64,7 +64,9 @@ SECTION_FUNC(TEXT, __pendsv)
add r0, r2 add r0, r2
/* save callee-saved + psp in thread */ /* save callee-saved + psp in thread */
#if defined(CONFIG_CPU_CORTEX_M)
mrs ip, PSP mrs ip, PSP
#endif
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* Store current r4-r7 */ /* Store current r4-r7 */
@ -99,6 +101,10 @@ out_fp_active:
out_fp_endif: out_fp_endif:
str r0, [r2, #_thread_offset_to_mode] str r0, [r2, #_thread_offset_to_mode]
#endif /* CONFIG_FP_SHARING */ #endif /* CONFIG_FP_SHARING */
#elif defined(CONFIG_ARMV7_R)
/* Store rest of process context */
mrs r12, SPSR
stm r0, {r4-r12,sp,lr}^
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
@ -110,6 +116,11 @@ out_fp_endif:
movs.n r0, #_EXC_IRQ_DEFAULT_PRIO movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
msr BASEPRI, r0 msr BASEPRI, r0
isb /* Make the effect of disabling interrupts be realized immediately */ isb /* Make the effect of disabling interrupts be realized immediately */
#elif defined(CONFIG_ARMV7_R)
/*
* Interrupts are still disabled from __swap so empty clause
* here to avoid the preprocessor error below
*/
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
@ -121,8 +132,10 @@ out_fp_endif:
* to pend PendSV have been taken with the current kernel * to pend PendSV have been taken with the current kernel
* state and this is what we're handling currently. * state and this is what we're handling currently.
*/ */
#if defined(CONFIG_CPU_CORTEX_M)
ldr v4, =_SCS_ICSR ldr v4, =_SCS_ICSR
ldr v3, =_SCS_ICSR_UNPENDSV ldr v3, =_SCS_ICSR_UNPENDSV
#endif
/* _kernel is still in r1 */ /* _kernel is still in r1 */
@ -141,7 +154,9 @@ out_fp_endif:
*/ */
/* _SCS_ICSR is still in v4 and _SCS_ICSR_UNPENDSV in v3 */ /* _SCS_ICSR is still in v4 and _SCS_ICSR_UNPENDSV in v3 */
#if defined(CONFIG_CPU_CORTEX_M)
str v3, [v4, #0] str v3, [v4, #0]
#endif
/* Restore previous interrupt disable state (irq_lock key) */ /* Restore previous interrupt disable state (irq_lock key) */
#if (defined(CONFIG_CPU_CORTEX_M0PLUS) || defined(CONFIG_CPU_CORTEX_M0)) && \ #if (defined(CONFIG_CPU_CORTEX_M0PLUS) || defined(CONFIG_CPU_CORTEX_M0)) && \
@ -158,7 +173,7 @@ out_fp_endif:
str r3, [r4] str r3, [r4]
#else #else
ldr r0, [r2, #_thread_offset_to_basepri] ldr r0, [r2, #_thread_offset_to_basepri]
movs.n r3, #0 movs r3, #0
str r3, [r2, #_thread_offset_to_basepri] str r3, [r2, #_thread_offset_to_basepri]
#endif #endif
@ -254,6 +269,19 @@ in_fp_endif:
/* load callee-saved + psp from thread */ /* load callee-saved + psp from thread */
add r0, r2, #_thread_offset_to_callee_saved add r0, r2, #_thread_offset_to_callee_saved
ldmia r0, {v1-v8, ip} ldmia r0, {v1-v8, ip}
#elif defined(CONFIG_ARMV7_R)
_thread_irq_disabled:
/* load _kernel into r1 and current k_thread into r2 */
ldr r1, =_kernel
ldr r2, [r1, #_kernel_offset_to_current]
/* addr of callee-saved regs in thread in r0 */
ldr r0, =_thread_offset_to_callee_saved
add r0, r2
/* restore r4-r12 for incoming thread, plus system sp and lr */
ldm r0, {r4-r12,sp,lr}^
msr SPSR_fsxc, r12
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
@ -264,7 +292,9 @@ in_fp_endif:
msr PSPLIM, r0 msr PSPLIM, r0
#endif /* CONFIG_BUILTIN_STACK_GUARD */ #endif /* CONFIG_BUILTIN_STACK_GUARD */
#if defined(CONFIG_CPU_CORTEX_M)
msr PSP, ip msr PSP, ip
#endif
#ifdef CONFIG_BUILTIN_STACK_GUARD #ifdef CONFIG_BUILTIN_STACK_GUARD
/* r2 contains k_thread */ /* r2 contains k_thread */
@ -486,7 +516,80 @@ valid_syscall_id:
bx lr bx lr
#endif #endif
#elif defined(CONFIG_ARMV7_R)
/**
 * @brief SVC exception handler (ARMv7-R)
 *
 * Decodes the SVC immediate from the calling instruction and dispatches
 * to a context switch, IRQ offload (when configured) or a kernel oops.
 */
SECTION_FUNC(TEXT, __svc)
	/*
	 * Switch to system mode to store r0-r3 to the process stack pointer.
	 * Save r12 and the lr as we will be swapping in another process and
	 * returning to a different location.
	 */
	push {r4, r5}
	mov r4, r12
	mov r5, lr
	cps #MODE_SYS
	stmdb sp!, {r0-r5}
	cps #MODE_SVC
	pop {r4, r5}

	/* Get SVC number */
	mrs r0, spsr
	/* SPSR T bit set -> caller was executing in Thumb state */
	tst r0, #0x20
	/* ARM state: SVC immediate is the low 24 bits of the insn at lr-4 */
	ldreq r1, [lr, #-4]
	biceq r1, #0xff000000
	beq demux

	/*
	 * Thumb state: the 16-bit SVC encoding sits at lr-2.
	 * NOTE(review): this is a word load at a halfword-aligned address;
	 * an ldrh would read exactly the 16-bit instruction -- confirm that
	 * unaligned/overlapping word loads are acceptable here.
	 */
	ldr r1, [lr, #-2]
	bic r1, #0xff00

	/*
	 * grab service call number:
	 * 0: context switch
	 * 1: irq_offload (if configured)
	 * 2: kernel panic or oops (software generated fatal exception)
	 * Planned implementation of system calls for memory protection will
	 * expand this case.
	 */
demux:
	cmp r1, #_SVC_CALL_CONTEXT_SWITCH
	beq _context_switch

	cmp r1, #_SVC_CALL_RUNTIME_EXCEPT
	beq _oops

	/*
	 * Any other SVC number falls through to IRQ offload when configured;
	 * there is no explicit compare against _SVC_CALL_IRQ_OFFLOAD.
	 * NOTE(review): "#if CONFIG_IRQ_OFFLOAD" relies on an undefined macro
	 * evaluating to 0 when the option is off; "#ifdef" is the usual
	 * spelling for Kconfig booleans -- confirm.
	 */
#if CONFIG_IRQ_OFFLOAD
	push {r0, lr}
	blx z_irq_do_offload	/* call C routine which executes the offload */
	pop {r0, lr}

	/* exception return is done in _IntExit() */
	mov r0, #RET_FROM_SVC
	b _IntExit
#endif

_context_switch:
	/* handler mode exit, to PendSV */
	push {r0, lr}
	bl __pendsv
	pop {r0, lr}

	mov r0, #RET_FROM_SVC
	b _IntExit

_oops:
	push {r0, lr}
	blx z_do_kernel_oops
	pop {r0, lr}
	cpsie i
	/* Exception return: restore CPSR from SPSR and jump to saved lr */
	movs pc, lr

GTEXT(cortex_r_svc)
/* Trigger an SVC #0 (context switch) from thread context; see __swap() */
SECTION_FUNC(TEXT, cortex_r_svc)
	svc #0
	bx lr
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

View file

@ -138,8 +138,10 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
pInitCtx->basic.pc = (u32_t)z_thread_entry; pInitCtx->basic.pc = (u32_t)z_thread_entry;
#endif #endif
#if defined(CONFIG_CPU_CORTEX_M)
/* force ARM mode by clearing LSB of address */ /* force ARM mode by clearing LSB of address */
pInitCtx->basic.pc &= 0xfffffffe; pInitCtx->basic.pc &= 0xfffffffe;
#endif
pInitCtx->basic.a1 = (u32_t)pEntry; pInitCtx->basic.a1 = (u32_t)pEntry;
pInitCtx->basic.a2 = (u32_t)parameter1; pInitCtx->basic.a2 = (u32_t)parameter1;
@ -149,6 +151,11 @@ void z_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
0x01000000UL; /* clear all, thumb bit is 1, even if RO */ 0x01000000UL; /* clear all, thumb bit is 1, even if RO */
thread->callee_saved.psp = (u32_t)pInitCtx; thread->callee_saved.psp = (u32_t)pInitCtx;
#if defined(CONFIG_CPU_CORTEX_R)
pInitCtx->basic.lr = (u32_t)pInitCtx->basic.pc;
thread->callee_saved.spsr = A_BIT | T_BIT | MODE_SYS;
thread->callee_saved.lr = (u32_t)pInitCtx->basic.pc;
#endif
thread->arch.basepri = 0; thread->arch.basepri = 0;
#if defined(CONFIG_USERSPACE) || defined(CONFIG_FP_SHARING) #if defined(CONFIG_USERSPACE) || defined(CONFIG_FP_SHARING)

View file

@ -0,0 +1,86 @@
/*
* Copyright (c) 2018 Lexmark International, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Exception/interrupt context helpers for Cortex-R CPUs
*
* Exception/interrupt context helpers.
*/
#ifndef _ARM_CORTEXR_ISR__H_
#define _ARM_CORTEXR_ISR__H_
#include <arch/cpu.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _ASMLANGUAGE
/* nothing */
#else
#include <irq_offload.h>
#ifdef CONFIG_IRQ_OFFLOAD
extern volatile irq_offload_routine_t offload_routine;
#endif
/**
 * @brief Find out if running in an ISR context
 *
 * Reads the CPSR and checks whether the current mode bits indicate
 * IRQ or FIQ mode.
 *
 * @return true if in ISR, false if not.
 */
static ALWAYS_INLINE bool z_IsInIsr(void)
{
	unsigned int cpsr;

	__asm__ volatile(
		"	mrs %0, cpsr"
		: "=r" (cpsr) : : "memory", "cc");

	switch (cpsr & MODE_MASK) {
	case MODE_FIQ:
	case MODE_IRQ:
		/* Executing in one of the interrupt-handling modes */
		return true;
	default:
		return false;
	}
}
/**
 * @brief Setup system exceptions
 *
 * No-op on Cortex-R: there is no Cortex-M style fault-exception enable
 * to perform here; the hook exists for API parity with cortex_m/exc.h.
 *
 * @return N/A
 */
static ALWAYS_INLINE void z_ExcSetup(void)
{
	/* Intentionally empty */
}
/**
 * @brief Clear Fault exceptions
 *
 * No-op on Cortex-R: this architecture has no Cortex-M style fault
 * status registers (Mem, Bus, Usage, Hard Fault) to clear; the hook is
 * kept for API parity with cortex_m/exc.h.
 *
 * @return N/A
 */
static ALWAYS_INLINE void z_clearfaults(void)
{
	/* Intentionally empty */
}
extern void cortex_r_svc(void);
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* _ARM_CORTEXR_ISR__H_ */

View file

@ -0,0 +1,49 @@
/*
* Copyright (c) 2018 Lexmark International, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @file
* @brief Stack helpers for Cortex-R CPUs
*
* Stack helper functions.
*/
#ifndef _ARM_CORTEXR_STACK__H_
#define _ARM_CORTEXR_STACK__H_
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _ASMLANGUAGE
/* nothing */
#else
extern K_THREAD_STACK_DEFINE(_interrupt_stack, CONFIG_ISR_STACK_SIZE);
extern void init_stacks(void);
/**
 *
 * @brief Setup interrupt stack
 *
 * On Cortex-R, the interrupt stack is set up by reset.S, so there is
 * nothing left to do here; the hook exists for API parity with
 * cortex_m/stack.h.
 *
 * @return N/A
 */
static ALWAYS_INLINE void z_InterruptStackSetup(void)
{
	/* Intentionally empty: reset.S already configured the stack */
}
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* _ARM_CORTEXR_STACK__H_ */

View file

@ -52,6 +52,9 @@ typedef struct __basic_sf _basic_sf_t;
#ifdef CONFIG_CPU_CORTEX_M #ifdef CONFIG_CPU_CORTEX_M
#include <cortex_m/stack.h> #include <cortex_m/stack.h>
#include <cortex_m/exc.h> #include <cortex_m/exc.h>
#elif defined(CONFIG_CPU_CORTEX_R)
#include <cortex_r/stack.h>
#include <cortex_r/exc.h>
#endif #endif
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE

View file

@ -109,8 +109,12 @@ z_arch_switch_to_main_thread(struct k_thread *main_thread,
*/ */
__asm__ volatile ( __asm__ volatile (
"mov r0, %0 \n\t" /* Store _main in R0 */ "mov r0, %0 \n\t" /* Store _main in R0 */
#if defined(CONFIG_CPU_CORTEX_M)
"msr PSP, %1 \n\t" /* __set_PSP(start_of_main_stack) */ "msr PSP, %1 \n\t" /* __set_PSP(start_of_main_stack) */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) #endif
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
|| defined(CONFIG_ARMV7_R)
"cpsie i \n\t" /* __enable_irq() */ "cpsie i \n\t" /* __enable_irq() */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE) #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
"cpsie if \n\t" /* __enable_irq(); __enable_fault_irq() */ "cpsie if \n\t" /* __enable_irq(); __enable_fault_irq() */

View file

@ -31,7 +31,13 @@ struct _callee_saved {
u32_t v6; /* r9 */ u32_t v6; /* r9 */
u32_t v7; /* r10 */ u32_t v7; /* r10 */
u32_t v8; /* r11 */ u32_t v8; /* r11 */
#if defined(CONFIG_CPU_CORTEX_R)
u32_t spsr;/* r12 */
u32_t psp; /* r13 */ u32_t psp; /* r13 */
u32_t lr; /* r14 */
#else
u32_t psp; /* r13 */
#endif
}; };
typedef struct _callee_saved _callee_saved_t; typedef struct _callee_saved _callee_saved_t;

34
dts/arm/armv7-r.dtsi Normal file
View file

@ -0,0 +1,34 @@
/*
* Copyright (c) 2018 Lexmark International, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "skeleton.dtsi"

/ {
	cpus {
		#address-cells = <1>;
		#size-cells = <0>;

		cpu@0 {
			device_type = "cpu";
			compatible = "Cortex-R";
			reg = <0>;
		};

		/*
		 * Core interrupt controller; interrupt specifiers use two
		 * cells (presumably number and priority -- confirm against
		 * the armv7-r,core-intc binding).
		 */
		core_intc: core_intc@0 {
			compatible = "armv7-r,core-intc";
			reg = <0x00 0x4>;
			interrupt-controller;
			#interrupt-cells = <2>;
		};
	};

	/* SoC-level peripherals are added by including SoC dtsi files */
	soc {
		#address-cells = <1>;
		#size-cells = <1>;
		compatible = "simple-bus";
		ranges;
	};
};

View file

@ -35,6 +35,8 @@
#ifdef CONFIG_CPU_CORTEX_M #ifdef CONFIG_CPU_CORTEX_M
#include <arch/arm/cortex_m/cpu.h> #include <arch/arm/cortex_m/cpu.h>
#include <arch/arm/cortex_m/memory_map.h> #include <arch/arm/cortex_m/memory_map.h>
#elif defined(CONFIG_CPU_CORTEX_R)
#include <arch/arm/cortex_r/cpu.h>
#endif #endif
#ifdef __cplusplus #ifdef __cplusplus

View file

@ -90,6 +90,12 @@ static ALWAYS_INLINE unsigned int z_arch_irq_lock(void)
: "=r"(key), "=r"(tmp) : "=r"(key), "=r"(tmp)
: "i"(_EXC_IRQ_DEFAULT_PRIO) : "i"(_EXC_IRQ_DEFAULT_PRIO)
: "memory"); : "memory");
#elif defined(CONFIG_ARMV7_R)
__asm__ volatile("mrs %0, cpsr;"
"cpsid i"
: "=r" (key)
:
: "memory", "cc");
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
@ -132,6 +138,11 @@ static ALWAYS_INLINE void z_arch_irq_unlock(unsigned int key)
"msr BASEPRI, %0;" "msr BASEPRI, %0;"
"isb;" "isb;"
: : "r"(key) : "memory"); : : "r"(key) : "memory");
#elif defined(CONFIG_ARMV7_R)
__asm__ volatile("msr cpsr_c, %0"
:
: "r" (key)
: "memory", "cc");
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

View file

@ -0,0 +1,32 @@
/*
* Copyright (c) 2018 Lexmark International, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef _CORTEX_R_CPU_H
#define _CORTEX_R_CPU_H

/* CPSR/SPSR mode field (M[4:0]) encodings */
#define MODE_USR 0x10	/* User */
#define MODE_FIQ 0x11	/* FIQ */
#define MODE_IRQ 0x12	/* IRQ */
#define MODE_SVC 0x13	/* Supervisor */
#define MODE_ABT 0x17	/* Abort */
#define MODE_UDF 0x1b	/* Undefined instruction */
#define MODE_SYS 0x1f	/* System (shares registers with User mode) */
#define MODE_MASK 0x1f	/* Mask to extract the mode bits from CPSR */

/* CPSR state and interrupt-mask bits */
#define A_BIT (1 << 8)	/* Asynchronous abort mask */
#define I_BIT (1 << 7)	/* IRQ mask */
#define F_BIT (1 << 6)	/* FIQ mask */
#define T_BIT (1 << 5)	/* Thumb execution state */

/* NOTE(review): presumably SCTLR.V (high exception vectors) -- confirm */
#define HIVECS (1 << 13)

/* Exception-return discriminators (see _IntExit usage in swap_helper.S) */
#define RET_FROM_SVC 0
#define RET_FROM_IRQ 1

/* Barriers */
#define __ISB() __asm__ volatile ("isb sy" : : : "memory")
#define __DMB() __asm__ volatile ("dmb sy" : : : "memory")

#endif /* _CORTEX_R_CPU_H */

View file

@ -0,0 +1,9 @@
/*
 * Copyright (c) 2017 Linaro Limited.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/* Set initial alignment to the 32 byte minimum for all MPUs */
/*
 * NOTE(review): 32 presumably matches the minimum ARM MPU region
 * alignment -- confirm against CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
 */
_app_data_align = 32;
. = ALIGN(32);

View file

@ -0,0 +1,493 @@
/*
* Copyright (c) 2013-2014 Wind River Systems, Inc.
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
 * @file
 * @brief Linker command/script file
 *
 * Linker script for the Cortex-R platforms. Derived from the common
 * Cortex-M linker script; some Cortex-M SoC-specific sections (Kinetis
 * flash config, CC3220SF debug header, TI CCFG, STM32 RAM vector table)
 * are retained from that template.
 */
#define _LINKER
#define _ASMLANGUAGE
#include <autoconf.h>
#include <linker/sections.h>
#include <generated_dts_board.h>
#include <linker/linker-defs.h>
#include <linker/linker-tool.h>
/* physical address of RAM */
#ifdef CONFIG_XIP
#define ROMABLE_REGION FLASH
#define RAMABLE_REGION SRAM
#else
#define ROMABLE_REGION SRAM
#define RAMABLE_REGION SRAM
#endif
#if defined(CONFIG_XIP)
#define _DATA_IN_ROM __data_rom_start
#else
#define _DATA_IN_ROM
#endif
#if !defined(SKIP_TO_KINETIS_FLASH_CONFIG)
#define SKIP_TO_KINETIS_FLASH_CONFIG
#endif
#if !defined(CONFIG_XIP) && (CONFIG_FLASH_SIZE == 0)
#define ROM_ADDR RAM_ADDR
#else
#define ROM_ADDR (CONFIG_FLASH_BASE_ADDRESS + CONFIG_FLASH_LOAD_OFFSET)
#endif
#ifdef CONFIG_TI_CCFG_PRESENT
#define CCFG_SIZE 88
#define ROM_SIZE (CONFIG_FLASH_SIZE*1K - CONFIG_FLASH_LOAD_OFFSET - \
CCFG_SIZE)
#define CCFG_ADDR (ROM_ADDR + ROM_SIZE)
#else
#if CONFIG_FLASH_LOAD_SIZE > 0
#define ROM_SIZE CONFIG_FLASH_LOAD_SIZE
#else
#define ROM_SIZE (CONFIG_FLASH_SIZE*1K - CONFIG_FLASH_LOAD_OFFSET)
#endif
#endif
#if defined(CONFIG_XIP)
#if defined(CONFIG_IS_BOOTLOADER)
#define RAM_SIZE (CONFIG_BOOTLOADER_SRAM_SIZE * 1K)
#define RAM_ADDR (CONFIG_SRAM_BASE_ADDRESS + \
(CONFIG_SRAM_SIZE * 1K - RAM_SIZE))
#else
#define RAM_SIZE (CONFIG_SRAM_SIZE * 1K)
#define RAM_ADDR CONFIG_SRAM_BASE_ADDRESS
#endif
#else
#define RAM_SIZE (CONFIG_SRAM_SIZE * 1K - CONFIG_BOOTLOADER_SRAM_SIZE * 1K)
#define RAM_ADDR CONFIG_SRAM_BASE_ADDRESS
#endif
/* Set alignment to CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE
* to make linker section alignment comply with MPU granularity.
*/
#if defined(CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE)
_region_min_align = CONFIG_ARM_MPU_REGION_MIN_ALIGN_AND_SIZE;
#else
/* If building without MPU support, use default 4-byte alignment. */
_region_min_align = 4;
#endif
#if defined(CONFIG_MPU_REQUIRES_POWER_OF_TWO_ALIGNMENT)
#define MPU_ALIGN(region_size) \
. = ALIGN(_region_min_align); \
. = ALIGN( 1 << LOG2CEIL(region_size))
#else
#define MPU_ALIGN(region_size) \
. = ALIGN(_region_min_align)
#endif
MEMORY
{
FLASH (rx) : ORIGIN = ROM_ADDR, LENGTH = ROM_SIZE
#ifdef CONFIG_TI_CCFG_PRESENT
FLASH_CCFG (rwx): ORIGIN = CCFG_ADDR, LENGTH = CCFG_SIZE
#endif
#ifdef DT_CCM_BASE_ADDRESS
CCM (rw) : ORIGIN = DT_CCM_BASE_ADDRESS, LENGTH = DT_CCM_SIZE * 1K
#endif
SRAM (wx) : ORIGIN = RAM_ADDR, LENGTH = RAM_SIZE
#ifdef CONFIG_BT_STM32_IPM
SRAM1 (rw) : ORIGIN = RAM1_ADDR, LENGTH = RAM1_SIZE
SRAM2 (rw) : ORIGIN = RAM2_ADDR, LENGTH = RAM2_SIZE
#endif
/* Used by and documented in include/linker/intlist.ld */
IDT_LIST (wx) : ORIGIN = (RAM_ADDR + RAM_SIZE), LENGTH = 2K
}
ENTRY(CONFIG_KERNEL_ENTRY)
SECTIONS
{
#include <linker/rel-sections.ld>
/*
* .plt and .iplt are here according to 'arm-zephyr-elf-ld --verbose',
* before text section.
*/
SECTION_PROLOGUE(.plt,,)
{
*(.plt)
}
SECTION_PROLOGUE(.iplt,,)
{
*(.iplt)
}
GROUP_START(ROMABLE_REGION)
_image_rom_start = ROM_ADDR;
SECTION_PROLOGUE(_TEXT_SECTION_NAME,,)
{
#ifdef CONFIG_CC3220SF_DEBUG
/* Add CC3220SF flash header to disable flash verification */
. = 0x0;
KEEP(*(.dbghdr))
KEEP(*(".dbghdr.*"))
#endif
#ifdef CONFIG_NXP_IMX_RT_BOOT_HEADER
KEEP(*(.boot_hdr.conf))
. = CONFIG_IMAGE_VECTOR_TABLE_OFFSET;
KEEP(*(.boot_hdr.ivt))
KEEP(*(.boot_hdr.data))
#ifdef CONFIG_DEVICE_CONFIGURATION_DATA
KEEP(*(.boot_hdr.dcd_data))
#endif
#endif
. = CONFIG_TEXT_SECTION_OFFSET;
#if defined(CONFIG_SW_VECTOR_RELAY)
KEEP(*(.vector_relay_table))
KEEP(*(".vector_relay_table.*"))
KEEP(*(.vector_relay_handler))
KEEP(*(".vector_relay_handler.*"))
#endif
_vector_start = .;
KEEP(*(.exc_vector_table))
KEEP(*(".exc_vector_table.*"))
KEEP(*(IRQ_VECTOR_TABLE))
KEEP(*(.vectors))
KEEP(*(.openocd_dbg))
KEEP(*(".openocd_dbg.*"))
/* Kinetis has to write 16 bytes at 0x400 */
SKIP_TO_KINETIS_FLASH_CONFIG
KEEP(*(.kinetis_flash_config))
KEEP(*(".kinetis_flash_config.*"))
_vector_end = .;
} GROUP_LINK_IN(ROMABLE_REGION)
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_relocate.ld>
#endif /* CONFIG_CODE_DATA_RELOCATION */
SECTION_PROLOGUE(_TEXT_SECTION_NAME_2,,)
{
_image_text_start = .;
*(.text)
*(".text.*")
*(.gnu.linkonce.t.*)
/*
* These are here according to 'arm-zephyr-elf-ld --verbose',
* after .gnu.linkonce.t.*
*/
*(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
#include <linker/priv_stacks-text.ld>
#include <linker/kobject-text.ld>
} GROUP_LINK_IN(ROMABLE_REGION)
_image_text_end = .;
#if defined (CONFIG_CPLUSPLUS)
SECTION_PROLOGUE(.ARM.extab,,)
{
/*
* .ARM.extab section containing exception unwinding information.
*/
*(.ARM.extab* .gnu.linkonce.armextab.*)
} GROUP_LINK_IN(ROMABLE_REGION)
#endif
SECTION_PROLOGUE(.ARM.exidx,,)
{
/*
* This section, related to stack and exception unwinding, is placed
* explicitly to prevent it from being shared between multiple regions.
* It must be defined for gcc to support 64-bit math and avoid
* section overlap.
*/
__exidx_start = .;
#if defined (__GCC_LINKER_CMD__)
*(.ARM.exidx* gnu.linkonce.armexidx.*)
#endif
__exidx_end = .;
} GROUP_LINK_IN(ROMABLE_REGION)
_image_rodata_start = .;
#include <linker/common-rom.ld>
SECTION_PROLOGUE(_RODATA_SECTION_NAME,,)
{
*(.rodata)
*(".rodata.*")
*(.gnu.linkonce.r.*)
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rodata.ld>
#ifdef CONFIG_SOC_RODATA_LD
#include <soc-rodata.ld>
#endif
#ifdef CONFIG_CUSTOM_RODATA_LD
/* Located in project source directory */
#include <custom-rodata.ld>
#endif
#include <linker/priv_stacks-rom.ld>
#include <linker/kobject-rom.ld>
/*
* For XIP images, in order to avoid the situation when __data_rom_start
* is 32-bit aligned, but the actual data is placed right after rodata
* section, which may not end exactly at 32-bit border, pad rodata
* section, so __data_rom_start points at data and it is 32-bit aligned.
*
* On non-XIP images this may enlarge image size up to 3 bytes. This
* generally is not an issue, since modern ROM and FLASH memory is
* usually 4k aligned.
*/
. = ALIGN(4);
} GROUP_LINK_IN(ROMABLE_REGION)
_image_rodata_end = .;
MPU_ALIGN(_image_rodata_end -_image_rom_start);
_image_rom_end = .;
GROUP_END(ROMABLE_REGION)
/* Some TI SoCs have a special configuration footer, at the end of flash. */
#ifdef CONFIG_TI_CCFG_PRESENT
SECTION_PROLOGUE(.ti_ccfg,,)
{
KEEP(*(TI_CCFG))
} > FLASH_CCFG
#endif
/*
* These are here according to 'arm-zephyr-elf-ld --verbose',
* before data section.
*/
SECTION_PROLOGUE(.got,,)
{
*(.got.plt)
*(.igot.plt)
*(.got)
*(.igot)
}
GROUP_START(RAMABLE_REGION)
. = RAM_ADDR;
/* Align the start of image SRAM with the
* minimum granularity required by MPU.
*/
. = ALIGN(_region_min_align);
_image_ram_start = .;
#if defined(CONFIG_SOC_SERIES_STM32F0X) && !defined(CONFIG_IS_BOOTLOADER)
/* Must be first in ramable region */
SECTION_PROLOGUE(.st_stm32f0x_vt,(NOLOAD),)
{
_ram_vector_start = .;
. += _vector_end - _vector_start;
_ram_vector_end = .;
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
#endif
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-ram-sections.ld>
#if defined(CONFIG_USERSPACE)
#define APP_SHARED_ALIGN . = ALIGN(_region_min_align);
#define SMEM_PARTITION_ALIGN MPU_ALIGN
#include <app_smem.ld>
_app_smem_size = _app_smem_end - _app_smem_start;
_app_smem_rom_start = LOADADDR(_APP_SMEM_SECTION_NAME);
#endif /* CONFIG_USERSPACE */
SECTION_DATA_PROLOGUE(_BSS_SECTION_NAME,(NOLOAD),)
{
/*
* For performance, BSS section is assumed to be 4 byte aligned and
* a multiple of 4 bytes
*/
. = ALIGN(4);
__bss_start = .;
__kernel_ram_start = .;
*(.bss)
*(".bss.*")
*(COMMON)
*(".kernel_bss.*")
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_sram_bss_relocate.ld>
#endif
/*
* As memory is cleared in words only, it is simpler to ensure the BSS
* section ends on a 4 byte boundary. This wastes a maximum of 3 bytes.
*/
__bss_end = ALIGN(4);
} GROUP_DATA_LINK_IN(RAMABLE_REGION, RAMABLE_REGION)
SECTION_PROLOGUE(_NOINIT_SECTION_NAME,(NOLOAD),)
{
/*
* This section is used for non-initialized objects that
* will not be cleared during the boot process.
*/
*(.noinit)
*(".noinit.*")
*(".kernel_noinit.*")
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-noinit.ld>
#ifdef CONFIG_SOC_NOINIT_LD
#include <soc-noinit.ld>
#endif
} GROUP_LINK_IN(RAMABLE_REGION)
SECTION_DATA_PROLOGUE(_DATA_SECTION_NAME,,)
{
__data_ram_start = .;
*(.data)
*(".data.*")
*(".kernel.*")
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-rwdata.ld>
#ifdef CONFIG_SOC_RWDATA_LD
#include <soc-rwdata.ld>
#endif
#ifdef CONFIG_CUSTOM_RWDATA_LD
/* Located in project source directory */
#include <custom-rwdata.ld>
#endif
#ifdef CONFIG_CODE_DATA_RELOCATION
#include <linker_sram_data_relocate.ld>
#endif
} GROUP_DATA_LINK_IN(RAMABLE_REGION, ROMABLE_REGION)
__data_rom_start = LOADADDR(_DATA_SECTION_NAME);
#include <linker/common-ram.ld>
#include <linker/priv_stacks.ld>
#include <linker/kobject.ld>
#include <linker/priv_stacks-noinit.ld>
__data_ram_end = .;
/* Define linker symbols */
_image_ram_end = .;
_end = .; /* end of image */
__kernel_ram_end = RAM_ADDR + RAM_SIZE;
__kernel_ram_size = __kernel_ram_end - __kernel_ram_start;
GROUP_END(RAMABLE_REGION)
#ifdef CONFIG_CUSTOM_SECTIONS_LD
/* Located in project source directory */
#include <custom-sections.ld>
#endif
/* Located in generated directory. This file is populated by the
* zephyr_linker_sources() Cmake function.
*/
#include <snippets-sections.ld>
#include <linker/debug-sections.ld>
SECTION_PROLOGUE(.ARM.attributes, 0,)
{
KEEP(*(.ARM.attributes))
KEEP(*(.gnu.attributes))
}
#if defined(CONFIG_ARM_FIRMWARE_HAS_SECURE_ENTRY_FUNCS)
#if CONFIG_ARM_NSC_REGION_BASE_ADDRESS != 0
#define NSC_ALIGN . = ABSOLUTE(CONFIG_ARM_NSC_REGION_BASE_ADDRESS)
#elif defined(CONFIG_CPU_HAS_NRF_IDAU)
/* The nRF9160 needs the NSC region to be at the end of a 32 kB region. */
#define NSC_ALIGN . = ALIGN(0x8000) - (1 << LOG2CEIL(__sg_size))
#else
#define NSC_ALIGN . = ALIGN(4)
#endif
#ifdef CONFIG_CPU_HAS_NRF_IDAU
#define NSC_ALIGN_END . = ALIGN(0x8000)
#else
#define NSC_ALIGN_END . = ALIGN(4)
#endif
SECTION_PROLOGUE(.gnu.sgstubs,,)
{
NSC_ALIGN;
__sg_start = .;
/* No input section necessary, since the Secure Entry Veneers are
automatically placed after the .gnu.sgstubs output section. */
} GROUP_LINK_IN(ROMABLE_REGION)
__sg_end = .;
__sg_size = __sg_end - __sg_start;
NSC_ALIGN_END;
__nsc_size = . - __sg_start;
#ifdef CONFIG_CPU_HAS_NRF_IDAU
ASSERT(1 << LOG2CEIL(0x8000 - (__sg_start % 0x8000))
== (0x8000 - (__sg_start % 0x8000))
&& (0x8000 - (__sg_start % 0x8000)) >= 32
&& (0x8000 - (__sg_start % 0x8000)) <= 4096,
"The Non-Secure Callable region size must be a power of 2 \
between 32 and 4096 bytes.")
#endif
#endif /* CONFIG_ARM_FIRMWARE_HAS_SECURE_ENTRY_FUNCS */
/* Must be last in romable region */
SECTION_PROLOGUE(.last_section,(NOLOAD),)
{
} GROUP_LINK_IN(ROMABLE_REGION)
/* To provide the image size as a const expression,
* calculate this value here. */
_flash_used = LOADADDR(.last_section) - _image_rom_start;
}

View file

@ -53,6 +53,8 @@ extern "C" {
: "memory"); \ : "memory"); \
CODE_UNREACHABLE; \ CODE_UNREACHABLE; \
} while (false) } while (false)
#elif defined(CONFIG_ARMV7_R)
/* Pick up the default definition in kernel.h for now */
#else #else
#error Unknown ARM architecture #error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */ #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */

View file

@ -34,6 +34,14 @@ extern int z_arch_irq_is_enabled(unsigned int irq);
extern void _IntExit(void); extern void _IntExit(void);
#if defined(CONFIG_ARMV7_R)
static ALWAYS_INLINE void z_IntLibInit(void)
{
}
#else
extern void z_IntLibInit(void);
#endif
/* macros convert value of it's argument to a string */ /* macros convert value of it's argument to a string */
#define DO_TOSTR(s) #s #define DO_TOSTR(s) #s
#define TOSTR(s) DO_TOSTR(s) #define TOSTR(s) DO_TOSTR(s)

View file

@ -16,6 +16,7 @@
#ifndef ZEPHYR_INCLUDE_ARCH_ARM_SYSCALL_H_ #ifndef ZEPHYR_INCLUDE_ARCH_ARM_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_ARM_SYSCALL_H_ #define ZEPHYR_INCLUDE_ARCH_ARM_SYSCALL_H_
#define _SVC_CALL_CONTEXT_SWITCH 0
#define _SVC_CALL_IRQ_OFFLOAD 1 #define _SVC_CALL_IRQ_OFFLOAD 1
#define _SVC_CALL_RUNTIME_EXCEPT 2 #define _SVC_CALL_RUNTIME_EXCEPT 2
#define _SVC_CALL_SYSTEM_CALL 3 #define _SVC_CALL_SYSTEM_CALL 3

View file

@ -36,6 +36,12 @@ static inline void timestamp_serialize(void)
/* isb is available in all Cortex-M */ /* isb is available in all Cortex-M */
__ISB(); __ISB();
} }
#elif defined(CONFIG_CPU_CORTEX_R)
#include <arch/arm/cortex_r/cpu.h>
static inline void timestamp_serialize(void)
{
__ISB();
}
#elif defined(CONFIG_CPU_ARCV2) #elif defined(CONFIG_CPU_ARCV2)
#define timestamp_serialize() #define timestamp_serialize()
#elif defined(CONFIG_ARCH_POSIX) #elif defined(CONFIG_ARCH_POSIX)