ext: hal: cmsis: Add CMSIS-Core(R).

This commit adds a preliminary implementation of CMSIS-Core(R) for use
with the ARM Cortex-R port.

At this time, CMSIS-Core(R) has not been merged into the upstream CMSIS
repository and is therefore not available from official sources.

Until the upstream merge happens, the preliminary version can be obtained
from the following URL:

https://github.com/stephanosio/CMSIS_5/tree/core_r

Signed-off-by: Stephanos Ioannidis <root@stephanos.io>
Stephanos Ioannidis 2019-10-17 14:38:21 +09:00 committed by Anas Nashif
commit 4d30d6b121
12 changed files with 3422 additions and 0 deletions


@@ -1,3 +1,4 @@
# SPDX-License-Identifier: Apache-2.0
add_subdirectory_ifdef(CONFIG_HAS_CMSIS_CORE_M Core)
add_subdirectory_ifdef(CONFIG_HAS_CMSIS_CORE_R Core_R)


@@ -0,0 +1,11 @@
# SPDX-License-Identifier: Apache-2.0
zephyr_include_directories(Include)
# As of CMSIS v5.6.0, __PROGRAM_START indicates whether the ARM vendor
# or the OS supplies the data/bss init routine; otherwise, the default
# data/bss init routine for the selected toolchain is added. We set the
# macro at build time to guarantee compatibility with all existing ARM
# platforms.
zephyr_compile_definitions(__PROGRAM_START)
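
For context, a minimal sketch of the convention this macro hooks into
(based on the CMSIS v5.6.0 pattern; abridged and not part of this commit).
Pre-defining __PROGRAM_START, as done above, suppresses the default
__cmsis_start data/bss init routine, since Zephyr's own reset handler
performs that initialization:

/* Abridged sketch of the guarded block in CMSIS v5.6.0 cmsis_gcc.h */
#ifndef __PROGRAM_START
__STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void)
{
  /* default toolchain data/bss init would go here, then the C entry */
  for (;;) { /* placeholder: the real routine jumps to _start() */ }
}
#define __PROGRAM_START __cmsis_start
#endif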


@@ -0,0 +1,213 @@
/**************************************************************************//**
* @file cmsis_compiler.h
* @brief CMSIS compiler specific macros, functions, instructions
* @version V1.0.2
* @date 10. January 2018
******************************************************************************/
/*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CMSIS_COMPILER_H
#define __CMSIS_COMPILER_H
#include <stdint.h>
/*
* Arm Compiler 4/5
*/
#if defined ( __CC_ARM )
#include "cmsis_armcc.h"
/*
* Arm Compiler 6 (armclang)
*/
#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
#include "cmsis_armclang.h"
/*
* GNU Compiler
*/
#elif defined ( __GNUC__ )
#include "cmsis_gcc.h"
/*
* IAR Compiler
*/
#elif defined ( __ICCARM__ )
#include "cmsis_iccarm.h"
/*
* TI Arm Compiler
*/
#elif defined ( __TI_ARM__ )
#include <cmsis_ccs.h>
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((noreturn))
#endif
#ifndef CMSIS_DEPRECATED
#define CMSIS_DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __UNALIGNED_UINT32
struct __attribute__((packed)) T_UINT32 { uint32_t v; };
#define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __attribute__((aligned(x)))
#endif
#ifndef __PACKED
#define __PACKED __attribute__((packed))
#endif
#ifndef __COMPILER_BARRIER
#warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored.
#define __COMPILER_BARRIER() (void)0
#endif
/*
* TASKING Compiler
*/
#elif defined ( __TASKING__ )
/*
* The CMSIS functions have been implemented as intrinsics in the compiler.
* Please use "carm -?i" to get an up-to-date list of all intrinsics,
* including the CMSIS ones.
*/
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((noreturn))
#endif
#ifndef CMSIS_DEPRECATED
#define CMSIS_DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __UNALIGNED_UINT32
struct __packed__ T_UINT32 { uint32_t v; };
#define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __align(x)
#endif
#ifndef __PACKED
#define __PACKED __packed__
#endif
#ifndef __COMPILER_BARRIER
#warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored.
#define __COMPILER_BARRIER() (void)0
#endif
/*
* COSMIC Compiler
*/
#elif defined ( __CSMC__ )
#include <cmsis_csm.h>
#ifndef __ASM
#define __ASM _asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
// NO RETURN is automatically detected, hence no warning here
#define __NO_RETURN
#endif
#ifndef __USED
#warning No compiler specific solution for __USED. __USED is ignored.
#define __USED
#endif
#ifndef CMSIS_DEPRECATED
#warning No compiler specific solution for CMSIS_DEPRECATED. CMSIS_DEPRECATED is ignored.
#define CMSIS_DEPRECATED
#endif
#ifndef __WEAK
#define __WEAK __weak
#endif
#ifndef __UNALIGNED_UINT32
@packed struct T_UINT32 { uint32_t v; };
#define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
#endif
#ifndef __ALIGNED
#warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored.
#define __ALIGNED(x)
#endif
#ifndef __PACKED
#define __PACKED @packed
#endif
#ifndef __COMPILER_BARRIER
#warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored.
#define __COMPILER_BARRIER() (void)0
#endif
#else
#error Unknown compiler.
#endif
#endif /* __CMSIS_COMPILER_H */
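
A brief usage sketch (illustrative, not part of this commit): code built
on this header uses only the portable macros, and whichever compiler is
dispatched above resolves them to its own attributes or intrinsics. The
names below are hypothetical:

#include "cmsis_compiler.h"

/* 32-byte aligned DMA buffer and a weak, application-overridable hook */
static uint8_t dma_buf[64] __ALIGNED(32);

__WEAK void board_init_hook(void)
{
  /* default: do nothing; an application may provide a strong override */
}

__NO_RETURN __STATIC_FORCEINLINE void spin_forever(void)
{
  for (;;) {
    __COMPILER_BARRIER();  /* compiler-level barrier; no code is emitted */
  }
}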


@@ -0,0 +1,439 @@
/**************************************************************************//**
* @file cmsis_cp15.h for CMSIS-Core(R)
* @brief CMSIS compiler specific macros, functions, instructions
* @version V1.0.0
* @date 17. Oct 2019
******************************************************************************/
/*
* Copyright (c) 2009-2017 ARM Limited. All rights reserved.
* Copyright (c) 2019 Stephanos Ioannidis <root@stephanos.io>
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef __CMSIS_CP15_H
#define __CMSIS_CP15_H
/** \brief Get ACTLR
\return Auxiliary Control register value
*/
__STATIC_FORCEINLINE uint32_t __get_ACTLR(void)
{
uint32_t result;
__get_CP(15, 0, result, 1, 0, 1);
return(result);
}
/** \brief Set ACTLR
\param [in] actlr Auxiliary Control value to set
*/
__STATIC_FORCEINLINE void __set_ACTLR(uint32_t actlr)
{
__set_CP(15, 0, actlr, 1, 0, 1);
}
/** \brief Get CPACR
\return Coprocessor Access Control register value
*/
__STATIC_FORCEINLINE uint32_t __get_CPACR(void)
{
uint32_t result;
__get_CP(15, 0, result, 1, 0, 2);
return result;
}
/** \brief Set CPACR
\param [in] cpacr Coprocessor Access Control value to set
*/
__STATIC_FORCEINLINE void __set_CPACR(uint32_t cpacr)
{
__set_CP(15, 0, cpacr, 1, 0, 2);
}
/** \brief Get DFSR
\return Data Fault Status Register value
*/
__STATIC_FORCEINLINE uint32_t __get_DFSR(void)
{
uint32_t result;
__get_CP(15, 0, result, 5, 0, 0);
return result;
}
/** \brief Set DFSR
\param [in] dfsr Data Fault Status value to set
*/
__STATIC_FORCEINLINE void __set_DFSR(uint32_t dfsr)
{
__set_CP(15, 0, dfsr, 5, 0, 0);
}
/** \brief Get IFSR
\return Instruction Fault Status Register value
*/
__STATIC_FORCEINLINE uint32_t __get_IFSR(void)
{
uint32_t result;
__get_CP(15, 0, result, 5, 0, 1);
return result;
}
/** \brief Set IFSR
\param [in] ifsr Instruction Fault Status value to set
*/
__STATIC_FORCEINLINE void __set_IFSR(uint32_t ifsr)
{
__set_CP(15, 0, ifsr, 5, 0, 1);
}
#if (defined(__CORTEX_R) && (__CORTEX_R == 52U)) || \
defined(DOXYGEN)
/** \brief Get ISR
\return Interrupt Status Register value
*/
__STATIC_FORCEINLINE uint32_t __get_ISR(void)
{
uint32_t result;
__get_CP(15, 0, result, 12, 1, 0);
return result;
}
#endif
/** \brief Get CBAR
\return Configuration Base Address register value
*/
__STATIC_FORCEINLINE uint32_t __get_CBAR(void)
{
uint32_t result;
__get_CP(15, 4, result, 15, 0, 0);
return result;
}
/** \brief Set SCTLR
This function assigns the given value to the System Control Register.
\param [in] sctlr System Control Register value to set
*/
__STATIC_FORCEINLINE void __set_SCTLR(uint32_t sctlr)
{
__set_CP(15, 0, sctlr, 1, 0, 0);
}
/** \brief Get SCTLR
\return System Control Register value
*/
__STATIC_FORCEINLINE uint32_t __get_SCTLR(void)
{
uint32_t result;
__get_CP(15, 0, result, 1, 0, 0);
return result;
}
/** \brief Set ACTRL
\param [in] actrl Auxiliary Control Register value to set
*/
__STATIC_FORCEINLINE void __set_ACTRL(uint32_t actrl)
{
__set_CP(15, 0, actrl, 1, 0, 1);
}
/** \brief Get ACTRL
\return Auxiliary Control Register value
*/
__STATIC_FORCEINLINE uint32_t __get_ACTRL(void)
{
uint32_t result;
__get_CP(15, 0, result, 1, 0, 1);
return result;
}
/** \brief Get MPIDR
This function returns the value of the Multiprocessor Affinity Register.
\return Multiprocessor Affinity Register value
*/
__STATIC_FORCEINLINE uint32_t __get_MPIDR(void)
{
uint32_t result;
__get_CP(15, 0, result, 0, 0, 5);
return result;
}
/** \brief Get VBAR
This function returns the value of the Vector Base Address Register.
\return Vector Base Address Register
*/
__STATIC_FORCEINLINE uint32_t __get_VBAR(void)
{
uint32_t result;
__get_CP(15, 0, result, 12, 0, 0);
return result;
}
/** \brief Set VBAR
This function assigns the given value to the Vector Base Address Register.
\param [in] vbar Vector Base Address Register value to set
*/
__STATIC_FORCEINLINE void __set_VBAR(uint32_t vbar)
{
__set_CP(15, 0, vbar, 12, 0, 0);
}
#if (defined(__CORTEX_R) && (__CORTEX_R == 52U) && \
defined(__TIM_PRESENT) && (__TIM_PRESENT == 1U)) || \
defined(DOXYGEN)
/** \brief Set CNTFRQ
This function assigns the given value to the PL1 Physical Timer Counter Frequency Register (CNTFRQ).
\param [in] value CNTFRQ Register value to set
*/
__STATIC_FORCEINLINE void __set_CNTFRQ(uint32_t value)
{
__set_CP(15, 0, value, 14, 0, 0);
}
/** \brief Get CNTFRQ
This function returns the value of the PL1 Physical Timer Counter Frequency Register (CNTFRQ).
\return CNTFRQ Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CNTFRQ(void)
{
uint32_t result;
__get_CP(15, 0, result, 14, 0, 0);
return result;
}
/** \brief Set CNTP_TVAL
This function assigns the given value to the PL1 Physical Timer Value Register (CNTP_TVAL).
\param [in] value CNTP_TVAL Register value to set
*/
__STATIC_FORCEINLINE void __set_CNTP_TVAL(uint32_t value)
{
__set_CP(15, 0, value, 14, 2, 0);
}
/** \brief Get CNTP_TVAL
This function returns the value of the PL1 Physical Timer Value Register (CNTP_TVAL).
\return CNTP_TVAL Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CNTP_TVAL(void)
{
uint32_t result;
__get_CP(15, 0, result, 14, 2, 0);
return result;
}
/** \brief Get CNTPCT
This function returns the value of the 64-bit PL1 Physical Count Register (CNTPCT).
\return CNTPCT Register value
*/
__STATIC_FORCEINLINE uint64_t __get_CNTPCT(void)
{
uint64_t result;
__get_CP64(15, 0, result, 14);
return result;
}
/** \brief Set CNTP_CVAL
This function assigns the given value to the 64-bit PL1 Physical Timer CompareValue Register (CNTP_CVAL).
\param [in] value CNTP_CVAL Register value to set
*/
__STATIC_FORCEINLINE void __set_CNTP_CVAL(uint64_t value)
{
__set_CP64(15, 2, value, 14);
}
/** \brief Get CNTP_CVAL
This function returns the value of the 64-bit PL1 Physical Timer CompareValue Register (CNTP_CVAL).
\return CNTP_CVAL Register value
*/
__STATIC_FORCEINLINE uint64_t __get_CNTP_CVAL(void)
{
uint64_t result;
__get_CP64(15, 2, result, 14);
return result;
}
/** \brief Set CNTP_CTL
This function assigns the given value to the PL1 Physical Timer Control Register (CNTP_CTL).
\param [in] value CNTP_CTL Register value to set
*/
__STATIC_FORCEINLINE void __set_CNTP_CTL(uint32_t value)
{
__set_CP(15, 0, value, 14, 2, 1);
}
/** \brief Get CNTP_CTL register
\return CNTP_CTL Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CNTP_CTL(void)
{
uint32_t result;
__get_CP(15, 0, result, 14, 2, 1);
return result;
}
#endif
/** \brief Set BPIALL.
Branch Predictor Invalidate All
*/
__STATIC_FORCEINLINE void __set_BPIALL(uint32_t value)
{
__set_CP(15, 0, value, 7, 5, 6);
}
/** \brief Set ICIALLU
Instruction Cache Invalidate All
*/
__STATIC_FORCEINLINE void __set_ICIALLU(uint32_t value)
{
__set_CP(15, 0, value, 7, 5, 0);
}
/** \brief Set DCCMVAC
Data cache clean
*/
__STATIC_FORCEINLINE void __set_DCCMVAC(uint32_t value)
{
__set_CP(15, 0, value, 7, 10, 1);
}
/** \brief Set DCIMVAC
Data cache invalidate
*/
__STATIC_FORCEINLINE void __set_DCIMVAC(uint32_t value)
{
__set_CP(15, 0, value, 7, 6, 1);
}
/** \brief Set DCCIMVAC
Data cache clean and invalidate
*/
__STATIC_FORCEINLINE void __set_DCCIMVAC(uint32_t value)
{
__set_CP(15, 0, value, 7, 14, 1);
}
/** \brief Set CSSELR
*/
__STATIC_FORCEINLINE void __set_CSSELR(uint32_t value)
{
// __ASM volatile("MCR p15, 2, %0, c0, c0, 0" : : "r"(value) : "memory");
__set_CP(15, 2, value, 0, 0, 0);
}
/** \brief Get CSSELR
\return CSSELR Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CSSELR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 2, %0, c0, c0, 0" : "=r"(result) : : "memory");
__get_CP(15, 2, result, 0, 0, 0);
return result;
}
/** \brief Set CCSIDR
\deprecated CCSIDR itself is read-only. Use __set_CSSELR to select cache level instead.
*/
CMSIS_DEPRECATED
__STATIC_FORCEINLINE void __set_CCSIDR(uint32_t value)
{
__set_CSSELR(value);
}
/** \brief Get CCSIDR
\return CCSIDR Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CCSIDR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 1, %0, c0, c0, 0" : "=r"(result) : : "memory");
__get_CP(15, 1, result, 0, 0, 0);
return result;
}
/** \brief Get CLIDR
\return CLIDR Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CLIDR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 1, %0, c0, c0, 1" : "=r"(result) : : "memory");
__get_CP(15, 1, result, 0, 0, 1);
return result;
}
/** \brief Set DCISW
*/
__STATIC_FORCEINLINE void __set_DCISW(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c6, 2" : : "r"(value) : "memory")
__set_CP(15, 0, value, 7, 6, 2);
}
/** \brief Set DCCSW
*/
__STATIC_FORCEINLINE void __set_DCCSW(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c10, 2" : : "r"(value) : "memory")
__set_CP(15, 0, value, 7, 10, 2);
}
/** \brief Set DCCISW
*/
__STATIC_FORCEINLINE void __set_DCCISW(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c14, 2" : : "r"(value) : "memory")
__set_CP(15, 0, value, 7, 14, 2);
}
#endif
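
A short usage sketch (illustrative, not part of this commit): the CP15
accessors compose with the barrier intrinsics for typical early-boot
sequences, e.g. invalidating and then enabling the level 1 instruction
cache on an ARMv7-R core (SCTLR.I is bit 12):

__STATIC_INLINE void icache_enable(void)
{
  __set_ICIALLU(0U);                         /* invalidate entire I-cache */
  __DSB();                                   /* complete the maintenance op */
  __ISB();
  __set_SCTLR(__get_SCTLR() | (1UL << 12));  /* SCTLR.I: enable I-cache */
  __ISB();                                   /* flush the pipeline */
}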


@@ -0,0 +1,814 @@
/**************************************************************************//**
* @file cmsis_gcc.h for CMSIS-Core(R)
* @brief CMSIS compiler specific macros, functions, instructions
* @version V1.0.0
* @date 17. October 2019
******************************************************************************/
/*
* Copyright (c) 2009-2019 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H
/* ignore some GCC warnings */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"
/* Fallback for __has_builtin */
#ifndef __has_builtin
#define __has_builtin(x) (0)
#endif
/* CMSIS compiler specific defines */
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __FORCEINLINE
#define __FORCEINLINE __attribute__((always_inline))
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((__noreturn__))
#endif
#ifndef CMSIS_DEPRECATED
#define CMSIS_DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
#define __PACKED __attribute__((packed, aligned(1)))
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
#endif
#ifndef __UNALIGNED_UINT16_WRITE
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT16_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_WRITE */
__PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
#pragma GCC diagnostic pop
#define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT16_READ)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT16_READ */
__PACKED_STRUCT T_UINT16_READ { uint16_t v; };
#pragma GCC diagnostic pop
#define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef __UNALIGNED_UINT32_WRITE
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
/*lint -esym(9058, T_UINT32_WRITE)*/ /* disable MISRA 2012 Rule 2.4 for T_UINT32_WRITE */
__PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
#pragma GCC diagnostic pop
#define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
__PACKED_STRUCT T_UINT32_READ { uint32_t v; };
#pragma GCC diagnostic pop
#define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __attribute__((aligned(x)))
#endif
#ifndef __COMPILER_BARRIER
#define __COMPILER_BARRIER() __ASM volatile("":::"memory")
#endif
__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2)
{
int32_t result;
__ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
union llreg_u{
uint32_t w32[2];
uint64_t w64;
} llr;
llr.w64 = acc;
#ifndef __ARMEB__ /* Little endian */
__ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else /* Big endian */
__ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif
return(llr.w64);
}
__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2)
{
int32_t result;
__ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
uint32_t result;
__ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
uint32_t result;
__ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
return(result);
}
__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
union llreg_u{
uint32_t w32[2];
uint64_t w64;
} llr;
llr.w64 = acc;
#ifndef __ARMEB__ /* Little endian */
__ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else /* Big endian */
__ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif
return(llr.w64);
}
__STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
int32_t result;
__ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
return(result);
}
/* ########################## Core Instruction Access ######################### */
/**
\brief No Operation
*/
#define __NOP() __ASM volatile ("nop")
/**
\brief Wait For Interrupt
*/
#define __WFI() __ASM volatile ("wfi":::"memory")
/**
\brief Wait For Event
*/
#define __WFE() __ASM volatile ("wfe":::"memory")
/**
\brief Send Event
*/
#define __SEV() __ASM volatile ("sev")
/**
\brief Instruction Synchronization Barrier
\details Instruction Synchronization Barrier flushes the pipeline in the processor,
so that all instructions following the ISB are fetched from cache or memory,
after the instruction has been completed.
*/
__STATIC_FORCEINLINE void __ISB(void)
{
__ASM volatile ("isb 0xF":::"memory");
}
/**
\brief Data Synchronization Barrier
\details Acts as a special kind of Data Memory Barrier.
It completes when all explicit memory accesses before this instruction complete.
*/
__STATIC_FORCEINLINE void __DSB(void)
{
__ASM volatile ("dsb 0xF":::"memory");
}
/**
\brief Data Memory Barrier
\details Ensures the apparent order of the explicit memory operations before
and after the instruction, without ensuring their completion.
*/
__STATIC_FORCEINLINE void __DMB(void)
{
__ASM volatile ("dmb 0xF":::"memory");
}
/**
\brief Reverse byte order (32 bit)
\details Reverses the byte order in an unsigned integer value. For example, 0x12345678 becomes 0x78563412.
\param [in] value Value to reverse
\return Reversed value
*/
__STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
return __builtin_bswap32(value);
#else
uint32_t result;
__ASM ("rev %0, %1" : "=r" (result) : "r" (value) );
return result;
#endif
}
/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
\param [in] value Value to reverse
\return Reversed value
*/
__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value)
{
uint32_t result;
__ASM ("rev16 %0, %1" : "=r" (result) : "r" (value));
return result;
}
/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
\param [in] value Value to reverse
\return Reversed value
*/
__STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
return (int16_t)__builtin_bswap16(value);
#else
int16_t result;
__ASM ("revsh %0, %1" : "=r" (result) : "r" (value) );
return result;
#endif
}
/**
\brief Rotate Right in unsigned value (32 bit)
\details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
\param [in] op1 Value to rotate
\param [in] op2 Number of Bits to rotate
\return Rotated value
*/
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
op2 %= 32U;
if (op2 == 0U) {
return op1;
}
return (op1 >> op2) | (op1 << (32U - op2));
}
/**
\brief Breakpoint
\param [in] value is ignored by the processor.
If required, a debugger can use it to store additional information about the breakpoint.
*/
#define __BKPT(value) __ASM volatile ("bkpt "#value)
/**
\brief Reverse bit order of value
\details Reverses the bit order of the given value.
\param [in] value Value to reverse
\return Reversed value
*/
__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
{
uint32_t result;
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \
(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) )
__ASM ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
int32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */
result = value; /* r will be reversed bits of v; first get LSB of v */
for (value >>= 1U; value; value >>= 1U)
{
result <<= 1U;
result |= value & 1U;
s--;
}
result <<= s; /* shift when v's highest bits are zero */
#endif
return result;
}
/**
\brief Count leading zeros
\param [in] value Value to count the leading zeros
\return number of leading zeros in value
*/
__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
{
/* Even though __builtin_clz produces a CLZ instruction on ARM, formally
__builtin_clz(0) is undefined behaviour, so handle this case specially.
This guarantees ARM-compatible results if happening to compile on a non-ARM
target, and ensures the compiler doesn't decide to activate any
optimisations using the logic "value was passed to __builtin_clz, so it
is non-zero".
ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
single CLZ instruction.
*/
if (value == 0U)
{
return 32U;
}
return __builtin_clz(value);
}
/**
\brief LDR Exclusive (8 bit)
\details Executes an exclusive LDR instruction for 8 bit values.
\param [in] ptr Pointer to data
\return value of type uint8_t at (*ptr)
*/
__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
uint32_t result;
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
__ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
/* Prior to GCC 4.8, "Q" will be expanded to [rx, #0], which is not
accepted by the assembler, so the following less efficient pattern
has to be used.
*/
__ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
return ((uint8_t) result); /* Add explicit type cast here */
}
/**
\brief LDR Exclusive (16 bit)
\details Executes an exclusive LDR instruction for 16 bit values.
\param [in] ptr Pointer to data
\return value of type uint16_t at (*ptr)
*/
__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
uint32_t result;
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
__ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
/* Prior to GCC 4.8, "Q" will be expanded to [rx, #0], which is not
accepted by the assembler, so the following less efficient pattern
has to be used.
*/
__ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
return ((uint16_t) result); /* Add explicit type cast here */
}
/**
\brief LDR Exclusive (32 bit)
\details Executes an exclusive LDR instruction for 32 bit values.
\param [in] ptr Pointer to data
\return value of type uint32_t at (*ptr)
*/
__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
uint32_t result;
__ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
return(result);
}
/**
\brief STR Exclusive (8 bit)
\details Executes an exclusive STR instruction for 8 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
uint32_t result;
__ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
return(result);
}
/**
\brief STR Exclusive (16 bit)
\details Executes an exclusive STR instruction for 16 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
uint32_t result;
__ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
return(result);
}
/**
\brief STR Exclusive (32 bit)
\details Executes an exclusive STR instruction for 32 bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
uint32_t result;
__ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
return(result);
}
/**
\brief Remove the exclusive lock
\details Removes the exclusive lock which is created by LDREX.
*/
__STATIC_FORCEINLINE void __CLREX(void)
{
__ASM volatile ("clrex" ::: "memory");
}
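/*
 * Illustrative composition (not part of this commit): the canonical
 * retry loop built from the exclusive-access intrinsics above. The
 * helper name is hypothetical.
 */
__STATIC_FORCEINLINE uint32_t __atomic_fetch_add_u32(volatile uint32_t *addr, uint32_t inc)
{
  uint32_t old;
  do {
    old = __LDREXW(addr);                     /* open the exclusive monitor */
  } while (__STREXW(old + inc, addr) != 0U);  /* 0 means the store succeeded */
  return old;
}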
/**
\brief Signed Saturate
\details Saturates a signed value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (1..32)
\return Saturated value
*/
#define __SSAT(ARG1, ARG2) \
__extension__ \
({ \
int32_t __RES, __ARG1 = (ARG1); \
__ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
__RES; \
})
/**
\brief Unsigned Saturate
\details Saturates an unsigned value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (0..31)
\return Saturated value
*/
#define __USAT(ARG1, ARG2) \
__extension__ \
({ \
uint32_t __RES, __ARG1 = (ARG1); \
__ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \
__RES; \
})
/* ########################### Core Function Access ########################### */
/**
\brief Enable IRQ Interrupts
\details Enables IRQ interrupts by clearing the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__STATIC_FORCEINLINE void __enable_irq(void)
{
__ASM volatile ("cpsie i" : : : "memory");
}
/**
\brief Disable IRQ Interrupts
\details Disables IRQ interrupts by setting the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__STATIC_FORCEINLINE void __disable_irq(void)
{
__ASM volatile ("cpsid i" : : : "memory");
}
/**
\brief Get FPSCR
\details Returns the current value of the Floating Point Status/Control register.
\return Floating Point Status/Control register value
*/
__STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
#if __has_builtin(__builtin_arm_get_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
/* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
return __builtin_arm_get_fpscr();
#else
uint32_t result;
__ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
return(result);
#endif
#else
return(0U);
#endif
}
/**
\brief Set FPSCR
\details Assigns the given value to the Floating Point Status/Control register.
\param [in] fpscr Floating Point Status/Control value to set
*/
__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
#if __has_builtin(__builtin_arm_set_fpscr)
// Re-enable using built-in when GCC has been fixed
// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
/* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
__builtin_arm_set_fpscr(fpscr);
#else
__ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory");
#endif
#else
(void)fpscr;
#endif
}
/** \brief Get CPSR Register
\return CPSR Register value
*/
__STATIC_FORCEINLINE uint32_t __get_CPSR(void)
{
uint32_t result;
__ASM volatile("MRS %0, cpsr" : "=r" (result) );
return(result);
}
/** \brief Set CPSR Register
\param [in] cpsr CPSR value to set
*/
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory");
}
/** \brief Get Mode
\return Processor Mode
*/
__STATIC_FORCEINLINE uint32_t __get_mode(void)
{
return (__get_CPSR() & 0x1FU);
}
/** \brief Set Mode
\param [in] mode Mode value to set
*/
__STATIC_FORCEINLINE void __set_mode(uint32_t mode)
{
__ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
}
/** \brief Get Stack Pointer
\return Stack Pointer value
*/
__STATIC_FORCEINLINE uint32_t __get_SP(void)
{
uint32_t result;
__ASM volatile("MOV %0, sp" : "=r" (result) : : "memory");
return result;
}
/** \brief Set Stack Pointer
\param [in] stack Stack Pointer value to set
*/
__STATIC_FORCEINLINE void __set_SP(uint32_t stack)
{
__ASM volatile("MOV sp, %0" : : "r" (stack) : "memory");
}
/** \brief Get USR/SYS Stack Pointer
\return USR/SYS Stack Pointer value
*/
__STATIC_FORCEINLINE uint32_t __get_SP_usr(void)
{
uint32_t cpsr = __get_CPSR();
uint32_t result;
__ASM volatile(
"CPS #0x1F \n"
"MOV %0, sp " : "=r"(result) : : "memory"
);
__set_CPSR(cpsr);
__ISB();
return result;
}
/** \brief Set USR/SYS Stack Pointer
\param [in] topOfProcStack USR/SYS Stack Pointer value to set
*/
__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack)
{
uint32_t cpsr = __get_CPSR();
__ASM volatile(
"CPS #0x1F \n"
"MOV sp, %0 " : : "r" (topOfProcStack) : "memory"
);
__set_CPSR(cpsr);
__ISB();
}
/** \brief Get FPEXC
\return Floating Point Exception Control register value
*/
__STATIC_FORCEINLINE uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
uint32_t result;
__ASM volatile("VMRS %0, fpexc" : "=r" (result) );
return(result);
#else
return(0);
#endif
}
/** \brief Set FPEXC
\param [in] fpexc Floating Point Exception Control value to set
*/
__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
__ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory");
#endif
}
/*
* Include common core functions to access Coprocessor 15 registers
*/
#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )
#include "cmsis_cp15.h"
/** \brief Enable Floating Point Unit
Critical section, called from undef handler, so systick is disabled
*/
__STATIC_INLINE void __FPU_Enable(void)
{
__ASM volatile(
//Permit access to VFP/NEON registers by modifying CPACR
" MRC p15,0,R1,c1,c0,2 \n"
" ORR R1,R1,#0x00F00000 \n"
" MCR p15,0,R1,c1,c0,2 \n"
//Ensure that subsequent instructions occur in a context where VFP/NEON access is permitted
" ISB \n"
//Enable VFP/NEON
" VMRS R1,FPEXC \n"
" ORR R1,R1,#0x40000000 \n"
" VMSR FPEXC,R1 \n"
//Initialise VFP/NEON registers to 0
" MOV R2,#0 \n"
//Initialise D16 registers to 0
" VMOV D0, R2,R2 \n"
" VMOV D1, R2,R2 \n"
" VMOV D2, R2,R2 \n"
" VMOV D3, R2,R2 \n"
" VMOV D4, R2,R2 \n"
" VMOV D5, R2,R2 \n"
" VMOV D6, R2,R2 \n"
" VMOV D7, R2,R2 \n"
" VMOV D8, R2,R2 \n"
" VMOV D9, R2,R2 \n"
" VMOV D10,R2,R2 \n"
" VMOV D11,R2,R2 \n"
" VMOV D12,R2,R2 \n"
" VMOV D13,R2,R2 \n"
" VMOV D14,R2,R2 \n"
" VMOV D15,R2,R2 \n"
#if (defined(__ARM_NEON) && (__ARM_NEON == 1))
//Initialise D32 registers to 0
" VMOV D16,R2,R2 \n"
" VMOV D17,R2,R2 \n"
" VMOV D18,R2,R2 \n"
" VMOV D19,R2,R2 \n"
" VMOV D20,R2,R2 \n"
" VMOV D21,R2,R2 \n"
" VMOV D22,R2,R2 \n"
" VMOV D23,R2,R2 \n"
" VMOV D24,R2,R2 \n"
" VMOV D25,R2,R2 \n"
" VMOV D26,R2,R2 \n"
" VMOV D27,R2,R2 \n"
" VMOV D28,R2,R2 \n"
" VMOV D29,R2,R2 \n"
" VMOV D30,R2,R2 \n"
" VMOV D31,R2,R2 \n"
#endif
//Initialise FPSCR to a known state
" VMRS R1,FPSCR \n"
" LDR R2,=0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
" AND R1,R1,R2 \n"
" VMSR FPSCR,R1 "
: : : "cc", "r1", "r2"
);
}
#pragma GCC diagnostic pop
#endif /* __CMSIS_GCC_H */
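
A small usage sketch (illustrative, not part of this commit): the CPSR
helpers pair naturally into a nestable IRQ lock, using the fact that
CPSR.I is bit 7:

static inline uint32_t irq_lock(void)
{
  uint32_t key = __get_CPSR();
  __disable_irq();
  return key;
}

static inline void irq_unlock(uint32_t key)
{
  if ((key & (1UL << 7)) == 0U) {  /* IRQs were enabled before the lock */
    __enable_irq();
  }
}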

File diff suppressed because it is too large.


@@ -0,0 +1,61 @@
/**************************************************************************//**
* @file core_cr4.h
* @brief CMSIS Cortex-R4 Core Peripheral Access Layer Header File
* @version V1.0.0
* @date 17. October 2019
******************************************************************************/
/*
* Copyright (c) 2009-2018 ARM Limited. All rights reserved.
* Copyright (c) 2019 Stephanos Ioannidis <root@stephanos.io>
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef __CORE_CR4_H_GENERIC
#define __CORE_CR4_H_GENERIC
#ifdef __cplusplus
extern "C" {
#endif
#define __CORTEX_R 4U
/* Configuration of the Cortex-R4 Processor and Core Peripherals */
#ifndef __CR_REV
#define __CR_REV 0U
#endif
#ifndef __FPU_PRESENT
#define __FPU_PRESENT 0U
#endif
#define __GIC_PRESENT 0U
#define __TIM_PRESENT 0U
#define __MPU_PRESENT 1U
/* Include Cortex-R Common Peripheral Access Layer header */
#include "core_cr.h"
#ifdef __cplusplus
}
#endif
#endif /* __CORE_CR4_H_GENERIC */
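
These configuration macros are intended to be set by the device header
before it includes the core header; a hypothetical vendor header for a
Cortex-R4F part might look like this (names are illustrative):

/* device_foo.h -- hypothetical Cortex-R4F device header */
#define __CR_REV       1U    /* core revision */
#define __FPU_PRESENT  1U    /* this part includes the VFP */
#include "core_cr4.h"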


@@ -0,0 +1,61 @@
/**************************************************************************//**
* @file core_cr5.h
* @brief CMSIS Cortex-R5 Core Peripheral Access Layer Header File
* @version V1.0.0
* @date 17. October 2019
******************************************************************************/
/*
* Copyright (c) 2009-2018 ARM Limited. All rights reserved.
* Copyright (c) 2019 Stephanos Ioannidis <root@stephanos.io>
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef __CORE_CR5_H_GENERIC
#define __CORE_CR5_H_GENERIC
#ifdef __cplusplus
extern "C" {
#endif
#define __CORTEX_R 5U
/* Configuration of the Cortex-R5 Processor and Core Peripherals */
#ifndef __CR_REV
#define __CR_REV 0U
#endif
#ifndef __FPU_PRESENT
#define __FPU_PRESENT 0U
#endif
#define __GIC_PRESENT 0U
#define __TIM_PRESENT 0U
#define __MPU_PRESENT 1U
/* Include Cortex-R Common Peripheral Access Layer header */
#include "core_cr.h"
#ifdef __cplusplus
}
#endif
#endif /* __CORE_CR5_H_GENERIC */


@@ -0,0 +1,61 @@
/**************************************************************************//**
* @file core_cr52.h
* @brief CMSIS Cortex-R52 Core Peripheral Access Layer Header File
* @version V1.0.0
* @date 17. October 2019
******************************************************************************/
/*
* Copyright (c) 2009-2018 ARM Limited. All rights reserved.
* Copyright (c) 2019 Stephanos Ioannidis <root@stephanos.io>
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef __CORE_CR52_H_GENERIC
#define __CORE_CR52_H_GENERIC
#ifdef __cplusplus
extern "C" {
#endif
#define __CORTEX_R 52U
/* Configuration of the Cortex-R52 Processor and Core Peripherals */
#ifndef __CR_REV
#define __CR_REV 0U
#endif
#ifndef __FPU_PRESENT
#define __FPU_PRESENT 1U
#endif
#define __GIC_PRESENT 1U
#define __TIM_PRESENT 1U
#define __MPU_PRESENT 1U
/* Include Cortex-R Common Peripheral Access Layer header */
#include "core_cr.h"
#ifdef __cplusplus
}
#endif
#endif /* __CORE_CR52_H_GENERIC */


@@ -0,0 +1,61 @@
/**************************************************************************//**
* @file core_cr7.h
* @brief CMSIS Cortex-R7 Core Peripheral Access Layer Header File
* @version V1.0.0
* @date 17. October 2019
******************************************************************************/
/*
* Copyright (c) 2009-2018 ARM Limited. All rights reserved.
* Copyright (c) 2019 Stephanos Ioannidis <root@stephanos.io>
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef __CORE_CR7_H_GENERIC
#define __CORE_CR7_H_GENERIC
#ifdef __cplusplus
extern "C" {
#endif
#define __CORTEX_R 7U
/* Configuration of the Cortex-R7 Processor and Core Peripherals */
#ifndef __CR_REV
#define __CR_REV 0U
#endif
#ifndef __FPU_PRESENT
#define __FPU_PRESENT 0U
#endif
#define __GIC_PRESENT 1U
#define __TIM_PRESENT 1U
#define __MPU_PRESENT 1U
/* Include Cortex-R Common Peripheral Access Layer header */
#include "core_cr.h"
#ifdef __cplusplus
}
#endif
#endif /* __CORE_CR7_H_GENERIC */


@@ -0,0 +1,61 @@
/**************************************************************************//**
* @file core_cr8.h
* @brief CMSIS Cortex-R8 Core Peripheral Access Layer Header File
* @version V1.0.0
* @date 17. October 2019
******************************************************************************/
/*
* Copyright (c) 2009-2018 ARM Limited. All rights reserved.
* Copyright (c) 2019 Stephanos Ioannidis <root@stephanos.io>
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef __CORE_CR8_H_GENERIC
#define __CORE_CR8_H_GENERIC
#ifdef __cplusplus
extern "C" {
#endif
#define __CORTEX_R 8U
/* Configuration of the Cortex-R8 Processor and Core Peripherals */
#ifndef __CR_REV
#define __CR_REV 0U
#endif
#ifndef __FPU_PRESENT
#define __FPU_PRESENT 0U
#endif
#define __GIC_PRESENT 1U
#define __TIM_PRESENT 1U
#define __MPU_PRESENT 1U
/* Include Cortex-R Common Peripheral Access Layer header */
#include "core_cr.h"
#ifdef __cplusplus
}
#endif
#endif /* __CORE_CR8_H_GENERIC */


@@ -4,10 +4,14 @@
config HAS_CMSIS_CORE
	bool
	select HAS_CMSIS_CORE_M if CPU_CORTEX_M
	select HAS_CMSIS_CORE_R if CPU_CORTEX_R

if HAS_CMSIS_CORE

config HAS_CMSIS_CORE_M
	bool

config HAS_CMSIS_CORE_R
	bool

endif