From ffacae20d0d3c022be57d897cde590d3309960e2 Mon Sep 17 00:00:00 2001
From: Ricardo Salveti
Date: Wed, 5 Oct 2016 19:43:36 -0300
Subject: [PATCH] arch/arm: add initial support for Cortex-M0/M0+

SysTick is not disabled, as it is optional per the spec. SVC is not
used, as there is no priority-based interrupt masking (only PendSV
is used).

Largely based on previous work done by Euan Mutch.

Jira: ZEP-783

Change-Id: I38e29bfcf0624c1aea5f9fd7a74230faa1b59e8b
Signed-off-by: Ricardo Salveti
---
 arch/arm/Makefile                          |   8 ++
 arch/arm/core/cortex_m/Kconfig             |  27 +++++-
 arch/arm/core/cortex_m/reset.S             |   4 +
 arch/arm/core/cortex_m/vector_table.S      |  12 +++
 arch/arm/core/cortex_m/vector_table.h      |   2 +
 arch/arm/core/cpu_idle.S                   |  33 +++++--
 arch/arm/core/exc_exit.S                   |  27 +++---
 arch/arm/core/fault.c                      |  43 ++++++---
 arch/arm/core/fault_s.S                    |  21 ++++
 arch/arm/core/gdb_stub.S                   |  34 +++++--
 arch/arm/core/isr_wrapper.S                |  27 +++++-
 arch/arm/core/swap.S                       | 106 +++++++++++++++++++--
 arch/arm/include/cortex_m/exc.h            |   2 +
 include/arch/arm/cortex_m/asm_inline_gcc.h |  48 +++++-----
 include/arch/arm/cortex_m/gdb_stub.h       |   6 +-
 include/arch/arm/cortex_m/memory_map.h     |   2 +-
 include/arch/arm/cortex_m/nvic.h           |   2 +
 include/arch/arm/cortex_m/scb.h            |   5 +
 18 files changed, 325 insertions(+), 84 deletions(-)

diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index b0964e659ce..bc187eda169 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -27,6 +27,14 @@ endif
 cflags-cortex-m7 = $(call cc-option,-mabi=aapcs -mthumb -mcpu=cortex-m7) \
 		$(call cc-option,-mthumb -march=armv7e-m)
 
+aflags-$(CONFIG_CPU_CORTEX_M0) += $(cflags-cortex-m0)
+cflags-$(CONFIG_CPU_CORTEX_M0) += $(cflags-cortex-m0)
+cxxflags-$(CONFIG_CPU_CORTEX_M0) += $(cflags-cortex-m0)
+
+aflags-$(CONFIG_CPU_CORTEX_M0PLUS) += $(cflags-cortex-m0+)
+cflags-$(CONFIG_CPU_CORTEX_M0PLUS) += $(cflags-cortex-m0+)
+cxxflags-$(CONFIG_CPU_CORTEX_M0PLUS) += $(cflags-cortex-m0+)
+
 aflags-$(CONFIG_CPU_CORTEX_M3) += $(cflags-cortex-m3)
 cflags-$(CONFIG_CPU_CORTEX_M3) += $(cflags-cortex-m3)
 cxxflags-$(CONFIG_CPU_CORTEX_M3) += $(cflags-cortex-m3)
diff --git a/arch/arm/core/cortex_m/Kconfig b/arch/arm/core/cortex_m/Kconfig
index dcf2e6a9b39..09824736031 100644
--- a/arch/arm/core/cortex_m/Kconfig
+++ b/arch/arm/core/cortex_m/Kconfig
@@ -45,6 +45,15 @@ config ISA_THUMB2
 	  technology is featured in the processor, and in all ARMv7
 	  architecture-based processors.
 
+config CPU_CORTEX_M0_M0PLUS
+	bool
+	# Omit prompt to signify "hidden" option
+	default n
+	select ATOMIC_OPERATIONS_C
+	select ISA_THUMB2
+	help
+	  This option signifies the use of either a Cortex-M0 or Cortex-M0+ CPU.
+
 config CPU_CORTEX_M3_M4
 	bool
 	# Omit prompt to signify "hidden" option
@@ -54,6 +63,20 @@ config CPU_CORTEX_M3_M4
 	help
 	  This option signifies the use of either a Cortex-M3 or Cortex-M4 CPU.
 
+config CPU_CORTEX_M0
+	bool
+	# Omit prompt to signify "hidden" option
+	select CPU_CORTEX_M0_M0PLUS
+	help
+	  This option signifies the use of a Cortex-M0 CPU.
+
+config CPU_CORTEX_M0PLUS
+	bool
+	# Omit prompt to signify "hidden" option
+	select CPU_CORTEX_M0_M0PLUS
+	help
+	  This option signifies the use of a Cortex-M0+ CPU.
+
 config CPU_CORTEX_M3
 	bool
 	# Omit prompt to signify "hidden" option
@@ -158,8 +181,8 @@ config FLASH_BASE_ADDRESS
 	  avoid modifying it via the menu configuration.
 
 endmenu
 
-menu "ARM Cortex-M3/M4 options"
-	depends on CPU_CORTEX_M3_M4
+menu "ARM Cortex-M0/M0+/M3/M4 options"
+	depends on CPU_CORTEX_M0_M0PLUS || CPU_CORTEX_M3_M4
 
 config IRQ_OFFLOAD
 	bool "Enable IRQ offload"
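A note on the hidden Kconfig symbol above: CPU_CORTEX_M0_M0PLUS selects
ATOMIC_OPERATIONS_C because ARMv6-M has no LDREX/STREX exclusive-access
instructions, so atomic operations fall back to plain C guarded by an
interrupt lock (which maps to PRIMASK on these cores). A minimal sketch of
what such a C-based atomic looks like; the names mirror Zephyr's atomic and
irq_lock()/irq_unlock() APIs, but the body is illustrative, not the kernel's
actual implementation:

    /* Illustrative C-based atomic add of the kind ATOMIC_OPERATIONS_C
     * enables on cores without exclusives.
     */
    typedef int atomic_t;

    extern unsigned int irq_lock(void);
    extern void irq_unlock(unsigned int key);

    static atomic_t atomic_add_sketch(volatile atomic_t *target, atomic_t value)
    {
            unsigned int key = irq_lock();  /* sets PRIMASK on Cortex-M0/M0+ */
            atomic_t old = *target;

            *target = old + value;
            irq_unlock(key);                /* restores previous PRIMASK state */

            return old;                     /* atomic ops return the old value */
    }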
diff --git a/arch/arm/core/cortex_m/reset.S b/arch/arm/core/cortex_m/reset.S
index f19c3349399..f980048059a 100644
--- a/arch/arm/core/cortex_m/reset.S
+++ b/arch/arm/core/cortex_m/reset.S
@@ -70,8 +70,12 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
 #endif
 
 	/* lock interrupts: will get unlocked when switch to main task */
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	cpsid i
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
 	msr BASEPRI, r0
+#endif
 
 	/*
 	 * Set PSP and use it to boot without using MSP, so that it
diff --git a/arch/arm/core/cortex_m/vector_table.S b/arch/arm/core/cortex_m/vector_table.S
index 246a90cee1d..bea6ee6e155 100644
--- a/arch/arm/core/cortex_m/vector_table.S
+++ b/arch/arm/core/cortex_m/vector_table.S
@@ -48,6 +48,17 @@ SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,__start)
 	.word __nmi
 
 	.word __hard_fault
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	.word __reserved
+	.word __reserved
+	.word __reserved
+	.word __reserved
+	.word __reserved
+	.word __reserved
+	.word __reserved
+	.word __reserved /* SVC not used for now (PendSV used instead) */
+	.word __reserved
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	.word __mpu_fault
 	.word __bus_fault
 	.word __usage_fault
@@ -57,6 +68,7 @@ SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,__start)
 	.word __reserved
 	.word __svc
 	.word __debug_monitor
+#endif
 	.word __reserved
 	.word __pendsv
 	.word _timer_int_handler
diff --git a/arch/arm/core/cortex_m/vector_table.h b/arch/arm/core/cortex_m/vector_table.h
index 3199868dbea..be706e85e82 100644
--- a/arch/arm/core/cortex_m/vector_table.h
+++ b/arch/arm/core/cortex_m/vector_table.h
@@ -52,11 +52,13 @@ GTEXT(_vector_table)
 GTEXT(__reset)
 GTEXT(__nmi)
 GTEXT(__hard_fault)
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 GTEXT(__mpu_fault)
 GTEXT(__bus_fault)
 GTEXT(__usage_fault)
 GTEXT(__svc)
 GTEXT(__debug_monitor)
+#endif
 GTEXT(__pendsv)
 GTEXT(__reserved)
diff --git a/arch/arm/core/cpu_idle.S b/arch/arm/core/cpu_idle.S
index 3cf36767db6..b0c30ba43ac 100644
--- a/arch/arm/core/cpu_idle.S
+++ b/arch/arm/core/cpu_idle.S
@@ -123,12 +123,17 @@ SECTION_FUNC(TEXT, nano_cpu_idle)
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
 	push {lr}
 	bl _sys_k_event_logger_enter_sleep
-	pop {lr}
+	pop {r0}
+	mov lr, r0
 #endif
 
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	cpsie i
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	/* clear BASEPRI so wfi is awakened by incoming interrupts */
 	eors.n r0, r0
 	msr BASEPRI, r0
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 
 	wfi
 
@@ -164,16 +169,10 @@ SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
 	push {lr}
 	bl _sys_k_event_logger_enter_sleep
-	pop {lr}
+	pop {r1}
+	mov lr, r1
 #endif
 
-	/*
-	 * r0: interrupt mask from caller
-	 * r1: zero, for setting BASEPRI (needs a register)
-	 */
-
-	eors.n r1, r1
-
 	/*
 	 * Lock PRIMASK while sleeping: wfe will still get interrupted by incoming
 	 * interrupts but the CPU will not service them right away.
@@ -185,6 +184,21 @@ SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
 	 * touched again.
 	 */
 
+	/* r0: interrupt mask from caller */
+
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* No BASEPRI, call wfe directly (SEVONPEND set in _CpuIdleInit()) */
+	wfe
+
+	cmp r0, #0
+	bne _irq_disabled
+	cpsie i
+_irq_disabled:
+
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
+	/* r1: zero, for setting BASEPRI (needs a register) */
+	eors.n r1, r1
+
 	/* unlock BASEPRI so wfe gets interrupted by incoming interrupts */
 	msr BASEPRI, r1
 
@@ -192,4 +206,5 @@ SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
 	msr BASEPRI, r0
 
 	cpsie i
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	bx lr
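The nano_cpu_atomic_idle() change above leans on wfe plus the SCR.SEVONPEND
bit, which the comment notes is set in _CpuIdleInit(): with SEVONPEND set, an
interrupt entering the pending state emits a wake-up event even while PRIMASK
masks it. A sketch of setting that bit from C, using the architectural SCR
address (0xE000ED10, bit 4, per the ARMv6-M manual); the helper name is
illustrative, not the kernel's _CpuIdleInit():

    #include <stdint.h>

    /* Illustrative only: set SCR.SEVONPEND so a pending interrupt
     * wakes a wfe-based idle loop even while PRIMASK is set.
     */
    #define SCB_SCR        (*(volatile uint32_t *)0xE000ED10)
    #define SCR_SEVONPEND  (1U << 4)    /* send event on pending interrupt */

    static inline void cpu_idle_init_sketch(void)
    {
            SCB_SCR |= SCR_SEVONPEND;
    }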
diff --git a/arch/arm/core/exc_exit.S b/arch/arm/core/exc_exit.S
index 1ac0aa6980a..b6f2a5285d3 100644
--- a/arch/arm/core/exc_exit.S
+++ b/arch/arm/core/exc_exit.S
@@ -16,7 +16,7 @@
 
 /**
  * @file
- * @brief ARM CORTEX-M3 exception/interrupt exit API
+ * @brief ARM CORTEX-M exception/interrupt exit API
  *
  *
  * Provides functions for performing kernel handling when exiting exceptions or
@@ -44,10 +44,7 @@ GTEXT(_is_next_thread_current)
 #if CONFIG_GDB_INFO
 #define _EXIT_EXC_IF_FIBER_PREEMPTED beq _ExcExitWithGdbStub
 #else
-	_EXIT_EXC_IF_FIBER_PREEMPTED: .macro
-	it eq
-	bxeq lr
-	.endm
+	#define _EXIT_EXC_IF_FIBER_PREEMPTED beq _EXIT_EXC
 #endif
 #define _EXIT_EXC_IF_FIBER_NOT_READY _EXIT_EXC_IF_FIBER_PREEMPTED
 
@@ -104,26 +101,28 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
 
 	/* coop thread ? do not schedule */
 	cmp r2, #0
-	it lt
-	bxlt lr
+	blt _EXIT_EXC
 
 	/* scheduler locked ? do not schedule */
 	cmp r3, #0
-	it gt
-	bxgt lr
+	bgt _EXIT_EXC
 
 	push {lr}
 	blx _is_next_thread_current
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	pop {r1}
+	mov lr, r1
+#else
 	pop {lr}
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	cmp r0, #0
-	it ne
-	bxne lr
-
+	bne _EXIT_EXC
 #else
 	/* is the current thread preemptible (task) ? */
 	ldr r2, [r1, #__tNANO_flags_OFFSET]
-	ands.w r2, #PREEMPTIBLE
+	ldr r3, =PREEMPTIBLE
+	ands r2, r3
 	_EXIT_EXC_IF_FIBER_PREEMPTED
 
 	/* is there a fiber ready ? */
@@ -142,4 +141,6 @@ _ExcExitWithGdbStub:
 
 	_GDB_STUB_EXC_EXIT
 
+_EXIT_EXC:
+	bx lr
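For readers following the _ExcExit rewrite above: Thumb-1 has no IT blocks,
so the conditional returns become branches to a shared _EXIT_EXC label, but
the scheduling decision itself is unchanged. Rendered as C it is roughly the
following; the struct fields and the pend_context_switch() helper are
illustrative stand-ins, not kernel definitions:

    /* Rough C rendering of the check _ExcExit performs on exception
     * exit (KERNEL_V2 path); sketch only, not the actual kernel source.
     */
    struct tcs_sketch {
            int prio;           /* negative for cooperative threads */
            int sched_locked;   /* >0 while the scheduler is locked */
    };

    extern int _is_next_thread_current(void);
    extern void pend_context_switch(void);  /* hypothetical: pends PendSV */

    static void exc_exit_sketch(struct tcs_sketch *current)
    {
            if (current->prio < 0)
                    return;     /* cooperative thread: do not schedule */
            if (current->sched_locked > 0)
                    return;     /* scheduler locked: do not schedule */
            if (_is_next_thread_current())
                    return;     /* current thread is still the right one */

            pend_context_switch();
    }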
diff --git a/arch/arm/core/fault.c b/arch/arm/core/fault.c
index b061ddfa694..8bc14a25bdd 100644
--- a/arch/arm/core/fault.c
+++ b/arch/arm/core/fault.c
@@ -64,13 +64,14 @@
  */
 void _FaultDump(const NANO_ESF *esf, int fault)
 {
-	int escalation = 0;
-
 	PR_EXC("Fault! EXC #%d, Thread: %p, instr @ 0x%" PRIx32 "\n",
 	       fault,
 	       sys_thread_self_get(),
 	       esf->pc);
 
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	int escalation = 0;
+
 	if (3 == fault) { /* hard fault */
 		escalation = _ScbHardFaultIsForced();
 		PR_EXC("HARD FAULT: %s\n",
@@ -99,6 +100,7 @@ void _FaultDump(const NANO_ESF *esf, int fault)
 
 	/* clear USFR sticky bits */
 	_ScbUsageFaultAllFaultsReset();
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
 }
 #endif
 
@@ -119,6 +121,8 @@ static void _FaultThreadShow(const NANO_ESF *esf)
 	       esf->pc);
 }
 
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+
 /**
  *
  * @brief Dump MPU fault information
@@ -226,6 +230,21 @@ static void _UsageFault(const NANO_ESF *esf)
 	_ScbUsageFaultAllFaultsReset();
 }
 
+/**
+ *
+ * @brief Dump debug monitor exception information
+ *
+ * See _FaultDump() for example.
+ *
+ * @return N/A
+ */
+static void _DebugMonitor(const NANO_ESF *esf)
+{
+	PR_EXC("***** Debug monitor exception (not implemented) *****\n");
+}
+
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
+
 /**
  *
  * @brief Dump hard fault information
@@ -237,6 +256,10 @@ static void _UsageFault(const NANO_ESF *esf)
 static void _HardFault(const NANO_ESF *esf)
 {
 	PR_EXC("***** HARD FAULT *****\n");
+
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	_FaultThreadShow(esf);
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	if (_ScbHardFaultIsBusErrOnVectorRead()) {
 		PR_EXC("  Bus fault on vector table read\n");
 	} else if (_ScbHardFaultIsForced()) {
@@ -249,19 +272,7 @@ static void _HardFault(const NANO_ESF *esf)
 			_UsageFault(esf);
 		}
 	}
-}
-
-/**
- *
- * @brief Dump debug monitor exception information
- *
- * See _FaultDump() for example.
- *
- * @return N/A
- */
-static void _DebugMonitor(const NANO_ESF *esf)
-{
-	PR_EXC("***** Debug monitor exception (not implemented) *****\n");
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 }
 
 /**
@@ -304,6 +315,7 @@ static void _FaultDump(const NANO_ESF *esf, int fault)
 	case 3:
 		_HardFault(esf);
 		break;
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 	case 4:
 		_MpuFault(esf, 0);
 		break;
@@ -316,6 +328,7 @@ static void _FaultDump(const NANO_ESF *esf, int fault)
 	case 12:
 		_DebugMonitor(esf);
 		break;
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
 	default:
 		_ReservedException(esf, fault);
 		break;
diff --git a/arch/arm/core/fault_s.S b/arch/arm/core/fault_s.S
index bb2577c2da1..cc09acb8842 100644
--- a/arch/arm/core/fault_s.S
+++ b/arch/arm/core/fault_s.S
@@ -32,10 +32,12 @@ _ASM_FILE_PROLOGUE
 
 GTEXT(_Fault)
 
 GTEXT(__hard_fault)
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 GTEXT(__mpu_fault)
 GTEXT(__bus_fault)
 GTEXT(__usage_fault)
 GTEXT(__debug_monitor)
+#endif
 GTEXT(__reserved)
 
 /**
@@ -62,14 +64,32 @@ GTEXT(__reserved)
 */
 
 SECTION_SUBSEC_FUNC(TEXT,__fault,__hard_fault)
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
SECTION_SUBSEC_FUNC(TEXT,__fault,__mpu_fault)
 SECTION_SUBSEC_FUNC(TEXT,__fault,__bus_fault)
 SECTION_SUBSEC_FUNC(TEXT,__fault,__usage_fault)
 SECTION_SUBSEC_FUNC(TEXT,__fault,__debug_monitor)
+#endif
 SECTION_SUBSEC_FUNC(TEXT,__fault,__reserved)
 
 	_GDB_STUB_EXC_ENTRY
 
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* force unlock interrupts */
+	cpsie i
+
+	/* Use EXC_RETURN state to find out if stack frame is on the MSP or PSP */
+	ldr r0, =0x4
+	mov r1, lr
+	tst r1, r0
+	beq _stack_frame_msp
+	mrs r0, PSP
+	bne _stack_frame_endif
+_stack_frame_msp:
+	mrs r0, MSP
+_stack_frame_endif:
+
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	/* force unlock interrupts */
 	eors.n r0, r0
 	msr BASEPRI, r0
@@ -84,6 +104,7 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__reserved)
 	 * is a nested exception: the stack frame is on the MSP */
 	mrsne r0, PSP /* if not, we are returning to thread mode, thus this is
 	 * not a nested exception: the stack frame is on the PSP */
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 
 	push {lr}
 	bl _Fault
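The open-coded MSP/PSP selection above replaces the M3/M4 ite/mrsne sequence:
bit 2 of the EXC_RETURN value in lr records which stack pointer the hardware
used when pushing the exception frame. The same test in C, with hypothetical
read_psp()/read_msp() helpers standing in for the mrs instructions:

    #include <stdint.h>

    /* Hypothetical helpers; the real code reads PSP/MSP with mrs. */
    extern uint32_t read_psp(void);
    extern uint32_t read_msp(void);

    /* Returns a pointer to the hardware-saved exception stack frame
     * (r0-r3, r12, lr, pc, xPSR), given the EXC_RETURN value from lr.
     */
    static const uint32_t *exc_frame_sketch(uint32_t exc_return)
    {
            /* bit 2 set: frame was pushed on the process stack (PSP) */
            return (const uint32_t *)((exc_return & 0x4) ? read_psp()
                                                         : read_msp());
    }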
diff --git a/arch/arm/core/gdb_stub.S b/arch/arm/core/gdb_stub.S
index c957681f885..fefcdda5e3f 100644
--- a/arch/arm/core/gdb_stub.S
+++ b/arch/arm/core/gdb_stub.S
@@ -65,11 +65,14 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry)
 	ldr r2, [r1, #__tNANO_flags_OFFSET]
 
 	/* already in an exception, do not update the registers */
-	ands r3, r2, #EXC_ACTIVE
-	it ne
-	bxne lr
+	ldr r3, =EXC_ACTIVE
+	ands r3, r2
+	beq _GdbStubEditReg
+	bx lr
 
-	orrs r2, #EXC_ACTIVE
+_GdbStubEditReg:
+	ldr r3, =EXC_ACTIVE
+	orrs r2, r3
 	str r2, [r1, #__tNANO_flags_OFFSET]
 	ldr r1, [r1, #__tNANO_current_OFFSET]
 	str r2, [r1, #__tTCS_flags_OFFSET]
@@ -77,7 +80,20 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry)
 	/* save callee-saved + psp in TCS */
 	adds r1, #__tTCS_preempReg_OFFSET
 	mrs ip, PSP
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* Store current r4-r7 */
+	stmea r1!, {r4-r7}
+	/* copy r8-r12 into r3-r7 */
+	mov r3, r8
+	mov r4, r9
+	mov r5, r10
+	mov r6, r11
+	mov r7, ip
+	/* store r8-r12 */
+	stmea r1!, {r3-r7}
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	stmia r1, {v1-v8, ip}
+#endif
 
 	bx lr
 
@@ -98,17 +114,20 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry)
 
 SECTION_FUNC(TEXT, _GdbStubExcExit)
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 	/* if we're nested (ie. !RETTOBASE), do not reset EXC_ACTIVE */
 	ldr r1, =_SCS_ICSR
 	ldr r1, [r1]
 	ands r1, #_SCS_ICSR_RETTOBASE
 	it eq
 	bxeq lr
+#endif
 
 	ldr r1, =_nanokernel
 	ldr r2, [r1, #__tNANO_flags_OFFSET]
 
-	bic r2, #EXC_ACTIVE
+	ldr r3, =EXC_ACTIVE
+	bics r2, r3
 	str r2, [r1, #__tNANO_flags_OFFSET]
 	ldr r1, [r1, #__tNANO_current_OFFSET]
 	str r2, [r1, #__tTCS_flags_OFFSET]
@@ -138,11 +157,12 @@ SECTION_FUNC(TEXT, _irq_vector_table_entry_with_gdb_stub)
 
 	_GDB_STUB_EXC_ENTRY
 
 	mrs r0, IPSR	/* get exception number */
-	sub r0, r0, #16	/* get IRQ number */
+	subs r0, #16	/* get IRQ number */
 	ldr r1, =_irq_vector_table
 
 	/* grab real ISR at address: r1 + (r0 << 2) (table is 4-byte wide) */
-	ldr r1, [r1, r0, LSL #2]
+	lsls r3, r0, #2
+	ldr r1, [r1, r3]
 
 	/* jump to ISR, no return: ISR is responsible for calling _IntExit */
 	bx r1
diff --git a/arch/arm/core/isr_wrapper.S b/arch/arm/core/isr_wrapper.S
index 84d6d8ac069..dded16b4521 100644
--- a/arch/arm/core/isr_wrapper.S
+++ b/arch/arm/core/isr_wrapper.S
@@ -58,15 +58,11 @@ SECTION_FUNC(TEXT, _isr_wrapper)
 	push {lr}		/* lr is now the first item on the stack */
 
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
-	push {lr}
 	bl _sys_k_event_logger_interrupt
-	pop {lr}
 #endif
 
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
-	push {lr}
 	bl _sys_k_event_logger_exit_sleep
-	pop {lr}
 #endif
 
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
@@ -84,25 +80,46 @@ SECTION_FUNC(TEXT, _isr_wrapper)
 	ldr r2, =_nanokernel
 	ldr r0, [r2, #__tNANO_idle_OFFSET]	/* requested idle duration, in ticks */
 	cmp r0, #0
+
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	beq _idle_state_cleared
+	movs.n r1, #0
+	str r1, [r2, #__tNANO_idle_OFFSET]	/* clear kernel idle state */
+	blx _sys_power_save_idle_exit
+_idle_state_cleared:
+
+#else
 	ittt ne
 	movne r1, #0
 	strne r1, [r2, #__tNANO_idle_OFFSET]	/* clear kernel idle state */
 	blxne _sys_power_save_idle_exit
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 
 	cpsie i		/* re-enable interrupts (PRIMASK = 0) */
 #endif
 
 	mrs r0, IPSR	/* get exception number */
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	ldr r1, =16
+	subs r0, r1	/* get IRQ number */
+	lsls r0, #3	/* table is 8-byte wide */
+#else
 	sub r0, r0, #16	/* get IRQ number */
 	lsl r0, r0, #3	/* table is 8-byte wide */
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	ldr r1, =_sw_isr_table
 	add r1, r1, r0	/* table entry: ISRs must have their MSB set to stay
 			 * in thumb mode */
 
-	ldmia r1,{r0,r3}	/* arg in r0, ISR in r3 */
+	ldm r1!,{r0,r3}	/* arg in r0, ISR in r3 */
 	blx r3		/* call ISR */
 
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	pop {r3}
+	mov lr, r3
+#else
 	pop {lr}
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 
 	/* exception return is done in _IntExit(), including _GDB_STUB_EXC_EXIT */
 	b _IntExit
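The IRQ-to-entry arithmetic above (subtract 16, shift left by 3) works
because each _sw_isr_table entry is two 32-bit words, argument first and
handler second, exactly as the "arg in r0, ISR in r3" load implies. A C
rendering of the same lookup; the struct name and dispatch helper are
illustrative, not the kernel's declarations:

    #include <stdint.h>

    /* Each software ISR table entry is 8 bytes: argument, then handler,
     * mirroring the ldm in _isr_wrapper above. Sketch only.
     */
    struct sw_isr_entry {
            void *arg;
            void (*isr)(void *arg);
    };

    extern struct sw_isr_entry _sw_isr_table[];

    static void dispatch_sketch(uint32_t ipsr)
    {
            uint32_t irq = ipsr - 16;   /* exceptions 0-15 precede external IRQs */
            struct sw_isr_entry *e = &_sw_isr_table[irq];

            e->isr(e->arg);
    }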
diff --git a/arch/arm/core/swap.S b/arch/arm/core/swap.S
index 9f061e81dba..28e9e5efa98 100644
--- a/arch/arm/core/swap.S
+++ b/arch/arm/core/swap.S
@@ -32,7 +32,9 @@ _ASM_FILE_PROLOGUE
 
 GTEXT(_Swap)
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 GTEXT(__svc)
+#endif
 GTEXT(__pendsv)
 #ifdef CONFIG_KERNEL_V2
 GTEXT(_get_next_ready_thread)
@@ -66,7 +68,8 @@ SECTION_FUNC(TEXT, __pendsv)
 	/* Register the context switch */
 	push {lr}
 	bl _sys_k_event_logger_context_switch
-	pop {lr}
+	pop {r0}
+	mov lr, r0
 #endif
 
 	/* load _Nanokernel into r1 and current tTCS into r2 */
@@ -74,16 +77,30 @@
 	ldr r2, [r1, #__tNANO_current_OFFSET]
 
 	/* addr of callee-saved regs in TCS in r0 */
-	add r0, r2, #__tTCS_preempReg_OFFSET
+	ldr r0, =__tTCS_preempReg_OFFSET
+	add r0, r2
 
 	/* save callee-saved + psp in TCS */
 	mrs ip, PSP
-	stmia r0, {v1-v8, ip}
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* Store current r4-r7 */
+	stmea r0!, {r4-r7}
+	/* copy r8-r12 into r3-r7 */
+	mov r3, r8
+	mov r4, r9
+	mov r5, r10
+	mov r6, r11
+	mov r7, ip
+	/* store r8-r12 */
+	stmea r0!, {r3-r7}
+#else
+	stmia r0, {v1-v8, ip}
 
 #ifdef CONFIG_FP_SHARING
 	add r0, r2, #__tTCS_preemp_float_regs_OFFSET
 	vstmia r0, {s16-s31}
-#endif
+#endif /* CONFIG_FP_SHARING */
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 
 	/*
 	 * Prepare to clear PendSV with interrupts unlocked, but
@@ -96,8 +113,12 @@ SECTION_FUNC(TEXT, __pendsv)
 	ldr v3, =_SCS_ICSR_UNPENDSV
 
 	/* protect the kernel state while we play with the thread lists */
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	cpsid i
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
 	msr BASEPRI, r0
+#endif
 
 	/* find out incoming thread (fiber or task) */
 
@@ -117,11 +138,23 @@ SECTION_FUNC(TEXT, __pendsv)
 	 * if so, remove fiber from list
 	 * else, the task is the thread we're switching in
 	 */
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* branch over remove if eq, otherwise we branch over switch */
+	beq _switch_in_task
+	ldr r0, [r2, #__tTCS_link_OFFSET]	/* then */
+	str r0, [r1, #__tNANO_fiber_OFFSET]	/* then */
+	bne _switch_in_task_endif
+_switch_in_task:
+	ldr r2, [r1, #__tNANO_task_OFFSET]	/* else */
+_switch_in_task_endif:
+
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	itte ne
 	ldrne.w r0, [r2, #__tTCS_link_OFFSET]	/* then */
 	strne.w r0, [r1, #__tNANO_fiber_OFFSET]	/* then */
 	ldreq.w r2, [r1, #__tNANO_task_OFFSET]	/* else */
-#endif
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
+#endif /* CONFIG_KERNEL_V2 */
 
 	/* r2 contains the new thread */
 
 #if !defined(CONFIG_KERNEL_V2)
@@ -142,10 +175,41 @@ SECTION_FUNC(TEXT, __pendsv)
 	/* _SCS_ICSR is still in v4 and _SCS_ICSR_UNPENDSV in v3 */
 	str v3, [v4, #0]
 
-	/* restore BASEPRI for the incoming thread */
+	/* Restore previous interrupt disable state (irq_lock key) */
 	ldr r0, [r2, #__tTCS_basepri_OFFSET]
-	mov ip, #0
-	str ip, [r2, #__tTCS_basepri_OFFSET]
+	movs.n r3, #0
+	str r3, [r2, #__tTCS_basepri_OFFSET]
+
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* BASEPRI not available, previous interrupt disable state
+	 * maps to PRIMASK.
+	 *
+	 * Only enable interrupts if value is 0, meaning interrupts
+	 * were enabled before irq_lock was called.
+	 */
+	cmp r0, #0
+	bne _thread_irq_disabled
+	cpsie i
+_thread_irq_disabled:
+
+	ldr r4, =__tTCS_preempReg_OFFSET
+	adds r0, r2, r4
+
+	/* restore r4-r12 for new thread */
+	/* first restore r8-r12 located after r4-r7 (4*4 bytes) */
+	adds r0, #16
+	ldmia r0!, {r3-r7}
+	/* move to correct registers */
+	mov r8, r3
+	mov r9, r4
+	mov r10, r5
+	mov r11, r6
+	mov ip, r7
+	/* restore r4-r7, go back 9*4 bytes to the start of the stored block */
+	subs r0, #36
+	ldmia r0!, {r4-r7}
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
+	/* restore BASEPRI for the incoming thread */
 	msr BASEPRI, r0
 
 #ifdef CONFIG_FP_SHARING
@@ -156,6 +220,8 @@ SECTION_FUNC(TEXT, __pendsv)
 	/* load callee-saved + psp from TCS */
 	add r0, r2, #__tTCS_preempReg_OFFSET
 	ldmia r0, {v1-v8, ip}
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
+
 	msr PSP, ip
 
 	_GDB_STUB_EXC_EXIT
@@ -163,6 +229,7 @@ SECTION_FUNC(TEXT, __pendsv)
 	/* exc return */
 	bx lr
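Since SVC is unavailable as a context-switch entry point on these cores,
_Swap (further below) pends PendSV directly through the SCB's ICSR register,
and the __pendsv handler above then does all of the save/restore work. A
minimal C sketch of that trigger, using the architectural ICSR address
(0xE000ED04) and PENDSVSET bit (28); the helper name is illustrative:

    #include <stdint.h>

    #define SCB_ICSR        (*(volatile uint32_t *)0xE000ED04)
    #define ICSR_PENDSVSET  (1U << 28)  /* write 1 to pend PendSV */

    /* Illustrative counterpart of what _Swap does on M0/M0+: pend
     * PendSV, then re-enable interrupts so the handler (running at
     * priority 0xff) can be taken.
     */
    static inline void pend_context_switch_sketch(void)
    {
            SCB_ICSR = ICSR_PENDSVSET;
            __asm__ volatile("cpsie i" ::: "memory");
    }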
 
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 /**
  *
  * @brief Service call handler
  *
@@ -233,6 +300,7 @@ _context_switch:
 
 	/* handler mode exit, to PendSV */
 	bx lr
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
 
 /**
  *
  * @brief Initiate a cooperative context switch
  *
@@ -259,6 +327,9 @@ _context_switch:
  * outgoing thread. This is all performed by the hardware, which stores it in
  * its exception stack frame, created when handling the svc exception.
  *
+ * On Cortex-M0/M0+ the intlock key is represented by the PRIMASK register,
+ * as BASEPRI is not available.
+ *
 * @return may contain a return value setup by a call to fiberRtnValueSet()
 *
 * C function prototype:
@@ -273,7 +344,26 @@ SECTION_FUNC(TEXT, _Swap)
 	ldr r2, [r1, #__tNANO_current_OFFSET]
 	str r0, [r2, #__tTCS_basepri_OFFSET]
 
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* No priority-based interrupt masking on M0/M0+, so PendSV is
+	 * pended directly instead of going through svc.
+	 */
+	ldr r1, =_SCS_ICSR
+	ldr r2, =_SCS_ICSR_PENDSV
+	str r2, [r1, #0]
+
+	/* Unlock interrupts to allow PendSV, since it runs at prio 0xff.
+	 *
+	 * The PendSV handler will be called if there are no other
+	 * interrupts of a higher priority pending.
+	 */
+	cpsie i
+
+	/* PC stored in stack frame by the hw */
+	bx lr
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	svc #0
 
 	/* r0 contains the return value if needed */
 	bx lr
+#endif
diff --git a/arch/arm/include/cortex_m/exc.h b/arch/arm/include/cortex_m/exc.h
index 2fb05e64db7..6157f61e798 100644
--- a/arch/arm/include/cortex_m/exc.h
+++ b/arch/arm/include/cortex_m/exc.h
@@ -72,6 +72,7 @@ static ALWAYS_INLINE int _IsInIsr(void)
 static ALWAYS_INLINE void _ExcSetup(void)
 {
 	_ScbExcPrioSet(_EXC_PENDSV, _EXC_PRIO(0xff));
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 	_ScbExcPrioSet(_EXC_SVC, _EXC_PRIO(0x01));
 	_ScbExcPrioSet(_EXC_MPU_FAULT, _EXC_PRIO(0x01));
 	_ScbExcPrioSet(_EXC_BUS_FAULT, _EXC_PRIO(0x01));
@@ -80,6 +81,7 @@ static ALWAYS_INLINE void _ExcSetup(void)
 	_ScbUsageFaultEnable();
 	_ScbBusFaultEnable();
 	_ScbMemFaultEnable();
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
 }
 
 #endif /* _ASMLANGUAGE */
diff --git a/include/arch/arm/cortex_m/asm_inline_gcc.h b/include/arch/arm/cortex_m/asm_inline_gcc.h
index 85a4a1c9101..ccce5f08ef7 100644
--- a/include/arch/arm/cortex_m/asm_inline_gcc.h
+++ b/include/arch/arm/cortex_m/asm_inline_gcc.h
@@ -57,17 +57,11 @@ extern "C" {
 
 static ALWAYS_INLINE unsigned int find_msb_set(uint32_t op)
 {
-	unsigned int bit;
+	if (!op) {
+		return 0;
+	}
 
-	__asm__ volatile(
-		"cmp %1, #0;\n\t"
-		"itt ne;\n\t"
-		"  clzne %1, %1;\n\t"
-		"  rsbne %0, %1, #32;\n\t"
-		: "=r"(bit)
-		: "r"(op));
-
-	return bit;
+	return 32 - __builtin_clz(op);
 }
 
 
@@ -85,18 +79,7 @@ static ALWAYS_INLINE unsigned int find_msb_set(uint32_t op)
 
 static ALWAYS_INLINE unsigned int find_lsb_set(uint32_t op)
 {
-	unsigned int bit;
-
-	__asm__ volatile(
-		"rsb %0, %1, #0;\n\t"
-		"ands %0, %0, %1;\n\t"	/* r0 = x & (-x): only LSB set */
-		"itt ne;\n\t"
-		"  clzne %0, %0;\n\t"	/* count leading zeroes */
-		"  rsbne %0, %0, #32;\n\t"
-		: "=&r"(bit)
-		: "r"(op));
-
-	return bit;
+	return __builtin_ffs(op);
 }
 
 
@@ -135,12 +118,21 @@ static ALWAYS_INLINE unsigned int find_lsb_set(uint32_t op)
 *
 * On Cortex-M3/M4, this function prevents exceptions of priority lower than
 * the two highest priorities from interrupting the CPU.
+ *
+ * On Cortex-M0/M0+, this function reads the value of PRIMASK, which shows
+ * whether interrupts are enabled, and then disables all interrupts except NMI.
+ *
 */
 static ALWAYS_INLINE unsigned int _arch_irq_lock(void)
 {
 	unsigned int key;
 
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	__asm__ volatile("mrs %0, PRIMASK;\n\t"
+			 "cpsid i;\n\t"
+			 : "=r" (key));
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	__asm__ volatile(
 		"movs.n %%r1, %1;\n\t"
 		"mrs %0, BASEPRI;\n\t"
 		"msr BASEPRI, %%r1;\n\t"
 		: "=r"(key)
 		: "i"(_EXC_IRQ_DEFAULT_PRIO)
 		: "r1");
+#endif
 
 	return key;
 }
@@ -166,11 +159,22 @@ static ALWAYS_INLINE unsigned int _arch_irq_lock(void)
 * @param key architecture-dependent lock-out key
 *
 * @return N/A
+ *
+ * On Cortex-M0/M0+, this enables all interrupts if they were not
+ * previously disabled.
+ *
 */
 static ALWAYS_INLINE void _arch_irq_unlock(unsigned int key)
 {
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	if (key) {
+		return;
+	}
+	__asm__ volatile("cpsie i;\n\t");
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	__asm__ volatile("msr BASEPRI, %0;\n\t" : : "r"(key));
+#endif
 }
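Replacing the IT-block assembly with __builtin_clz()/__builtin_ffs() keeps
find_msb_set() and find_lsb_set() buildable for Thumb-1, where the compiler
picks a suitable expansion. A standalone check of the intended 1-based
semantics (sketch only, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    /* Same definitions as the patched inline helpers above. */
    static unsigned int find_msb_set(uint32_t op)
    {
            return op ? 32 - __builtin_clz(op) : 0;
    }

    static unsigned int find_lsb_set(uint32_t op)
    {
            return __builtin_ffs(op);   /* 0 for op == 0, else 1-based index */
    }

    int main(void)
    {
            assert(find_msb_set(0x80000000u) == 32);  /* highest bit -> 32 */
            assert(find_msb_set(0x1) == 1);
            assert(find_lsb_set(0x8) == 4);           /* bit 3 -> 1-based 4 */
            assert(find_lsb_set(0) == 0);
            return 0;
    }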
diff --git a/include/arch/arm/cortex_m/gdb_stub.h b/include/arch/arm/cortex_m/gdb_stub.h
index 37c30ae6d69..744a990be85 100644
--- a/include/arch/arm/cortex_m/gdb_stub.h
+++ b/include/arch/arm/cortex_m/gdb_stub.h
@@ -40,7 +40,8 @@ _GDB_STUB_EXC_ENTRY : .macro
 	bl irq_lock
 	bl _GdbStubExcEntry
 	bl irq_unlock
-	pop {lr}
+	pop {r1}
+	mov lr, r1
 	.endm
 
 GTEXT(_GdbStubExcExit)
@@ -49,7 +50,8 @@ _GDB_STUB_EXC_EXIT : .macro
 	bl irq_lock
 	bl _GdbStubExcExit
 	bl irq_unlock
-	pop {lr}
+	pop {r1}
+	mov lr, r1
 	.endm
 
 GTEXT(_irq_vector_table_entry_with_gdb_stub)
diff --git a/include/arch/arm/cortex_m/memory_map.h b/include/arch/arm/cortex_m/memory_map.h
index 6f1f3da0d96..eb0edadf44d 100644
--- a/include/arch/arm/cortex_m/memory_map.h
+++ b/include/arch/arm/cortex_m/memory_map.h
@@ -57,7 +57,7 @@
 
 /* 0xe0000000 -> 0xffffffff: varies by processor (see below) */
 
-#if defined(CONFIG_CPU_CORTEX_M3_M4)
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS) || defined(CONFIG_CPU_CORTEX_M3_M4)
 
 /* 0xe0000000 -> 0xe00fffff: private peripheral bus */
 
 /* 0xe0000000 -> 0xe003ffff: internal [256KB] */
diff --git a/include/arch/arm/cortex_m/nvic.h b/include/arch/arm/cortex_m/nvic.h
index 0ed2fbafb5f..64e63d739f8 100644
--- a/include/arch/arm/cortex_m/nvic.h
+++ b/include/arch/arm/cortex_m/nvic.h
@@ -204,6 +204,7 @@ static inline uint32_t _NvicIrqPrioGet(unsigned int irq)
 	return __scs.nvic.ipr[irq];
 }
 
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 /**
 *
 * @brief Trigger an interrupt via software
@@ -224,6 +225,7 @@ static inline void _NvicSwInterruptTrigger(unsigned int irq)
 	__scs.stir = irq;
 #endif
 }
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
 
 #endif /* !_ASMLANGUAGE */
diff --git a/include/arch/arm/cortex_m/scb.h b/include/arch/arm/cortex_m/scb.h
index 79f1f215cf2..6c5284cc7ca 100644
--- a/include/arch/arm/cortex_m/scb.h
+++ b/include/arch/arm/cortex_m/scb.h
@@ -597,6 +597,8 @@ static inline void _ScbExcPrioSet(uint8_t exc, uint8_t pri)
 	__scs.scb.shpr[exc - 4] = pri;
 }
 
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+
 /**
 *
 * @brief Enable usage fault exceptions
@@ -1204,6 +1206,9 @@ static inline void _ScbUsageFaultAllFaultsReset(void)
 {
 	__scs.scb.cfsr.byte.ufsr.val = 0xffff;
 }
+
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
+
 #endif /* _ASMLANGUAGE */
 
 #ifdef __cplusplus
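_NvicSwInterruptTrigger() is compiled out above because ARMv6-M has no
Software Trigger Interrupt Register (STIR). If software-pended interrupts are
needed on M0/M0+, the same effect is available by setting the IRQ's bit in
the NVIC Interrupt Set-Pending Register; a hedged sketch using the
architectural ISPR address (0xE000E200), with an illustrative helper name:

    #include <stdint.h>

    /* ARMv6-M has no STIR, but an interrupt can still be pended from
     * software via the NVIC Interrupt Set-Pending Register. Sketch
     * only; not part of the patched headers.
     */
    #define NVIC_ISPR  (*(volatile uint32_t *)0xE000E200)

    static inline void sw_interrupt_trigger_sketch(unsigned int irq)
    {
            NVIC_ISPR = 1U << irq;  /* valid for the 32 IRQs of ARMv6-M */
    }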