arch/arm: add initial support for Cortex-M0/M0+

SysTick is not disabled, as it is optional per the spec.

SVC is not used, as there is no priority-based interrupt masking on
these cores (only PendSV is used).

Largely based on previous work by Euan Mutch <euan@abelon.com>.

Jira: ZEP-783

Change-Id: I38e29bfcf0624c1aea5f9fd7a74230faa1b59e8b
Signed-off-by: Ricardo Salveti <ricardo.salveti@linaro.org>
Authored by Ricardo Salveti on 2016-10-05 19:43:36 -03:00; committed by Anas Nashif
commit ffacae20d0
18 changed files with 325 additions and 84 deletions


@ -27,6 +27,14 @@ endif
 cflags-cortex-m7 = $(call cc-option,-mabi=aapcs -mthumb -mcpu=cortex-m7) \
	$(call cc-option,-mthumb -march=armv7e-m)
+aflags-$(CONFIG_CPU_CORTEX_M0) += $(cflags-cortex-m0)
+cflags-$(CONFIG_CPU_CORTEX_M0) += $(cflags-cortex-m0)
+cxxflags-$(CONFIG_CPU_CORTEX_M0) += $(cflags-cortex-m0)
+aflags-$(CONFIG_CPU_CORTEX_M0PLUS) += $(cflags-cortex-m0+)
+cflags-$(CONFIG_CPU_CORTEX_M0PLUS) += $(cflags-cortex-m0+)
+cxxflags-$(CONFIG_CPU_CORTEX_M0PLUS) += $(cflags-cortex-m0+)
 aflags-$(CONFIG_CPU_CORTEX_M3) += $(cflags-cortex-m3)
 cflags-$(CONFIG_CPU_CORTEX_M3) += $(cflags-cortex-m3)
 cxxflags-$(CONFIG_CPU_CORTEX_M3) += $(cflags-cortex-m3)


@ -45,6 +45,15 @@ config ISA_THUMB2
	  technology is featured in the processor, and in all ARMv7
	  architecture-based processors.
+config CPU_CORTEX_M0_M0PLUS
+	bool
+	# Omit prompt to signify "hidden" option
+	default n
+	select ATOMIC_OPERATIONS_C
+	select ISA_THUMB2
+	help
+	This option signifies the use of either a Cortex-M0 or Cortex-M0+ CPU.
 config CPU_CORTEX_M3_M4
 	bool
 	# Omit prompt to signify "hidden" option
@ -54,6 +63,20 @@ config CPU_CORTEX_M3_M4
 	help
 	This option signifies the use of either a Cortex-M3 or Cortex-M4 CPU.
+config CPU_CORTEX_M0
+	bool
+	# Omit prompt to signify "hidden" option
+	select CPU_CORTEX_M0_M0PLUS
+	help
+	This option signifies the use of a Cortex-M0 CPU
+config CPU_CORTEX_M0PLUS
+	bool
+	# Omit prompt to signify "hidden" option
+	select CPU_CORTEX_M0_M0PLUS
+	help
+	This option signifies the use of a Cortex-M0+ CPU
 config CPU_CORTEX_M3
 	bool
 	# Omit prompt to signify "hidden" option
@ -158,8 +181,8 @@ config FLASH_BASE_ADDRESS
 	  avoid modifying it via the menu configuration.
 endmenu
-menu "ARM Cortex-M3/M4 options"
-	depends on CPU_CORTEX_M3_M4
+menu "ARM Cortex-M0/M0+/M3/M4 options"
+	depends on CPU_CORTEX_M0_M0PLUS || CPU_CORTEX_M3_M4
 config IRQ_OFFLOAD
 	bool "Enable IRQ offload"


@ -70,8 +70,12 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
 #endif
 	/* lock interrupts: will get unlocked when switch to main task */
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	cpsid i
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
 	msr BASEPRI, r0
+#endif
 	/*
 	 * Set PSP and use it to boot without using MSP, so that it


@ -48,6 +48,17 @@ SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,__start)
 	.word __nmi
 	.word __hard_fault
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	.word __reserved
+	.word __reserved
+	.word __reserved
+	.word __reserved
+	.word __reserved
+	.word __reserved
+	.word __reserved
+	.word __reserved /* SVC not used for now (PendSV used instead) */
+	.word __reserved
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	.word __mpu_fault
 	.word __bus_fault
 	.word __usage_fault
@ -57,6 +68,7 @@ SECTION_SUBSEC_FUNC(exc_vector_table,_vector_table_section,__start)
 	.word __reserved
 	.word __svc
 	.word __debug_monitor
+#endif
 	.word __reserved
 	.word __pendsv
 	.word _timer_int_handler


@ -52,11 +52,13 @@ GTEXT(_vector_table)
 GTEXT(__reset)
 GTEXT(__nmi)
 GTEXT(__hard_fault)
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 GTEXT(__mpu_fault)
 GTEXT(__bus_fault)
 GTEXT(__usage_fault)
 GTEXT(__svc)
 GTEXT(__debug_monitor)
+#endif
 GTEXT(__pendsv)
 GTEXT(__reserved)


@ -123,12 +123,17 @@ SECTION_FUNC(TEXT, nano_cpu_idle)
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
 	push {lr}
 	bl _sys_k_event_logger_enter_sleep
-	pop {lr}
+	pop {r0}
+	mov lr, r0
 #endif
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	cpsie i
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	/* clear BASEPRI so wfi is awakened by incoming interrupts */
 	eors.n r0, r0
 	msr BASEPRI, r0
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	wfi
@ -164,16 +169,10 @@ SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
 	push {lr}
 	bl _sys_k_event_logger_enter_sleep
-	pop {lr}
+	pop {r1}
+	mov lr, r1
 #endif
-	/*
-	 * r0: interrupt mask from caller
-	 * r1: zero, for setting BASEPRI (needs a register)
-	 */
-	eors.n r1, r1
 	/*
 	 * Lock PRIMASK while sleeping: wfe will still get interrupted by incoming
 	 * interrupts but the CPU will not service them right away.
@ -185,6 +184,21 @@ SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
 	 * touched again.
 	 */
+	/* r0: interrupt mask from caller */
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* No BASEPRI, call wfe directly (SEVONPEND set in _CpuIdleInit()) */
+	wfe
+	cmp r0, #0
+	bne _irq_disabled
+	cpsie i
+_irq_disabled:
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
+	/* r1: zero, for setting BASEPRI (needs a register) */
+	eors.n r1, r1
 	/* unlock BASEPRI so wfe gets interrupted by incoming interrupts */
 	msr BASEPRI, r1
@ -192,4 +206,5 @@ SECTION_FUNC(TEXT, nano_cpu_atomic_idle)
 	msr BASEPRI, r0
 	cpsie i
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	bx lr
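
For reference, a rough C equivalent of the Cortex-M0/M0+ atomic-idle path above; this is a sketch only, and the CMSIS-style intrinsics (__WFE(), __enable_irq()) are assumptions for illustration, not part of this patch:

	/* Sketch: on M0/M0+ there is no BASEPRI, so the caller's irq_lock key
	 * (the saved PRIMASK) decides whether to re-enable interrupts after
	 * wfe. SEVONPEND, set in _CpuIdleInit(), makes a pended interrupt
	 * wake the wfe even while PRIMASK is set.
	 */
	void atomic_idle_sketch(unsigned int key)
	{
		__WFE();                /* sleep until an event or pended interrupt */
		if (key == 0) {
			__enable_irq(); /* interrupts were enabled before the lock */
		}
	}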


@ -16,7 +16,7 @@
 /**
  * @file
- * @brief ARM CORTEX-M3 exception/interrupt exit API
+ * @brief ARM CORTEX-M exception/interrupt exit API
  *
  *
  * Provides functions for performing kernel handling when exiting exceptions or
@ -44,10 +44,7 @@ GTEXT(_is_next_thread_current)
 #if CONFIG_GDB_INFO
 #define _EXIT_EXC_IF_FIBER_PREEMPTED beq _ExcExitWithGdbStub
 #else
-_EXIT_EXC_IF_FIBER_PREEMPTED: .macro
-	it eq
-	bxeq lr
-.endm
+#define _EXIT_EXC_IF_FIBER_PREEMPTED beq _EXIT_EXC
 #endif
 #define _EXIT_EXC_IF_FIBER_NOT_READY _EXIT_EXC_IF_FIBER_PREEMPTED
@ -104,26 +101,28 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, _ExcExit)
 	/* coop thread ? do not schedule */
 	cmp r2, #0
-	it lt
-	bxlt lr
+	blt _EXIT_EXC
 	/* scheduler locked ? do not schedule */
 	cmp r3, #0
-	it gt
-	bxgt lr
+	bgt _EXIT_EXC
 	push {lr}
 	blx _is_next_thread_current
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	pop {r1}
+	mov lr, r1
+#else
 	pop {lr}
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	cmp r0, #0
-	it ne
-	bxne lr
+	bne _EXIT_EXC
 #else
 	/* is the current thread preemptible (task) ? */
 	ldr r2, [r1, #__tNANO_flags_OFFSET]
-	ands.w r2, #PREEMPTIBLE
+	ldr r3, =PREEMPTIBLE
+	ands r2, r3
 	_EXIT_EXC_IF_FIBER_PREEMPTED
 	/* is there a fiber ready ? */
@ -142,4 +141,6 @@ _ExcExitWithGdbStub:
 	_GDB_STUB_EXC_EXIT
+_EXIT_EXC:
 	bx lr


@ -64,13 +64,14 @@
  */
 void _FaultDump(const NANO_ESF *esf, int fault)
 {
-	int escalation = 0;
 	PR_EXC("Fault! EXC #%d, Thread: %p, instr @ 0x%" PRIx32 "\n",
 	       fault,
 	       sys_thread_self_get(),
 	       esf->pc);
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	int escalation = 0;
 	if (3 == fault) { /* hard fault */
 		escalation = _ScbHardFaultIsForced();
 		PR_EXC("HARD FAULT: %s\n",
@ -99,6 +100,7 @@ void _FaultDump(const NANO_ESF *esf, int fault)
 	/* clear USFR sticky bits */
 	_ScbUsageFaultAllFaultsReset();
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
 }
 #endif
@ -119,6 +121,8 @@ static void _FaultThreadShow(const NANO_ESF *esf)
 	       esf->pc);
 }
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 /**
  *
  * @brief Dump MPU fault information
@ -226,6 +230,21 @@ static void _UsageFault(const NANO_ESF *esf)
 	_ScbUsageFaultAllFaultsReset();
 }
+/**
+ *
+ * @brief Dump debug monitor exception information
+ *
+ * See _FaultDump() for example.
+ *
+ * @return N/A
+ */
+static void _DebugMonitor(const NANO_ESF *esf)
+{
+	PR_EXC("***** Debug monitor exception (not implemented) *****\n");
+}
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
 /**
  *
  * @brief Dump hard fault information
@ -237,6 +256,10 @@ static void _UsageFault(const NANO_ESF *esf)
 static void _HardFault(const NANO_ESF *esf)
 {
 	PR_EXC("***** HARD FAULT *****\n");
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	_FaultThreadShow(esf);
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	if (_ScbHardFaultIsBusErrOnVectorRead()) {
 		PR_EXC(" Bus fault on vector table read\n");
 	} else if (_ScbHardFaultIsForced()) {
@ -249,19 +272,7 @@ static void _HardFault(const NANO_ESF *esf)
 			_UsageFault(esf);
 		}
 	}
-}
-/**
- *
- * @brief Dump debug monitor exception information
- *
- * See _FaultDump() for example.
- *
- * @return N/A
- */
-static void _DebugMonitor(const NANO_ESF *esf)
-{
-	PR_EXC("***** Debug monitor exception (not implemented) *****\n");
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
 }
 /**
@ -304,6 +315,7 @@ static void _FaultDump(const NANO_ESF *esf, int fault)
 	case 3:
 		_HardFault(esf);
 		break;
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 	case 4:
 		_MpuFault(esf, 0);
 		break;
@ -316,6 +328,7 @@ static void _FaultDump(const NANO_ESF *esf, int fault)
 	case 12:
 		_DebugMonitor(esf);
 		break;
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
 	default:
 		_ReservedException(esf, fault);
 		break;


@ -32,10 +32,12 @@ _ASM_FILE_PROLOGUE
 GTEXT(_Fault)
 GTEXT(__hard_fault)
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 GTEXT(__mpu_fault)
 GTEXT(__bus_fault)
 GTEXT(__usage_fault)
 GTEXT(__debug_monitor)
+#endif
 GTEXT(__reserved)
 /**
@ -62,14 +64,32 @@ GTEXT(__reserved)
  */
 SECTION_SUBSEC_FUNC(TEXT,__fault,__hard_fault)
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 SECTION_SUBSEC_FUNC(TEXT,__fault,__mpu_fault)
 SECTION_SUBSEC_FUNC(TEXT,__fault,__bus_fault)
 SECTION_SUBSEC_FUNC(TEXT,__fault,__usage_fault)
 SECTION_SUBSEC_FUNC(TEXT,__fault,__debug_monitor)
+#endif
 SECTION_SUBSEC_FUNC(TEXT,__fault,__reserved)
 	_GDB_STUB_EXC_ENTRY
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* force unlock interrupts */
+	cpsie i
+	/* Use EXC_RETURN state to find out if stack frame is on the MSP or PSP */
+	ldr r0, =0x4
+	mov r1, lr
+	tst r1, r0
+	beq _stack_frame_msp
+	mrs r0, PSP
+	bne _stack_frame_endif
+_stack_frame_msp:
+	mrs r0, MSP
+_stack_frame_endif:
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	/* force unlock interrupts */
 	eors.n r0, r0
 	msr BASEPRI, r0
@ -84,6 +104,7 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__reserved)
 	 * is a nested exception: the stack frame is on the MSP */
 	mrsne r0, PSP /* if not, we are returning to thread mode, thus this is
 	               * not a nested exception: the stack frame is on the PSP */
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	push {lr}
 	bl _Fault
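
The M0/M0+ branch above selects the faulting stack by testing bit 2 of EXC_RETURN, which the core places in lr on exception entry. A rough C rendering of that test, as an illustrative sketch (the __get_MSP()/__get_PSP() CMSIS intrinsics are assumptions here, not code from this patch):

	/* EXC_RETURN bit 2: 0 means the exception frame was pushed to the MSP
	 * (nested exception), 1 means it was pushed to the PSP (thread mode).
	 */
	static const void *fault_frame_sketch(uint32_t exc_return)
	{
		return (exc_return & 0x4) ? (const void *)__get_PSP()
					  : (const void *)__get_MSP();
	}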


@ -65,11 +65,14 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry)
 	ldr r2, [r1, #__tNANO_flags_OFFSET]
 	/* already in an exception, do not update the registers */
-	ands r3, r2, #EXC_ACTIVE
-	it ne
-	bxne lr
-	orrs r2, #EXC_ACTIVE
+	ldr r3, =EXC_ACTIVE
+	ands r3, r2
+	beq _GdbStubEditReg
+	bx lr
+_GdbStubEditReg:
+	ldr r3, =EXC_ACTIVE
+	orrs r2, r3
 	str r2, [r1, #__tNANO_flags_OFFSET]
 	ldr r1, [r1, #__tNANO_current_OFFSET]
 	str r2, [r1, #__tTCS_flags_OFFSET]
@ -77,7 +80,20 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry)
 	/* save callee-saved + psp in TCS */
 	adds r1, #__tTCS_preempReg_OFFSET
 	mrs ip, PSP
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* Store current r4-r7 */
+	stmea r1!, {r4-r7}
+	/* copy r8-r12 into r3-r7 */
+	mov r3, r8
+	mov r4, r9
+	mov r5, r10
+	mov r6, r11
+	mov r7, ip
+	/* store r8-12 */
+	stmea r1!, {r3-r7}
+#else /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	stmia r1, {v1-v8, ip}
+#endif
 	bx lr
@ -98,17 +114,20 @@ SECTION_FUNC(TEXT, _GdbStubExcEntry)
 SECTION_FUNC(TEXT, _GdbStubExcExit)
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 	/* if we're nested (ie. !RETTOBASE), do not reset EXC_ACTIVE */
 	ldr r1, =_SCS_ICSR
 	ldr r1, [r1]
 	ands r1, #_SCS_ICSR_RETTOBASE
 	it eq
 	bxeq lr
+#endif
 	ldr r1, =_nanokernel
 	ldr r2, [r1, #__tNANO_flags_OFFSET]
-	bic r2, #EXC_ACTIVE
+	ldr r3, =EXC_ACTIVE
+	bics r2, r3
 	str r2, [r1, #__tNANO_flags_OFFSET]
 	ldr r1, [r1, #__tNANO_current_OFFSET]
 	str r2, [r1, #__tTCS_flags_OFFSET]
@ -138,11 +157,12 @@ SECTION_FUNC(TEXT, _irq_vector_table_entry_with_gdb_stub)
 	_GDB_STUB_EXC_ENTRY
 	mrs r0, IPSR /* get exception number */
-	sub r0, r0, #16 /* get IRQ number */
+	subs r0, #16 /* get IRQ number */
 	ldr r1, =_irq_vector_table
 	/* grab real ISR at address: r1 + (r0 << 2) (table is 4-byte wide) */
-	ldr r1, [r1, r0, LSL #2]
+	lsls r3, r0, #2
+	ldr r1, [r1, r3]
 	/* jump to ISR, no return: ISR is responsible for calling _IntExit */
 	bx r1


@ -58,15 +58,11 @@ SECTION_FUNC(TEXT, _isr_wrapper)
 	push {lr} /* lr is now the first item on the stack */
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_INTERRUPT
-	push {lr}
 	bl _sys_k_event_logger_interrupt
-	pop {lr}
 #endif
 #ifdef CONFIG_KERNEL_EVENT_LOGGER_SLEEP
-	push {lr}
 	bl _sys_k_event_logger_exit_sleep
-	pop {lr}
 #endif
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
@ -84,25 +80,46 @@ SECTION_FUNC(TEXT, _isr_wrapper)
 	ldr r2, =_nanokernel
 	ldr r0, [r2, #__tNANO_idle_OFFSET] /* requested idle duration, in ticks */
 	cmp r0, #0
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	beq _idle_state_cleared
+	movs.n r1, #0
+	str r1, [r2, #__tNANO_idle_OFFSET] /* clear kernel idle state */
+	blx _sys_power_save_idle_exit
+_idle_state_cleared:
+#else
 	ittt ne
 	movne r1, #0
 	strne r1, [r2, #__tNANO_idle_OFFSET] /* clear kernel idle state */
 	blxne _sys_power_save_idle_exit
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	cpsie i /* re-enable interrupts (PRIMASK = 0) */
 #endif
 	mrs r0, IPSR /* get exception number */
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	ldr r1, =16
+	subs r0, r1 /* get IRQ number */
+	lsls r0, #3 /* table is 8-byte wide */
+#else
 	sub r0, r0, #16 /* get IRQ number */
 	lsl r0, r0, #3 /* table is 8-byte wide */
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	ldr r1, =_sw_isr_table
 	add r1, r1, r0 /* table entry: ISRs must have their MSB set to stay
 	                * in thumb mode */
-	ldmia r1,{r0,r3} /* arg in r0, ISR in r3 */
+	ldm r1!,{r0,r3} /* arg in r0, ISR in r3 */
 	blx r3 /* call ISR */
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	pop {r3}
+	mov lr, r3
+#else
 	pop {lr}
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	/* exception return is done in _IntExit(), including _GDB_STUB_EXC_EXIT */
 	b _IntExit
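
The shift by 3 above reflects the 8-byte layout of _sw_isr_table entries. Roughly, per entry (a sketch only; the struct and field names are assumed for illustration, not taken from this patch):

	/* Each software ISR table entry holds the ISR argument followed by
	 * the ISR itself: 8 bytes on these 32-bit cores, hence
	 * offset = (exception_number - 16) << 3 in the wrapper above.
	 */
	struct sw_isr_entry_sketch {
		void *arg;              /* loaded into r0 */
		void (*isr)(void *arg); /* called via blx r3 */
	};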


@ -32,7 +32,9 @@
 _ASM_FILE_PROLOGUE
 GTEXT(_Swap)
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 GTEXT(__svc)
+#endif
 GTEXT(__pendsv)
 #ifdef CONFIG_KERNEL_V2
 GTEXT(_get_next_ready_thread)
@ -66,7 +68,8 @@ SECTION_FUNC(TEXT, __pendsv)
 	/* Register the context switch */
 	push {lr}
 	bl _sys_k_event_logger_context_switch
-	pop {lr}
+	pop {r0}
+	mov lr, r0
 #endif
 	/* load _Nanokernel into r1 and current tTCS into r2 */
@ -74,16 +77,30 @@ SECTION_FUNC(TEXT, __pendsv)
 	ldr r2, [r1, #__tNANO_current_OFFSET]
 	/* addr of callee-saved regs in TCS in r0 */
-	add r0, r2, #__tTCS_preempReg_OFFSET
+	ldr r0, =__tTCS_preempReg_OFFSET
+	add r0, r2
 	/* save callee-saved + psp in TCS */
 	mrs ip, PSP
-	stmia r0, {v1-v8, ip}
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* Store current r4-r7 */
+	stmea r0!, {r4-r7}
+	/* copy r8-r12 into r3-r7 */
+	mov r3, r8
+	mov r4, r9
+	mov r5, r10
+	mov r6, r11
+	mov r7, ip
+	/* store r8-12 */
+	stmea r0!, {r3-r7}
+#else
+	stmia r0, {v1-v8, ip}
 #ifdef CONFIG_FP_SHARING
 	add r0, r2, #__tTCS_preemp_float_regs_OFFSET
 	vstmia r0, {s16-s31}
-#endif
+#endif /* CONFIG_FP_SHARING */
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	/*
 	 * Prepare to clear PendSV with interrupts unlocked, but
@ -96,8 +113,12 @@ SECTION_FUNC(TEXT, __pendsv)
 	ldr v3, =_SCS_ICSR_UNPENDSV
 	/* protect the kernel state while we play with the thread lists */
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	cpsid i
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	movs.n r0, #_EXC_IRQ_DEFAULT_PRIO
 	msr BASEPRI, r0
+#endif
 	/* find out incoming thread (fiber or task) */
@ -117,11 +138,23 @@ SECTION_FUNC(TEXT, __pendsv)
 	 * if so, remove fiber from list
 	 * else, the task is the thread we're switching in
 	 */
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* branch over remove if eq otherwise we branch over switch */
+	beq _switch_in_task
+	ldr r0, [r2, #__tTCS_link_OFFSET] /* then */
+	str r0, [r1, #__tNANO_fiber_OFFSET] /* then */
+	bne _switch_in_task_endif
+_switch_in_task:
+	ldr r2, [r1, #__tNANO_task_OFFSET] /* else */
+_switch_in_task_endif:
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	itte ne
 	ldrne.w r0, [r2, #__tTCS_link_OFFSET] /* then */
 	strne.w r0, [r1, #__tNANO_fiber_OFFSET] /* then */
 	ldreq.w r2, [r1, #__tNANO_task_OFFSET] /* else */
-#endif
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
+#endif /* CONFIG_KERNEL_V2 */
 	/* r2 contains the new thread */
 #if !defined(CONFIG_KERNEL_V2)
@ -142,10 +175,41 @@ SECTION_FUNC(TEXT, __pendsv)
 	/* _SCS_ICSR is still in v4 and _SCS_ICSR_UNPENDSV in v3 */
 	str v3, [v4, #0]
-	/* restore BASEPRI for the incoming thread */
+	/* Restore previous interrupt disable state (irq_lock key) */
 	ldr r0, [r2, #__tTCS_basepri_OFFSET]
-	mov ip, #0
-	str ip, [r2, #__tTCS_basepri_OFFSET]
+	movs.n r3, #0
+	str r3, [r2, #__tTCS_basepri_OFFSET]
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* BASEPRI not available, previous interrupt disable state
+	 * maps to PRIMASK.
+	 *
+	 * Only enable interrupts if value is 0, meaning interrupts
+	 * were enabled before irq_lock was called.
+	 */
+	cmp r0, #0
+	bne _thread_irq_disabled
+	cpsie i
+_thread_irq_disabled:
+	ldr r4, =__tTCS_preempReg_OFFSET
+	adds r0, r2, r4
+	/* restore r4-r12 for new thread */
+	/* first restore r8-r12 located after r4-r7 (4*4bytes) */
+	adds r0, #16
+	ldmia r0!, {r3-r7}
+	/* move to correct registers */
+	mov r8, r3
+	mov r9, r4
+	mov r10, r5
+	mov r11, r6
+	mov ip, r7
+	/* restore r4-r7, go back 9*4 bytes to the start of the stored block */
+	subs r0, #36
+	ldmia r0!, {r4-r7}
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
+	/* restore BASEPRI for the incoming thread */
 	msr BASEPRI, r0
 #ifdef CONFIG_FP_SHARING
@ -156,6 +220,8 @@ SECTION_FUNC(TEXT, __pendsv)
 	/* load callee-saved + psp from TCS */
 	add r0, r2, #__tTCS_preempReg_OFFSET
 	ldmia r0, {v1-v8, ip}
+#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	msr PSP, ip
 	_GDB_STUB_EXC_EXIT
@ -163,6 +229,7 @@ SECTION_FUNC(TEXT, __pendsv)
 	/* exc return */
 	bx lr
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 /**
  *
  * @brief Service call handler
@ -233,6 +300,7 @@ _context_switch:
 	/* handler mode exit, to PendSV */
 	bx lr
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
 /**
  *
@ -259,6 +327,9 @@ _context_switch:
  * outgoing thread. This is all performed by the hardware, which stores it in
  * its exception stack frame, created when handling the svc exception.
  *
+ * On Cortex-M0/M0+ the intlock key is represented by the PRIMASK register,
+ * as BASEPRI is not available.
+ *
  * @return may contain a return value setup by a call to fiberRtnValueSet()
  *
  * C function prototype:
@ -273,7 +344,26 @@ SECTION_FUNC(TEXT, _Swap)
 	ldr r2, [r1, #__tNANO_current_OFFSET]
 	str r0, [r2, #__tTCS_basepri_OFFSET]
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	/* No priority-based interrupt masking on M0/M0+,
+	 * pending PendSV is used instead of svc
+	 */
+	ldr r1, =_SCS_ICSR
+	ldr r2, =_SCS_ICSR_PENDSV
+	str r2, [r1, #0]
+	/* Unlock interrupts to allow PendSV, since it's running at prio 0xff
+	 *
+	 * PendSV handler will be called if there are no other interrupts
+	 * of a higher priority pending.
+	 */
+	cpsie i
+	/* PC stored in stack frame by the hw */
+	bx lr
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	svc #0
 	/* r0 contains the return value if needed */
 	bx lr
+#endif
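
On M0/M0+ the context switch is requested by pending PendSV directly in the ICSR rather than through SVC. A minimal C sketch of that trigger (the register address and bit position are per the ARMv6-M architecture manual; the macro and function names here are illustrative):

	#include <stdint.h>

	#define ICSR_SKETCH (*(volatile uint32_t *)0xE000ED04) /* SCB ICSR */
	#define PENDSVSET   (1u << 28)                         /* pend PendSV */

	static inline void pend_context_switch_sketch(void)
	{
		ICSR_SKETCH = PENDSVSET;     /* request PendSV */
		__asm__ volatile("cpsie i"); /* unlock; PendSV (prio 0xff) runs
					      * once no higher-priority IRQ is
					      * pending */
	}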


@ -72,6 +72,7 @@ static ALWAYS_INLINE int _IsInIsr(void)
 static ALWAYS_INLINE void _ExcSetup(void)
 {
 	_ScbExcPrioSet(_EXC_PENDSV, _EXC_PRIO(0xff));
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 	_ScbExcPrioSet(_EXC_SVC, _EXC_PRIO(0x01));
 	_ScbExcPrioSet(_EXC_MPU_FAULT, _EXC_PRIO(0x01));
 	_ScbExcPrioSet(_EXC_BUS_FAULT, _EXC_PRIO(0x01));
@ -80,6 +81,7 @@ static ALWAYS_INLINE void _ExcSetup(void)
 	_ScbUsageFaultEnable();
 	_ScbBusFaultEnable();
 	_ScbMemFaultEnable();
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
 }
 #endif /* _ASMLANGUAGE */


@ -57,17 +57,11 @@ extern "C" {
 static ALWAYS_INLINE unsigned int find_msb_set(uint32_t op)
 {
-	unsigned int bit;
-
-	__asm__ volatile(
-		"cmp %1, #0;\n\t"
-		"itt ne;\n\t"
-		"  clzne %1, %1;\n\t"
-		"  rsbne %0, %1, #32;\n\t"
-		: "=r"(bit)
-		: "r"(op));
-
-	return bit;
+	if (!op) {
+		return 0;
+	}
+
+	return 32 - __builtin_clz(op);
 }
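
A usage note on the rewritten find_msb_set(), not part of the patch itself: the builtin-based version keeps the 1-based result convention of the old inline assembly.

	find_msb_set(0x1);        /* == 1 */
	find_msb_set(0x10);       /* == 5: 32 - __builtin_clz(0x10) = 32 - 27 */
	find_msb_set(0x80000000); /* == 32 */
	find_msb_set(0);          /* == 0: guarded, as __builtin_clz(0) is undefined */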
@ -85,18 +79,7 @@ static ALWAYS_INLINE unsigned int find_msb_set(uint32_t op)
 static ALWAYS_INLINE unsigned int find_lsb_set(uint32_t op)
 {
-	unsigned int bit;
-
-	__asm__ volatile(
-		"rsb %0, %1, #0;\n\t"
-		"ands %0, %0, %1;\n\t" /* r0 = x & (-x): only LSB set */
-		"itt ne;\n\t"
-		"  clzne %0, %0;\n\t" /* count leading zeroes */
-		"  rsbne %0, %0, #32;\n\t"
-		: "=&r"(bit)
-		: "r"(op));
-
-	return bit;
+	return __builtin_ffs(op);
 }
@ -135,12 +118,21 @@ static ALWAYS_INLINE unsigned int find_lsb_set(uint32_t op)
  *
  * On Cortex-M3/M4, this function prevents exceptions of priority lower than
  * the two highest priorities from interrupting the CPU.
+ *
+ * On Cortex-M0/M0+, this function reads the value of PRIMASK which shows
+ * if interrupts are enabled, then disables all interrupts except NMI.
+ *
  */
 static ALWAYS_INLINE unsigned int _arch_irq_lock(void)
 {
 	unsigned int key;
+
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	__asm__ volatile("mrs %0, PRIMASK;\n\t"
+			 "cpsid i;\n\t"
+			 : "=r" (key));
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	__asm__ volatile(
 		"movs.n %%r1, %1;\n\t"
 		"mrs %0, BASEPRI;\n\t"
@ -148,6 +140,7 @@ static ALWAYS_INLINE unsigned int _arch_irq_lock(void)
 		: "=r"(key)
 		: "i"(_EXC_IRQ_DEFAULT_PRIO)
 		: "r1");
+#endif
 	return key;
 }
@ -166,11 +159,22 @@ static ALWAYS_INLINE unsigned int _arch_irq_lock(void)
  * @param key architecture-dependent lock-out key
  *
  * @return N/A
+ *
+ * On Cortex-M0/M0+, this enables all interrupts if they were not
+ * previously disabled.
+ *
  */
 static ALWAYS_INLINE void _arch_irq_unlock(unsigned int key)
 {
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
+	if (key) {
+		return;
+	}
+	__asm__ volatile("cpsie i;\n\t");
+#else /* CONFIG_CPU_CORTEX_M3_M4 */
 	__asm__ volatile("msr BASEPRI, %0;\n\t" : : "r"(key));
+#endif
 }
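
A short sketch of the key-based locking contract these helpers implement; irq_lock()/irq_unlock() are Zephyr's public wrappers around them:

	/* The key is the previous PRIMASK (M0/M0+) or BASEPRI (M3/M4) value,
	 * so nested critical sections restore the outer state instead of
	 * unconditionally re-enabling interrupts.
	 */
	unsigned int key = irq_lock();
	/* ... access data shared with ISRs ... */
	irq_unlock(key); /* re-enables only if interrupts were enabled before */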


@ -40,7 +40,8 @@ _GDB_STUB_EXC_ENTRY : .macro
 	bl irq_lock
 	bl _GdbStubExcEntry
 	bl irq_unlock
-	pop {lr}
+	pop {r1}
+	mov lr, r1
 .endm
 GTEXT(_GdbStubExcExit)
@ -49,7 +50,8 @@ _GDB_STUB_EXC_EXIT : .macro
 	bl irq_lock
 	bl _GdbStubExcExit
 	bl irq_unlock
-	pop {lr}
+	pop {r1}
+	mov lr, r1
 .endm
 GTEXT(_irq_vector_table_entry_with_gdb_stub)


@ -57,7 +57,7 @@
 /* 0xe0000000 -> 0xffffffff: varies by processor (see below) */
-#if defined(CONFIG_CPU_CORTEX_M3_M4)
+#if defined(CONFIG_CPU_CORTEX_M0_M0PLUS) || defined(CONFIG_CPU_CORTEX_M3_M4)
 /* 0xe0000000 -> 0xe00fffff: private peripheral bus */
 /* 0xe0000000 -> 0xe003ffff: internal [256KB] */


@ -204,6 +204,7 @@ static inline uint32_t _NvicIrqPrioGet(unsigned int irq)
 	return __scs.nvic.ipr[irq];
 }
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 /**
  *
  * @brief Trigger an interrupt via software
@ -224,6 +225,7 @@ static inline void _NvicSwInterruptTrigger(unsigned int irq)
 	__scs.stir = irq;
 #endif
 }
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
 #endif /* !_ASMLANGUAGE */


@ -597,6 +597,8 @@ static inline void _ScbExcPrioSet(uint8_t exc, uint8_t pri)
 	__scs.scb.shpr[exc - 4] = pri;
 }
+#if !defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
 /**
  *
  * @brief Enable usage fault exceptions
@ -1204,6 +1206,9 @@ static inline void _ScbUsageFaultAllFaultsReset(void)
 {
 	__scs.scb.cfsr.byte.ufsr.val = 0xffff;
 }
+#endif /* !CONFIG_CPU_CORTEX_M0_M0PLUS */
 #endif /* _ASMLANGUAGE */
 #ifdef __cplusplus