From 3de84ae88ef9ee1faa0098ba709daeaf46321e69 Mon Sep 17 00:00:00 2001
From: Marcus Shawcroft
Date: Sat, 31 Dec 2016 13:03:53 +0000
Subject: [PATCH] arm: Fix assembler layout.

Adjust the layout of various ARM assembler files to conform to the norm
used in the majority of files.

Change-Id: Ia5007628be5ad36ef587946861c6ea90a8062585
Signed-off-by: Marcus Shawcroft
---
 arch/arm/core/cpu_idle.S    | 80 ++++++++++++++++++-------------------
 arch/arm/core/fault_s.S     | 52 +++++++++++++-----------
 arch/arm/core/isr_wrapper.S |  2 +-
 3 files changed, 69 insertions(+), 65 deletions(-)

diff --git a/arch/arm/core/cpu_idle.S b/arch/arm/core/cpu_idle.S
index cf095a52502..9a2f6aa657f 100644
--- a/arch/arm/core/cpu_idle.S
+++ b/arch/arm/core/cpu_idle.S
@@ -57,10 +57,10 @@ GTEXT(k_cpu_atomic_idle)
  */
 
 SECTION_FUNC(TEXT, _CpuIdleInit)
-    ldr r1, =_SCB_SCR
-    movs.n r2, #_SCR_INIT_BITS
-    str r2, [r1]
-    bx lr
+	ldr r1, =_SCB_SCR
+	movs.n r2, #_SCR_INIT_BITS
+	str r2, [r1]
+	bx lr
 
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
 
@@ -78,9 +78,9 @@ SECTION_FUNC(TEXT, _CpuIdleInit)
  */
 
 SECTION_FUNC(TEXT, _NanoIdleValGet)
-    ldr r0, =_kernel
-    ldr r0, [r0, #_kernel_offset_to_idle]
-    bx lr
+	ldr r0, =_kernel
+	ldr r0, [r0, #_kernel_offset_to_idle]
+	bx lr
 
 /**
  *
@@ -96,10 +96,10 @@ SECTION_FUNC(TEXT, _NanoIdleValGet)
  */
 
 SECTION_FUNC(TEXT, _NanoIdleValClear)
-    ldr r0, =_kernel
-    eors.n r1, r1
-    str r1, [r0, #_kernel_offset_to_idle]
-    bx lr
+	ldr r0, =_kernel
+	eors.n r1, r1
+	str r1, [r0, #_kernel_offset_to_idle]
+	bx lr
 
 #endif /* CONFIG_SYS_POWER_MANAGEMENT */
 
@@ -128,16 +128,16 @@ SECTION_FUNC(TEXT, k_cpu_idle)
 #endif
 
 #if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
-    cpsie i
+	cpsie i
 #else /* CONFIG_CPU_CORTEX_M3_M4 */
-    /* clear BASEPRI so wfi is awakened by incoming interrupts */
-    eors.n r0, r0
-    msr BASEPRI, r0
+	/* clear BASEPRI so wfi is awakened by incoming interrupts */
+	eors.n r0, r0
+	msr BASEPRI, r0
 #endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 
-    wfi
+	wfi
 
-    bx lr
+	bx lr
 
 /**
  *
@@ -169,38 +169,38 @@ SECTION_FUNC(TEXT, k_cpu_atomic_idle)
 	mov lr, r1
 #endif
 
-    /*
-     * Lock PRIMASK while sleeping: wfe will still get interrupted by incoming
-     * interrupts but the CPU will not service them right away.
-     */
-    cpsid i
+	/*
+	 * Lock PRIMASK while sleeping: wfe will still get interrupted by
+	 * incoming interrupts but the CPU will not service them right away.
+	 */
+	cpsid i
 
-    /*
-     * No need to set SEVONPEND, it's set once in _CpuIdleInit() and never
-     * touched again.
-     */
+	/*
+	 * No need to set SEVONPEND, it's set once in _CpuIdleInit() and never
+	 * touched again.
+	 */
 
-    /* r0: interrupt mask from caller */
+	/* r0: interrupt mask from caller */
 
 #if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
-    /* No BASEPRI, call wfe directly (SEVONPEND set in _CpuIdleInit()) */
-    wfe
+	/* No BASEPRI, call wfe directly (SEVONPEND set in _CpuIdleInit()) */
+	wfe
 
-    cmp r0, #0
-    bne _irq_disabled
-    cpsie i
+	cmp r0, #0
+	bne _irq_disabled
+	cpsie i
 _irq_disabled:
 
 #else /* CONFIG_CPU_CORTEX_M3_M4 */
-    /* r1: zero, for setting BASEPRI (needs a register) */
-    eors.n r1, r1
+	/* r1: zero, for setting BASEPRI (needs a register) */
+	eors.n r1, r1
 
-    /* unlock BASEPRI so wfe gets interrupted by incoming interrupts */
-    msr BASEPRI, r1
+	/* unlock BASEPRI so wfe gets interrupted by incoming interrupts */
+	msr BASEPRI, r1
 
-    wfe
+	wfe
 
-    msr BASEPRI, r0
-    cpsie i
+	msr BASEPRI, r0
+	cpsie i
 #endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
-    bx lr
+	bx lr
diff --git a/arch/arm/core/fault_s.S b/arch/arm/core/fault_s.S
index d9e3fdd2899..773a1d59026 100644
--- a/arch/arm/core/fault_s.S
+++ b/arch/arm/core/fault_s.S
@@ -73,40 +73,44 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__debug_monitor)
 SECTION_SUBSEC_FUNC(TEXT,__fault,__reserved)
 #if defined(CONFIG_CPU_CORTEX_M0_M0PLUS)
-    /* force unlock interrupts */
-    cpsie i
+	/* force unlock interrupts */
+	cpsie i
 
-    /* Use EXC_RETURN state to find out if stack frame is on the MSP or PSP */
-    ldr r0, =0x4
-    mov r1, lr
-    tst r1, r0
-    beq _stack_frame_msp
-    mrs r0, PSP
-    bne _stack_frame_endif
+	/* Use EXC_RETURN state to find out if stack frame is on the
+	 * MSP or PSP
+	 */
+	ldr r0, =0x4
+	mov r1, lr
+	tst r1, r0
+	beq _stack_frame_msp
+	mrs r0, PSP
+	bne _stack_frame_endif
 
 _stack_frame_msp:
-    mrs r0, MSP
+	mrs r0, MSP
 _stack_frame_endif:
 
 #else /* CONFIG_CPU_CORTEX_M3_M4 */
-    /* force unlock interrupts */
-    eors.n r0, r0
-    msr BASEPRI, r0
+	/* force unlock interrupts */
+	eors.n r0, r0
+	msr BASEPRI, r0
 
 	/* this reimplements _ScbIsNestedExc() */
-    ldr ip, =_SCS_ICSR
-    ldr ip, [ip]
-    ands.w ip, #_SCS_ICSR_RETTOBASE
+	ldr ip, =_SCS_ICSR
+	ldr ip, [ip]
+	ands.w ip, #_SCS_ICSR_RETTOBASE
 
 	ite eq			/* is the RETTOBASE bit zero ? */
-	mrseq r0, MSP	/* if so, we're not returning to thread mode, thus this
-			 * is a nested exception: the stack frame is on the MSP */
-	mrsne r0, PSP	/* if not, we are returning to thread mode, thus this is
-			 * not a nested exception: the stack frame is on the PSP */
+	mrseq r0, MSP	/* if so, we're not returning to thread mode,
+			 * thus this is a nested exception: the stack
+			 * frame is on the MSP */
+	mrsne r0, PSP	/* if not, we are returning to thread mode, thus
+			 * this is not a nested exception: the stack
+			 * frame is on the PSP */
#endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 
-    push {lr}
-    bl _Fault
+	push {lr}
+	bl _Fault
 
-    pop {pc}
+	pop {pc}
 
-    .end
+	.end
diff --git a/arch/arm/core/isr_wrapper.S b/arch/arm/core/isr_wrapper.S
index c41a6c6fcb5..13cb69493c2 100644
--- a/arch/arm/core/isr_wrapper.S
+++ b/arch/arm/core/isr_wrapper.S
@@ -110,7 +110,7 @@ _idle_state_cleared:
 #endif /* CONFIG_CPU_CORTEX_M0_M0PLUS */
 	ldr r1, =_sw_isr_table
 	add r1, r1, r0	/* table entry: ISRs must have their MSB set to stay
-			 * in thumb mode */
+				 * in thumb mode */
 	ldm r1!,{r0,r3}	/* arg in r0, ISR in r3 */
 	blx r3		/* call ISR */
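
For reference, the layout norm the '+' lines above apply appears to be: SECTION_FUNC/SECTION_SUBSEC_FUNC labels and preprocessor directives in column one, a single tab of indentation before each mnemonic, comments indented to the same level as the code they describe, and long comments wrapped onto '*' continuation lines rather than left on one over-long line. A minimal sketch of that convention follows; the function name _example_func is hypothetical and only for illustration, while the instruction sequence is borrowed from the cpu_idle.S hunks above.

SECTION_FUNC(TEXT, _example_func)
	/*
	 * Block comments sit at the same indentation as the code they
	 * describe and are wrapped instead of running past the margin.
	 */
	ldr r0, =_kernel
	ldr r0, [r0, #_kernel_offset_to_idle]	/* short trailing comment */
	bx lr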