arm: asm: Fix some asm issues when building with clang
The clang ARM assembler is a bit stricter than GNU as; make a few changes so things build with both.

Signed-off-by: Peter Smith <peter.smith@linaro.org>
Signed-off-by: Kumar Gala <kumar.gala@linaro.org>
parent b0d46a3175
commit 4e90103fcb
4 changed files with 10 additions and 10 deletions
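The hunks below make three kinds of change, each illustrated with a short sketch after the relevant hunk: adding the UAL '#' marker in front of immediate offsets in ldr, calling functions by label with bl instead of blx, and writing the destination register of an add.w explicitly.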
@@ -72,7 +72,7 @@ SECTION_SUBSEC_FUNC(TEXT, _HandlerModeExit, z_ExcExit)

     ldr r1, [r0, #_kernel_offset_to_current]

-    ldr r0, [r0, _kernel_offset_to_ready_q_cache]
+    ldr r0, [r0, #_kernel_offset_to_ready_q_cache]
     cmp r0, r1
     beq _EXIT_EXC

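This hunk (and the __pendsv hunk further down) adds the '#' marker in front of a symbolic immediate offset. GNU as tolerates the bare offset, but per this commit clang's integrated assembler does not. A minimal standalone sketch of the two spellings, using an illustrative .equ constant in place of the Zephyr offset symbol:

    .syntax unified
    .thumb
    .text
    .equ MY_OFFSET, 8              /* illustrative stand-in for _kernel_offset_to_ready_q_cache */

    ldr r0, [r0, #MY_OFFSET]       /* '#' on the immediate: accepted by GNU as and clang */
    /* ldr r0, [r0, MY_OFFSET] */  /* bare immediate: GNU as accepts it, clang rejects it */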
@@ -73,7 +73,7 @@ SECTION_FUNC(TEXT, _isr_wrapper)
     movs.n r1, #0
     /* clear kernel idle state */
     str r1, [r2, #_kernel_offset_to_idle]
-    blx z_sys_power_save_idle_exit
+    bl z_sys_power_save_idle_exit
 _idle_state_cleared:

 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
@@ -81,7 +81,7 @@ _idle_state_cleared:
     movne r1, #0
     /* clear kernel idle state */
     strne r1, [r2, #_kernel_offset_to_idle]
-    blxne z_sys_power_save_idle_exit
+    blne z_sys_power_save_idle_exit
 #else
 #error Unknown ARM architecture
 #endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
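These two hunks, like the __pendsv and __svc hunks below, replace blx <label> with bl <label> for direct calls. bl with a label is an immediate branch-and-link that stays in the current instruction set; blx with a label implies an instruction-set switch, an encoding that does not exist on the Thumb-only ARMv6-M/ARMv7-M cores, so clang's integrated assembler rejects it while GNU as tolerated the spelling here. A minimal standalone sketch (the external symbol is the one from the hunk above; any C function would do):

    .syntax unified
    .thumb
    .text
    .extern z_sys_power_save_idle_exit

    bl z_sys_power_save_idle_exit     /* direct call by label: stays in Thumb */
    /* blx z_sys_power_save_idle_exit    rejected by clang on M-profile:
     *                                   BLX <label> would switch instruction set */
    blx r3                            /* BLX <register> remains valid on M-profile
                                       * (target address must already be in r3) */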
@@ -110,7 +110,7 @@ SECTION_FUNC(TEXT, __pendsv)
     /* _kernel is still in r1 */

     /* fetch the thread to run from the ready queue cache */
-    ldr r2, [r1, _kernel_offset_to_ready_q_cache]
+    ldr r2, [r1, #_kernel_offset_to_ready_q_cache]

     str r2, [r1, #_kernel_offset_to_current]

@@ -226,7 +226,7 @@ _thread_irq_disabled:
     /* r2 contains k_thread */
     add r0, r2, #0
     push {r2, lr}
-    blx configure_builtin_stack_guard
+    bl configure_builtin_stack_guard
     pop {r2, lr}
 #endif /* CONFIG_BUILTIN_STACK_GUARD */

@@ -302,7 +302,7 @@ _stack_frame_endif:

 #if CONFIG_IRQ_OFFLOAD
     push {r0, lr}
-    blx _irq_do_offload /* call C routine which executes the offload */
+    bl _irq_do_offload /* call C routine which executes the offload */
     pop {r0, r1}
     mov lr, r1
 #endif /* CONFIG_IRQ_OFFLOAD */
@@ -312,7 +312,7 @@ _stack_frame_endif:

 _oops:
     push {r0, lr}
-    blx _do_kernel_oops
+    bl _do_kernel_oops
     pop {r0, pc}

 #elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
@@ -368,7 +368,7 @@ SECTION_FUNC(TEXT, __svc)

 #if CONFIG_IRQ_OFFLOAD
     push {r0, lr}
-    blx _irq_do_offload /* call C routine which executes the offload */
+    bl _irq_do_offload /* call C routine which executes the offload */
     pop {r0, lr}

     /* exception return is done in _IntExit() */
@@ -377,7 +377,7 @@ SECTION_FUNC(TEXT, __svc)

 _oops:
     push {r0, lr}
-    blx _do_kernel_oops
+    bl _do_kernel_oops
     pop {r0, pc}

 #if CONFIG_USERSPACE
@@ -46,7 +46,7 @@ SECTION_FUNC(TEXT,_force_exit_one_nested_irq)
     ldrne r2, =_do_software_reboot

     ldr ip, =_interrupt_stack
-    add.w ip, #(___esf_t_SIZEOF * 2) /* enough for a stack frame */
+    add.w ip, ip, #(___esf_t_SIZEOF * 2) /* enough for a stack frame */
     ldr r1, =0xfffffffe
     and.w r2, r1
     str r2, [ip, #(6 * 4)]
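The last hunk spells out the destination register of an add.w. add.w ip, #imm is a two-operand shorthand for add.w ip, ip, #imm; GNU as expands it, but per this commit clang's integrated assembler wants the explicit three-operand form for the 32-bit encoding. A minimal standalone sketch with an illustrative constant in place of ___esf_t_SIZEOF:

    .syntax unified
    .thumb
    .text

    add.w ip, ip, #(32 * 2)      /* explicit Rd and Rn: builds with GNU as and clang */
    /* add.w ip, #(32 * 2) */    /* two-operand shorthand: GNU as only, per this commit */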