arc: replace 32-bit instructions with possible 16-bit instructions

Replace 32-bit instructions with their 16-bit equivalents where possible, to
get better code density.

Signed-off-by: Wayne Ren <wei.ren@synopsys.com>
Wayne Ren, 2019-08-29 18:52:04 +08:00 (committed by Carles Cufí)
commit a75b0014fb
8 changed files with 124 additions and 124 deletions
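
ARCv2 cores implement a compact 16-bit instruction subset alongside the regular 32-bit encodings; the compact forms carry an "_s" suffix and exist only for certain register/operand combinations (typically a small register subset such as r0-r3/r12-r15, plus sp and blink, and small immediates). That restriction is why the substitution is only done "where possible". As a minimal sketch of the density win, assuming an ARCv2 GNU toolchain, the hypothetical snippet below (not part of this commit) pairs base and compact encodings; each pair does the same work, and the compact form should assemble to 2 bytes instead of 4 (verifiable with objdump):

/* density_demo: illustrative only, not from this patch */
density_demo:
	mov   r0, sp      /* base encoding, 32 bits */
	mov_s r0, sp      /* compact encoding, 16 bits */
	nop               /* 32-bit nop */
	nop_s             /* 16-bit nop */
	j_s   [blink]     /* compact return */

Operands that fall outside the compact encodings must stay 32-bit, which is why lines such as "pop r5" and "st r30, [sp, ...]" are left untouched in the diff below.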


@@ -80,7 +80,7 @@ SECTION_FUNC(TEXT, _firq_enter)
_check_and_inc_int_nest_counter r0, r1
bne.d firq_nest
- mov r0, sp
+ mov_s r0, sp
_get_curr_cpu_irq_stack sp
#if CONFIG_RGF_NUM_BANKS != 1
@@ -230,7 +230,7 @@ _firq_reschedule:
#endif
#ifdef CONFIG_SMP
- mov r2, r1
+ mov_s r2, r1
#else
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
@@ -240,7 +240,7 @@ _firq_reschedule:
st _CAUSE_FIRQ, [r2, _thread_offset_to_relinquish_cause]
#ifdef CONFIG_SMP
- mov r2, r0
+ mov_s r2, r0
#else
ld_s r2, [r1, _kernel_offset_to_ready_q_cache]
st_s r2, [r1, _kernel_offset_to_current]
@@ -257,7 +257,7 @@ _firq_reschedule:
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
push_s r2
- mov r0, r2
+ mov_s r0, r2
bl configure_mpu_thread
pop_s r2
#endif
@@ -273,9 +273,9 @@ _firq_reschedule:
ld r3, [r2, _thread_offset_to_relinquish_cause]
breq r3, _CAUSE_RIRQ, _firq_return_from_rirq
- nop
+ nop_s
breq r3, _CAUSE_FIRQ, _firq_return_from_firq
- nop
+ nop_s
/* fall through */
@@ -283,7 +283,7 @@ _firq_reschedule:
_firq_return_from_coop:
/* pc into ilink */
pop_s r0
- mov ilink, r0
+ mov_s ilink, r0
pop_s r0 /* status32 into r0 */
sr r0, [_ARC_V2_STATUS32_P0]


@@ -65,7 +65,7 @@ _exc_entry:
* and exception is raised, then here it's guaranteed that
* exception handling has necessary stack to use
*/
- mov ilink, sp
+ mov_s ilink, sp
_get_curr_cpu_irq_stack sp
sub sp, sp, (CONFIG_ISR_STACK_SIZE - EXCEPTION_STACK_SIZE)
@@ -91,9 +91,9 @@ _exc_entry:
st_s r0, [sp, ___isf_t_pc_OFFSET] /* eret into pc */
/* sp is parameter of _Fault */
- mov r0, sp
+ mov_s r0, sp
/* ilink is the thread's original sp */
- mov r1, ilink
+ mov_s r1, ilink
jl _Fault
_exc_return:
@@ -109,7 +109,7 @@ _exc_return:
#ifdef CONFIG_SMP
bl z_arch_smp_switch_in_isr
breq r0, 0, _exc_return_from_exc
- mov r2, r0
+ mov_s r2, r0
#else
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
@@ -143,7 +143,7 @@ _exc_return:
/* save r2 in ilink because of the possible following reg
* bank switch
*/
- mov ilink, r2
+ mov_s ilink, r2
#endif
lr r3, [_ARC_V2_STATUS32]
and r3,r3,(~(_ARC_V2_STATUS32_AE | _ARC_V2_STATUS32_RB(7)))
@@ -156,18 +156,18 @@ _exc_return:
*/
#ifdef CONFIG_ARC_SECURE_FIRMWARE
- mov r3, (1 << (ARC_N_IRQ_START_LEVEL - 1))
+ mov_s r3, (1 << (ARC_N_IRQ_START_LEVEL - 1))
#else
- mov r3, (1 << (CONFIG_NUM_IRQ_PRIO_LEVELS - 1))
+ mov_s r3, (1 << (CONFIG_NUM_IRQ_PRIO_LEVELS - 1))
#endif
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
- push r2
- mov r0, _ARC_V2_AUX_IRQ_ACT
- mov r1, r3
- mov r6, ARC_S_CALL_AUX_WRITE
+ push_s r2
+ mov_s r0, _ARC_V2_AUX_IRQ_ACT
+ mov_s r1, r3
+ mov_s r6, ARC_S_CALL_AUX_WRITE
sjli SJLI_CALL_ARC_SECURE
- pop r2
+ pop_s r2
#else
sr r3, [_ARC_V2_AUX_IRQ_ACT]
#endif
@@ -185,7 +185,7 @@ _exc_return_from_exc:
sr r0, [_ARC_V2_ERET]
_pop_irq_stack_frame
- mov sp, ilink
+ mov_s sp, ilink
rtie
@@ -197,12 +197,12 @@ SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
cmp ilink, _TRAP_S_CALL_SYSTEM_CALL
bne _do_non_syscall_trap
/* do sys_call */
- mov ilink, K_SYSCALL_LIMIT
+ mov_s ilink, K_SYSCALL_LIMIT
cmp r6, ilink
blt valid_syscall_id
- mov r0, r6
- mov r6, K_SYSCALL_BAD
+ mov_s r0, r6
+ mov_s r6, K_SYSCALL_BAD
valid_syscall_id:
#ifdef CONFIG_ARC_SECURE_FIRMWARE
@@ -218,7 +218,7 @@ valid_syscall_id:
bclr ilink, ilink, _ARC_V2_STATUS32_U_BIT
sr ilink, [_ARC_V2_ERSTATUS]
- mov ilink, _arc_do_syscall
+ mov_s ilink, _arc_do_syscall
sr ilink, [_ARC_V2_ERET]
rtie
@@ -250,7 +250,7 @@ _do_non_syscall_trap:
_check_and_inc_int_nest_counter r0, r1
bne.d exc_nest_handle
- mov r0, sp
+ mov_s r0, sp
_get_curr_cpu_irq_stack sp
exc_nest_handle:
@@ -271,12 +271,12 @@ exc_nest_handle:
#ifdef CONFIG_SMP
bl z_arch_smp_switch_in_isr
breq r0, 0, _exc_return_from_irqoffload_trap
- mov r2, r1
+ mov_s r2, r1
_save_callee_saved_regs
st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause]
- mov r2, r0
+ mov_s r2, r0
#else
mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
@@ -299,7 +299,7 @@ exc_nest_handle:
sflag r3
/* save _ARC_V2_SEC_STAT */
and r3, r3, 0xff
- push r3
+ push_s r3
#endif
_save_callee_saved_regs
@@ -307,7 +307,7 @@ exc_nest_handle:
st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause]
/* note: Ok to use _CAUSE_RIRQ since everything is saved */
- mov r2, r0
+ mov_s r2, r0
#ifndef CONFIG_SMP
st_s r2, [r1, _kernel_offset_to_current]
#endif
@@ -326,9 +326,9 @@ exc_nest_handle:
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
push_s r2
- mov r0, _ARC_V2_AUX_IRQ_ACT
- mov r1, r3
- mov r6, ARC_S_CALL_AUX_WRITE
+ mov_s r0, _ARC_V2_AUX_IRQ_ACT
+ mov_s r1, r3
+ mov_s r6, ARC_S_CALL_AUX_WRITE
sjli SJLI_CALL_ARC_SECURE
pop_s r2
#else


@@ -209,27 +209,27 @@ SECTION_FUNC(TEXT, _isr_wrapper)
* in fact is an action like nop.
* for firq, r0 will be restored later
*/
- st r0, [sp]
+ st_s r0, [sp]
#endif
lr r0, [_ARC_V2_AUX_IRQ_ACT]
ffs r0, r0
cmp r0, 0
#if CONFIG_RGF_NUM_BANKS == 1
bnz rirq_path
- ld r0, [sp]
+ ld_s r0, [sp]
/* 1-register bank FIRQ handling must save registers on stack */
_create_irq_stack_frame
lr r0, [_ARC_V2_STATUS32_P0]
st_s r0, [sp, ___isf_t_status32_OFFSET]
lr r0, [_ARC_V2_ERET]
st_s r0, [sp, ___isf_t_pc_OFFSET]
- mov r3, _firq_exit
- mov r2, _firq_enter
+ mov_s r3, _firq_exit
+ mov_s r2, _firq_enter
j_s [r2]
rirq_path:
- mov r3, _rirq_exit
- mov r2, _rirq_enter
+ mov_s r3, _rirq_exit
+ mov_s r2, _rirq_enter
j_s [r2]
#else
mov.z r3, _firq_exit
@@ -239,8 +239,8 @@ rirq_path:
j_s [r2]
#endif
#else
- mov r3, _rirq_exit
- mov r2, _rirq_enter
+ mov_s r3, _rirq_exit
+ mov_s r2, _rirq_enter
j_s [r2]
#endif
@@ -314,7 +314,7 @@ irq_hint_handled:
sub r0, r0, 16
- mov r1, _sw_isr_table
+ mov_s r1, _sw_isr_table
add3 r0, r1, r0 /* table entries are 8-bytes wide */
ld_s r1, [r0, 4] /* ISR into r1 */
@@ -339,4 +339,4 @@ irq_hint_handled:
/* back from ISR, jump to exit stub */
pop_s r3
j_s [r3]
- nop
+ nop_s


@@ -233,7 +233,7 @@ SECTION_FUNC(TEXT, _rirq_enter)
_check_and_inc_int_nest_counter r0, r1
bne.d rirq_nest
- mov r0, sp
+ mov_s r0, sp
_get_curr_cpu_irq_stack sp
rirq_nest:
@@ -270,11 +270,11 @@ SECTION_FUNC(TEXT, _rirq_exit)
#ifdef CONFIG_SMP
bl z_arch_smp_switch_in_isr
/* r0 points to new thread, r1 points to old thread */
- cmp r0, 0
+ cmp_s r0, 0
beq _rirq_no_reschedule
- mov r2, r1
+ mov_s r2, r1
#else
- mov r1, _kernel
+ mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
/*
@@ -300,7 +300,7 @@ _rirq_reschedule:
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* here need to remember SEC_STAT.IRM bit */
lr r3, [_ARC_V2_SEC_STAT]
- push r3
+ push_s r3
#endif
/* _save_callee_saved_regs expects outgoing thread in r2 */
_save_callee_saved_regs
@@ -308,10 +308,10 @@ _rirq_reschedule:
st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause]
#ifdef CONFIG_SMP
- mov r2, r0
+ mov_s r2, r0
#else
/* incoming thread is in r0: it becomes the new 'current' */
- mov r2, r0
+ mov_s r2, r0
st_s r2, [r1, _kernel_offset_to_current]
#endif
@@ -330,7 +330,7 @@ _rirq_common_interrupt_swap:
#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
push_s r2
- mov r0, r2
+ mov_s r0, r2
bl configure_mpu_thread
pop_s r2
#endif
@@ -354,9 +354,9 @@ _rirq_common_interrupt_swap:
ld r3, [r2, _thread_offset_to_relinquish_cause]
breq r3, _CAUSE_RIRQ, _rirq_return_from_rirq
- nop
+ nop_s
breq r3, _CAUSE_FIRQ, _rirq_return_from_firq
- nop
+ nop_s
/* fall through */
@@ -401,7 +401,7 @@ _rirq_return_from_firq:
_rirq_return_from_rirq:
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* here need to recover SEC_STAT.IRM bit */
- pop r3
+ pop_s r3
sflag r3
#endif
_rirq_no_reschedule:


@@ -48,7 +48,7 @@ SECTION_FUNC(TEXT,__start)
/* lock interrupts: will get unlocked when switch to main task
* also make sure the processor in the correct status
*/
- mov r0, 0
+ mov_s r0, 0
kflag r0
#ifdef CONFIG_ARC_SECURE_FIRMWARE
@@ -60,9 +60,9 @@ SECTION_FUNC(TEXT,__start)
* ARCV2 timer (timer0) is a free run timer, let it start to count
* here.
*/
- mov r0, 0xffffffff
+ mov_s r0, 0xffffffff
sr r0, [_ARC_V2_TMR0_LIMIT]
- mov r0, 0
+ mov_s r0, 0
sr r0, [_ARC_V2_TMR0_COUNT]
#endif
/* interrupt related init */
@@ -76,7 +76,7 @@ SECTION_FUNC(TEXT,__start)
/* set the vector table base early,
* so that exception vectors can be handled.
*/
- mov r0, _VectorTable
+ mov_s r0, _VectorTable
#ifdef CONFIG_ARC_SECURE_FIRMWARE
sr r0, [_ARC_V2_IRQ_VECT_BASE_S]
#else
@@ -95,7 +95,7 @@ SECTION_FUNC(TEXT,__start)
kflag r0
#endif
- mov r1, 1
+ mov_s r1, 1
invalidate_and_disable_icache:
@@ -106,9 +106,9 @@ invalidate_and_disable_icache:
mov_s r2, 0
sr r2, [_ARC_V2_IC_IVIC]
/* writing to IC_IVIC needs 3 NOPs */
- nop
- nop
- nop
+ nop_s
+ nop_s
+ nop_s
sr r1, [_ARC_V2_IC_CTRL]
invalidate_dcache:
@@ -154,7 +154,7 @@ _master_core_startup:
* FIRQ stack when CONFIG_INIT_STACKS is enabled before switching to
* one of them for the rest of the early boot
*/
- mov sp, _main_stack
+ mov_s sp, _main_stack
add sp, sp, CONFIG_MAIN_STACK_SIZE
mov_s r0, _interrupt_stack
@@ -164,7 +164,7 @@ _master_core_startup:
#endif /* CONFIG_INIT_STACKS */
- mov sp, INIT_STACK
+ mov_s sp, INIT_STACK
add sp, sp, INIT_STACK_SIZE
j @_PrepC


@@ -89,7 +89,7 @@ SECTION_FUNC(TEXT, z_arch_switch)
#ifdef CONFIG_ARC_SECURE_FIRMWARE
lr r3, [_ARC_V2_SEC_STAT]
#else
- mov r3, 0
+ mov_s r3, 0
#endif
push_s r3
#endif
@@ -113,7 +113,7 @@ SECTION_FUNC(TEXT, z_arch_switch)
_switch_to_target_thread:
- mov r2, r0
+ mov_s r2, r0
/* entering here, r2 contains the new current thread */
#ifdef CONFIG_ARC_STACK_CHECKING
@@ -131,9 +131,9 @@ _switch_to_target_thread:
ld r3, [r2, _thread_offset_to_relinquish_cause]
breq r3, _CAUSE_RIRQ, _switch_return_from_rirq
- nop
+ nop_s
breq r3, _CAUSE_FIRQ, _switch_return_from_firq
- nop
+ nop_s
/* fall through to _switch_return_from_coop */
@@ -163,7 +163,7 @@ _switch_return_from_firq:
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/* here need to recover SEC_STAT.IRM bit */
- pop r3
+ pop_s r3
sflag r3
#endif
@@ -176,9 +176,9 @@ _switch_return_from_firq:
#endif
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
- mov r0, _ARC_V2_AUX_IRQ_ACT
- mov r1, r3
- mov r6, ARC_S_CALL_AUX_WRITE
+ mov_s r0, _ARC_V2_AUX_IRQ_ACT
+ mov_s r1, r3
+ mov_s r6, ARC_S_CALL_AUX_WRITE
sjli SJLI_CALL_ARC_SECURE
#else
sr r3, [_ARC_V2_AUX_IRQ_ACT]


@@ -14,35 +14,35 @@
#include <v2/irq.h>
.macro clear_scratch_regs
- mov r1, 0
- mov r2, 0
- mov r3, 0
- mov r4, 0
- mov r5, 0
- mov r6, 0
- mov r7, 0
- mov r8, 0
- mov r9, 0
- mov r10, 0
- mov r11, 0
- mov r12, 0
+ mov_s r1, 0
+ mov_s r2, 0
+ mov_s r3, 0
+ mov_s r4, 0
+ mov_s r5, 0
+ mov_s r6, 0
+ mov_s r7, 0
+ mov_s r8, 0
+ mov_s r9, 0
+ mov_s r10, 0
+ mov_s r11, 0
+ mov_s r12, 0
.endm
.macro clear_callee_regs
- mov r25, 0
- mov r24, 0
- mov r23, 0
- mov r22, 0
- mov r21, 0
- mov r20, 0
- mov r19, 0
- mov r18, 0
- mov r17, 0
- mov r16, 0
+ mov_s r25, 0
+ mov_s r24, 0
+ mov_s r23, 0
+ mov_s r22, 0
+ mov_s r21, 0
+ mov_s r20, 0
+ mov_s r19, 0
+ mov_s r18, 0
+ mov_s r17, 0
+ mov_s r16, 0
- mov r15, 0
- mov r14, 0
- mov r13, 0
+ mov_s r15, 0
+ mov_s r14, 0
+ mov_s r13, 0
.endm
GTEXT(z_arc_userspace_enter)
@@ -67,7 +67,7 @@ SECTION_FUNC(TEXT, z_user_thread_entry_wrapper)
/* the start of user sp is in r5 */
pop r5
/* start of privilege stack in blink */
- mov blink, sp
+ mov_s blink, sp
st.aw r0, [r5, -4]
st.aw r1, [r5, -4]
@@ -109,7 +109,7 @@ SECTION_FUNC(TEXT, z_arc_userspace_enter)
add r5, r4, r5
/* start of privilege stack */
add blink, r5, CONFIG_PRIVILEGED_STACK_SIZE+STACK_GUARD_SIZE
- mov sp, r5
+ mov_s sp, r5
push_s r0
push_s r1
@@ -119,9 +119,9 @@ SECTION_FUNC(TEXT, z_arc_userspace_enter)
mov r5, sp /* skip r0, r1, r2, r3 */
#ifdef CONFIG_INIT_STACKS
- mov r0, 0xaaaaaaaa
+ mov_s r0, 0xaaaaaaaa
#else
- mov r0, 0x0
+ mov_s r0, 0x0
#endif
_clear_user_stack:
st.ab r0, [r4, 4]
@@ -129,7 +129,7 @@ _clear_user_stack:
jlt _clear_user_stack
#ifdef CONFIG_ARC_STACK_CHECKING
- mov r1, _kernel
+ mov_s r1, _kernel
ld_s r2, [r1, _kernel_offset_to_current]
_load_stack_check_regs
@@ -149,7 +149,7 @@ _arc_go_to_user_space:
lr r0, [_ARC_V2_STATUS32]
bset r0, r0, _ARC_V2_STATUS32_U_BIT
- mov r1, z_thread_entry_wrapper1
+ mov_s r1, z_thread_entry_wrapper1
sr r0, [_ARC_V2_ERSTATUS]
sr r1, [_ARC_V2_ERET]
@@ -171,18 +171,18 @@ _arc_go_to_user_space:
#else
sr r5, [_ARC_V2_USER_SP]
#endif
- mov sp, blink
+ mov_s sp, blink
- mov r0, 0
+ mov_s r0, 0
clear_callee_regs
clear_scratch_regs
- mov fp, 0
- mov r29, 0
- mov r30, 0
- mov blink, 0
+ mov_s fp, 0
+ mov_s r29, 0
+ mov_s r30, 0
+ mov_s blink, 0
#ifdef CONFIG_EXECUTION_BENCHMARKING
b _capture_value_for_benchmarking_userspace
@@ -206,7 +206,7 @@ SECTION_FUNC(TEXT, _arc_do_syscall)
/* the call id is already checked in trap_s handler */
push_s blink
- mov blink, _k_syscall_table
+ mov_s blink, _k_syscall_table
ld.as r6, [blink, r6]
jl [r6]
@@ -217,8 +217,8 @@ SECTION_FUNC(TEXT, _arc_do_syscall)
*/
clear_scratch_regs
- mov r29, 0
- mov r30, 0
+ mov_s r29, 0
+ mov_s r30, 0
pop_s blink
@@ -237,7 +237,7 @@ SECTION_FUNC(TEXT, _arc_do_syscall)
sr r6, [_ARC_V2_ERSEC_STAT]
#endif
- mov r6, 0
+ mov_s r6, 0
rtie


@@ -42,18 +42,18 @@
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
lr r13, [_ARC_V2_SEC_U_SP]
- st r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
+ st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
lr r13, [_ARC_V2_SEC_K_SP]
- st r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
+ st_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
#else
lr r13, [_ARC_V2_USER_SP]
- st r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
+ st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
lr r13, [_ARC_V2_KERNEL_SP]
- st r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
+ st_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#else
lr r13, [_ARC_V2_USER_SP]
- st r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
+ st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
#endif
#endif
st r30, [sp, ___callee_saved_stack_t_r30_OFFSET]
@@ -64,7 +64,7 @@
#endif
#ifdef CONFIG_FP_SHARING
- ld r13, [r2, ___thread_base_t_user_options_OFFSET]
+ ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
/* K_FP_REGS is bit 1 */
bbit0 r13, 1, 1f
lr r13, [_ARC_V2_FPU_STATUS]
@@ -100,7 +100,7 @@
#endif
#ifdef CONFIG_FP_SHARING
- ld r13, [r2, ___thread_base_t_user_options_OFFSET]
+ ld_s r13, [r2, ___thread_base_t_user_options_OFFSET]
/* K_FP_REGS is bit 1 */
bbit0 r13, 1, 2f
@@ -125,18 +125,18 @@
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
- ld r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
+ ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
sr r13, [_ARC_V2_SEC_U_SP]
- ld r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
+ ld_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
sr r13, [_ARC_V2_SEC_K_SP]
#else
ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
sr r13, [_ARC_V2_USER_SP]
- ld r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
+ ld_s r13, [sp, ___callee_saved_stack_t_kernel_sp_OFFSET]
sr r13, [_ARC_V2_KERNEL_SP]
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#else
- ld r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
+ ld_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
sr r13, [_ARC_V2_USER_SP]
#endif
#endif