ARC: rewrite ASM code with asm-compat macros

Rewrite ARC assembler code with asm-compat macros, so the same
code can be used for both ARCv2 (GNU and MWDT assemblers) and
ARCv3 (GNU assembler).

Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
Signed-off-by: Evgeniy Paltsev <PaltsevEvgeniy@gmail.com>
Evgeniy Paltsev 2021-04-06 15:58:53 +03:00 committed by Kumar Gala
commit c2b61dfe72
8 changed files with 189 additions and 175 deletions
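
For reference, the asm-compat layer wraps each register-width-dependent instruction in an assembler macro that expands to the 32-bit mnemonic on ARCv2 and the 64-bit one on ARCv3. A minimal sketch of the idea, assuming GNU assembler .macro syntax and ARC64 "l"-suffixed mnemonics (the actual asm-compat headers are not part of this diff):

	/* 32-bit flavour (ARCv2) */
	.macro MOVR, d, s
		mov	\d, \s
	.endm
	.macro PUSHR, r
		push	\r
	.endm
	.macro POPR, r
		pop	\r
	.endm

	/* 64-bit flavour (ARCv3/ARC64) */
	.macro MOVR, d, s
		movl	\d, \s
	.endm
	.macro PUSHR, r
		pushl	\r
	.endm
	.macro POPR, r
		popl	\r
	.endm

Code that only uses the upper-case macros then assembles unchanged for either ISA (and, with an MWDT-syntax variant of the same header, under the MetaWare assembler as well).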

View file

@@ -16,6 +16,7 @@
#include <toolchain.h>
#include <linker/sections.h>
#include <arch/cpu.h>
#include <arch/arc/asm-compat/assembler.h>
GTEXT(arch_cpu_idle)
GTEXT(arch_cpu_atomic_idle)
@@ -36,11 +37,12 @@ SECTION_VAR(BSS, z_arc_cpu_sleep_mode)
SECTION_FUNC(TEXT, arch_cpu_idle)
#ifdef CONFIG_TRACING
push_s blink
PUSHR blink
jl sys_trace_idle
pop_s blink
POPR blink
#endif
/* z_arc_cpu_sleep_mode is 32 bit regardless of the platform bitness */
ld r1, [z_arc_cpu_sleep_mode]
or r1, r1, (1 << 4) /* set IRQ-enabled bit */
sleep r1
@@ -57,11 +59,12 @@ SECTION_FUNC(TEXT, arch_cpu_idle)
SECTION_FUNC(TEXT, arch_cpu_atomic_idle)
#ifdef CONFIG_TRACING
push_s blink
PUSHR blink
jl sys_trace_idle
pop_s blink
POPR blink
#endif
/* z_arc_cpu_sleep_mode is 32 bit regardless of the platform bitness */
ld r1, [z_arc_cpu_sleep_mode]
or r1, r1, (1 << 4) /* set IRQ-enabled bit */
sleep r1
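
Note that the ld of z_arc_cpu_sleep_mode is deliberately not converted: the variable is 32 bit on both ISAs (as the comment above says), whereas the LDR compat macro would expand to a 64-bit load on ARCv3. Only register/pointer-width operations get the macro treatment; fixed-width accesses keep the plain mnemonics. Illustration (the second line is a sketch, not code from this diff):

	ld	r1, [z_arc_cpu_sleep_mode]	/* always a 32-bit load */
	LDR	r0, sp				/* register-width: 32-bit load on ARCv2, 64-bit on ARCv3 */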

View file

@@ -17,6 +17,7 @@
#include <arch/cpu.h>
#include <swap_macros.h>
#include <syscall.h>
#include <arch/arc/asm-compat/assembler.h>
GTEXT(_Fault)
GTEXT(__reset)
@@ -43,10 +44,10 @@ GTEXT(z_irq_do_offload);
lr r0,[_ARC_V2_ERSEC_STAT]
st_s r0, [sp, ___isf_t_sec_stat_OFFSET]
#endif
lr r0,[_ARC_V2_ERET]
st_s r0, [sp, ___isf_t_pc_OFFSET]
lr r0,[_ARC_V2_ERSTATUS]
st_s r0, [sp, ___isf_t_status32_OFFSET]
LRR r0, [_ARC_V2_ERET]
STR r0, sp, ___isf_t_pc_OFFSET
LRR r0, [_ARC_V2_ERSTATUS]
STR r0, sp, ___isf_t_status32_OFFSET
.endm
/*
@@ -84,9 +85,9 @@ _exc_entry:
* and exception is raised, then here it's guaranteed that
* exception handling has necessary stack to use
*/
mov ilink, sp
MOVR ilink, sp
_get_curr_cpu_irq_stack sp
sub sp, sp, (CONFIG_ISR_STACK_SIZE - CONFIG_ARC_EXCEPTION_STACK_SIZE)
SUBR sp, sp, (CONFIG_ISR_STACK_SIZE - CONFIG_ARC_EXCEPTION_STACK_SIZE)
/*
* save caller saved registers
@@ -102,9 +103,9 @@ _exc_entry:
_save_exc_regs_into_stack
/* sp is parameter of _Fault */
mov_s r0, sp
MOVR r0, sp
/* ilink is the thread's original sp */
mov r1, ilink
MOVR r1, ilink
jl _Fault
_exc_return:
@@ -118,9 +119,9 @@ _exc_return:
_get_next_switch_handle
breq r0, r2, _exc_return_from_exc
BREQR r0, r2, _exc_return_from_exc
mov_s r2, r0
MOVR r2, r0
#ifdef CONFIG_ARC_SECURE_FIRMWARE
/*
@@ -145,8 +146,8 @@ _exc_return:
*/
mov ilink, r2
#endif
lr r3, [_ARC_V2_STATUS32]
and r3,r3,(~(_ARC_V2_STATUS32_AE | _ARC_V2_STATUS32_RB(7)))
LRR r3, [_ARC_V2_STATUS32]
ANDR r3, r3, (~(_ARC_V2_STATUS32_AE | _ARC_V2_STATUS32_RB(7)))
kflag r3
/* pretend lowest priority interrupt happened to use common handler
* if exception is raised in irq, i.e., _ARC_V2_AUX_IRQ_ACT !=0,
@@ -158,7 +159,7 @@ _exc_return:
#ifdef CONFIG_ARC_SECURE_FIRMWARE
mov_s r3, (1 << (ARC_N_IRQ_START_LEVEL - 1))
#else
mov_s r3, (1 << (CONFIG_NUM_IRQ_PRIO_LEVELS - 1))
MOVR r3, (1 << (CONFIG_NUM_IRQ_PRIO_LEVELS - 1))
#endif
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
@@ -169,7 +170,7 @@ _exc_return:
sjli SJLI_CALL_ARC_SECURE
pop_s r2
#else
sr r3, [_ARC_V2_AUX_IRQ_ACT]
SRR r3, [_ARC_V2_AUX_IRQ_ACT]
#endif
#if defined(CONFIG_ARC_FIRQ) && CONFIG_RGF_NUM_BANKS != 1
@@ -183,18 +184,18 @@ _exc_return_from_exc:
/* exception handler may change return address.
* reload it
*/
ld_s r0, [sp, ___isf_t_pc_OFFSET]
sr r0, [_ARC_V2_ERET]
LDR r0, sp, ___isf_t_pc_OFFSET
SRR r0, [_ARC_V2_ERET]
_pop_irq_stack_frame
mov sp, ilink
MOVR sp, ilink
rtie
/* separate entry for trap which may be used by irq_offload, USERSPACE */
SECTION_SUBSEC_FUNC(TEXT,__fault,__ev_trap)
/* get the id of trap_s */
lr ilink, [_ARC_V2_ECR]
and ilink, ilink, 0x3f
LRR ilink, [_ARC_V2_ECR]
ANDR ilink, ilink, 0x3f
#ifdef CONFIG_USERSPACE
cmp ilink, _TRAP_S_CALL_SYSTEM_CALL
bne _do_non_syscall_trap
@@ -236,7 +237,7 @@ _do_non_syscall_trap:
* so its entry is different with normal exception handling, it is
* handled in isr stack
*/
cmp ilink, _TRAP_S_SCALL_IRQ_OFFLOAD
CMPR ilink, _TRAP_S_SCALL_IRQ_OFFLOAD
bne _exc_entry
/* save caller saved registers */
_create_irq_stack_frame
@@ -247,15 +248,15 @@ _do_non_syscall_trap:
_check_and_inc_int_nest_counter r0, r1
bne.d exc_nest_handle
mov_s r0, sp
MOVR r0, sp
_get_curr_cpu_irq_stack sp
exc_nest_handle:
push_s r0
PUSHR r0
jl z_irq_do_offload
pop sp
POPR sp
_dec_int_nest_counter r0, r1
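
Note the operand convention of the memory-access macros: st_s r0, [sp, OFF] becomes STR r0, sp, OFF, with the brackets dropped because the macro composes the address operand itself. A sketch of what the 32-bit STR definition could look like (an assumption; the compat header is not shown in this diff):

	.macro STR, d, s, off=0
		.if \off == 0
			st	\d, [\s]
		.else
			st	\d, [\s, \off]
		.endif
	.endm

The 64-bit flavour would emit a 64-bit store instead, and LDR mirrors this for loads.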

View file

@@ -20,6 +20,7 @@
#include <kernel_structs.h>
#include <arch/cpu.h>
#include <swap_macros.h>
#include <arch/arc/asm-compat/assembler.h>
GTEXT(_isr_wrapper)
GTEXT(_isr_demux)
@@ -238,8 +239,8 @@ rirq_path:
j_s [r2]
#endif
#else
mov_s r3, _rirq_exit
mov_s r2, _rirq_enter
MOVR r3, _rirq_exit
MOVR r2, _rirq_enter
j_s [r2]
#endif
@@ -247,14 +248,14 @@ rirq_path:
.macro exit_tickless_idle
#if defined(CONFIG_PM)
clri r0 /* do not interrupt exiting tickless idle operations */
mov_s r1, _kernel
MOVR r1, _kernel
ld_s r3, [r1, _kernel_offset_to_idle] /* requested idle duration */
breq r3, 0, _skip_pm_save_idle_exit
st 0, [r1, _kernel_offset_to_idle] /* zero idle duration */
push_s blink
PUSHR blink
jl z_pm_save_idle_exit
pop_s blink
POPR blink
_skip_pm_save_idle_exit:
seti r0
@@ -263,16 +264,16 @@ _skip_pm_save_idle_exit:
/* when getting here, r3 contains the interrupt exit stub to call */
SECTION_FUNC(TEXT, _isr_demux)
push_s r3
PUSHR r3
/* according to ARCv2 ISA, r25, r30, r58, r59 are caller-saved
* scratch registers, possibly used by interrupt handlers
*/
push r25
push r30
PUSHR r25
PUSHR r30
#ifdef CONFIG_ARC_HAS_ACCL_REGS
push r58
push r59
PUSHR r58
PUSHR r59
#endif
#ifdef CONFIG_TRACING_ISR
@@ -290,26 +291,30 @@ irq_hint_handled:
sub r0, r0, 16
mov_s r1, _sw_isr_table
add3 r0, r1, r0 /* table entries are 8-bytes wide */
ld_s r1, [r0, 4] /* ISR into r1 */
MOVR r1, _sw_isr_table
/* SW ISR table entries are 8 bytes wide for the 32-bit ISA and
 * 16 bytes wide for the 64-bit ISA */
ASLR r0, r0, (ARC_REGSHIFT + 1)
ADDR r0, r1, r0
/* ISR into r1 */
LDR r1, r0, ARC_REGSZ
jl_s.d [r1]
ld_s r0, [r0] /* delay slot: ISR parameter into r0 */
/* delay slot: ISR parameter into r0 */
LDR r0, r0
#ifdef CONFIG_TRACING_ISR
bl sys_trace_isr_exit
#endif
#ifdef CONFIG_ARC_HAS_ACCL_REGS
pop r59
pop r58
POPR r59
POPR r58
#endif
pop r30
pop r25
POPR r30
POPR r25
/* back from ISR, jump to exit stub */
pop_s r3
POPR r3
j_s [r3]
nop_s
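
The rewritten table lookup scales with the pointer size instead of hard-coding the 8-byte stride. Each _sw_isr_table entry is two pointer-sized fields, so (assuming ARC_REGSHIFT is 2 on ARCv2 and 3 on ARCv3) the sequence above works out as:

	/* entry layout: [+0] ISR argument, [+ARC_REGSZ] ISR function;
	 * entry size = 2 << ARC_REGSHIFT = 8 bytes (ARCv2) or 16 bytes (ARCv3)
	 */
	ASLR	r0, r0, (ARC_REGSHIFT + 1)	/* irq index -> byte offset */
	ADDR	r0, r1, r0			/* r1 = table base; r0 = &entry */
	LDR	r1, r0, ARC_REGSZ		/* handler into r1 */
	LDR	r0, r0				/* argument into r0 */

This is the same code as in the hunk, annotated: the (ARC_REGSHIFT + 1) shift multiplies the index by two register widths.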

View file

@@ -20,6 +20,7 @@
#include <linker/sections.h>
#include <arch/cpu.h>
#include <swap_macros.h>
#include <arch/arc/asm-compat/assembler.h>
GTEXT(_rirq_enter)
GTEXT(_rirq_exit)
@@ -216,11 +217,11 @@ SECTION_FUNC(TEXT, _rirq_enter)
_check_and_inc_int_nest_counter r0, r1
bne.d rirq_nest
mov_s r0, sp
MOVR r0, sp
_get_curr_cpu_irq_stack sp
rirq_nest:
push_s r0
PUSHR r0
seti
j _isr_demux
@@ -236,7 +237,7 @@ rirq_nest:
SECTION_FUNC(TEXT, _rirq_exit)
clri
pop sp
POPR sp
_dec_int_nest_counter r0, r1
@@ -251,7 +252,7 @@ SECTION_FUNC(TEXT, _rirq_exit)
*/
_get_next_switch_handle
cmp r0, r2
CMPR r0, r2
beq _rirq_no_switch
#ifdef CONFIG_ARC_SECURE_FIRMWARE
@@ -266,7 +267,7 @@ SECTION_FUNC(TEXT, _rirq_exit)
st _CAUSE_RIRQ, [r2, _thread_offset_to_relinquish_cause]
/* mov new thread (r0) to r2 */
mov r2, r0
MOVR r2, r0
/* _rirq_newthread_switch required by exception handling */
.align 4
@@ -295,11 +296,11 @@ _rirq_switch_from_coop:
*/
/* carve fake stack */
sub sp, sp, ___isf_t_pc_OFFSET
SUBR sp, sp, ___isf_t_pc_OFFSET
/* reset zero-overhead loops */
st 0, [sp, ___isf_t_lp_end_OFFSET]
STR 0, sp, ___isf_t_lp_end_OFFSET
/*
* r13 is part of both the callee and caller-saved register sets because
@@ -307,14 +308,14 @@ _rirq_switch_from_coop:
* IRQ prologue. r13 thus has to be set to its correct value in the IRQ
* stack frame.
*/
st_s r13, [sp, ___isf_t_r13_OFFSET]
STR r13, sp, ___isf_t_r13_OFFSET
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
push_s blink
PUSHR blink
bl z_thread_mark_switched_in
pop_s blink
POPR blink
#endif
/* stack now has the IRQ stack frame layout, pointing to sp */
/* rtie will pop the rest from the stack */
@@ -327,11 +328,11 @@ _rirq_switch_from_rirq:
_set_misc_regs_irq_switch_from_irq
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
push_s blink
PUSHR blink
bl z_thread_mark_switched_in
pop_s blink
POPR blink
#endif
_rirq_no_switch:
rtie
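
CMPR and BREQR follow the same pattern for compares, which must be full register width now that thread switch handles are pointers. A sketch, assumed by analogy with the other compat macros (the actual definitions are not in this diff):

	/* 32-bit flavour */
	.macro CMPR, op1, op2
		cmp	\op1, \op2
	.endm
	/* 64-bit flavour */
	.macro CMPR, op1, op2
		cmpl	\op1, \op2
	.endm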

View file

@@ -15,6 +15,7 @@
#include <linker/sections.h>
#include <arch/cpu.h>
#include <swap_macros.h>
#include <arch/arc/asm-compat/assembler.h>
GDATA(z_interrupt_stacks)
GDATA(z_main_stack)
@@ -65,11 +66,11 @@ SECTION_FUNC(TEXT,__start)
/* set the vector table base early,
* so that exception vectors can be handled.
*/
mov_s r0, _VectorTable
MOVR r0, _VectorTable
#ifdef CONFIG_ARC_SECURE_FIRMWARE
sr r0, [_ARC_V2_IRQ_VECT_BASE_S]
#else
sr r0, [_ARC_V2_IRQ_VECT_BASE]
SRR r0, [_ARC_V2_IRQ_VECT_BASE]
#endif
lr r0, [_ARC_V2_STATUS32]

View file

@@ -21,6 +21,7 @@
#include <arch/cpu.h>
#include <v2/irq.h>
#include <swap_macros.h>
#include <arch/arc/asm-compat/assembler.h>
GTEXT(z_arc_switch)
@@ -60,7 +61,7 @@ SECTION_FUNC(TEXT, z_arc_switch)
* get old_thread from r1
*/
sub r2, r1, ___thread_t_switch_handle_OFFSET
SUBR r2, r1, ___thread_t_switch_handle_OFFSET
st _CAUSE_COOP, [r2, _thread_offset_to_relinquish_cause]
@@ -69,8 +70,8 @@ SECTION_FUNC(TEXT, z_arc_switch)
* Save status32 and blink on the stack before the callee-saved registers.
* This is the same layout as the start of an IRQ stack frame.
*/
lr r3, [_ARC_V2_STATUS32]
push_s r3
LRR r3, [_ARC_V2_STATUS32]
PUSHR r3
#ifdef CONFIG_ARC_HAS_SECURE
#ifdef CONFIG_ARC_SECURE_FIRMWARE
@@ -81,7 +82,7 @@ SECTION_FUNC(TEXT, z_arc_switch)
push_s r3
#endif
push_s blink
PUSHR blink
_store_old_thread_callee_regs
@@ -90,7 +91,7 @@ SECTION_FUNC(TEXT, z_arc_switch)
*/
_disable_stack_checking r3
mov_s r2, r0
MOVR r2, r0
_load_new_thread_callee_regs
@@ -104,22 +105,22 @@ SECTION_FUNC(TEXT, z_arc_switch)
.align 4
_switch_return_from_coop:
pop_s blink /* pc into blink */
POPR blink /* pc into blink */
#ifdef CONFIG_ARC_HAS_SECURE
pop_s r3 /* pop SEC_STAT */
#ifdef CONFIG_ARC_SECURE_FIRMWARE
sflag r3
#endif
#endif
pop_s r3 /* status32 into r3 */
POPR r3 /* status32 into r3 */
kflag r3 /* write status32 */
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
push_s blink
PUSHR blink
bl z_thread_mark_switched_in
pop_s blink
POPR blink
#endif
j_s [blink]
@@ -135,11 +136,11 @@ _switch_return_from_firq:
* thread
*/
lr r3, [_ARC_V2_AUX_IRQ_ACT]
LRR r3, [_ARC_V2_AUX_IRQ_ACT]
#ifdef CONFIG_ARC_SECURE_FIRMWARE
or r3, r3, (1 << (ARC_N_IRQ_START_LEVEL - 1))
#else
or r3, r3, (1 << (CONFIG_NUM_IRQ_PRIO_LEVELS - 1))
ORR r3, r3, (1 << (CONFIG_NUM_IRQ_PRIO_LEVELS - 1))
#endif
#ifdef CONFIG_ARC_NORMAL_FIRMWARE
@@ -148,13 +149,13 @@ _switch_return_from_firq:
mov_s r6, ARC_S_CALL_AUX_WRITE
sjli SJLI_CALL_ARC_SECURE
#else
sr r3, [_ARC_V2_AUX_IRQ_ACT]
SRR r3, [_ARC_V2_AUX_IRQ_ACT]
#endif
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
push_s blink
PUSHR blink
bl z_thread_mark_switched_in
pop_s blink
POPR blink
#endif
rtie

View file

@@ -14,6 +14,7 @@
#include <toolchain.h>
#include <linker/sections.h>
#include <v2/irq.h>
#include <arch/arc/asm-compat/assembler.h>
GTEXT(z_thread_entry_wrapper)
GTEXT(z_thread_entry_wrapper1)
@@ -30,9 +31,9 @@ GTEXT(z_thread_entry_wrapper1)
SECTION_FUNC(TEXT, z_thread_entry_wrapper)
seti _ARC_V2_INIT_IRQ_LOCK_KEY
z_thread_entry_wrapper1:
pop_s r3
pop_s r2
pop_s r1
pop_s r0
POPR r3
POPR r2
POPR r1
POPR r0
j z_thread_entry
nop

View file

@@ -14,30 +14,31 @@
#include <toolchain.h>
#include <arch/cpu.h>
#include <arch/arc/tool-compat.h>
#include <arch/arc/asm-compat/assembler.h>
#ifdef _ASMLANGUAGE
/* save callee regs of current thread in r2 */
.macro _save_callee_saved_regs
sub_s sp, sp, ___callee_saved_stack_t_SIZEOF
SUBR sp, sp, ___callee_saved_stack_t_SIZEOF
/* save regs on stack */
st_s r13, [sp, ___callee_saved_stack_t_r13_OFFSET]
st_s r14, [sp, ___callee_saved_stack_t_r14_OFFSET]
st_s r15, [sp, ___callee_saved_stack_t_r15_OFFSET]
st r16, [sp, ___callee_saved_stack_t_r16_OFFSET]
st r17, [sp, ___callee_saved_stack_t_r17_OFFSET]
st r18, [sp, ___callee_saved_stack_t_r18_OFFSET]
st r19, [sp, ___callee_saved_stack_t_r19_OFFSET]
st r20, [sp, ___callee_saved_stack_t_r20_OFFSET]
st r21, [sp, ___callee_saved_stack_t_r21_OFFSET]
st r22, [sp, ___callee_saved_stack_t_r22_OFFSET]
st r23, [sp, ___callee_saved_stack_t_r23_OFFSET]
st r24, [sp, ___callee_saved_stack_t_r24_OFFSET]
st r25, [sp, ___callee_saved_stack_t_r25_OFFSET]
st r26, [sp, ___callee_saved_stack_t_r26_OFFSET]
st fp, [sp, ___callee_saved_stack_t_fp_OFFSET]
STR r13, sp, ___callee_saved_stack_t_r13_OFFSET
STR r14, sp, ___callee_saved_stack_t_r14_OFFSET
STR r15, sp, ___callee_saved_stack_t_r15_OFFSET
STR r16, sp, ___callee_saved_stack_t_r16_OFFSET
STR r17, sp, ___callee_saved_stack_t_r17_OFFSET
STR r18, sp, ___callee_saved_stack_t_r18_OFFSET
STR r19, sp, ___callee_saved_stack_t_r19_OFFSET
STR r20, sp, ___callee_saved_stack_t_r20_OFFSET
STR r21, sp, ___callee_saved_stack_t_r21_OFFSET
STR r22, sp, ___callee_saved_stack_t_r22_OFFSET
STR r23, sp, ___callee_saved_stack_t_r23_OFFSET
STR r24, sp, ___callee_saved_stack_t_r24_OFFSET
STR r25, sp, ___callee_saved_stack_t_r25_OFFSET
STR r26, sp, ___callee_saved_stack_t_r26_OFFSET
STR fp, sp, ___callee_saved_stack_t_fp_OFFSET
#ifdef CONFIG_USERSPACE
#ifdef CONFIG_ARC_HAS_SECURE
@@ -57,11 +58,11 @@
st_s r13, [sp, ___callee_saved_stack_t_user_sp_OFFSET]
#endif
#endif
st r30, [sp, ___callee_saved_stack_t_r30_OFFSET]
STR r30, sp, ___callee_saved_stack_t_r30_OFFSET
#ifdef CONFIG_ARC_HAS_ACCL_REGS
st r58, [sp, ___callee_saved_stack_t_r58_OFFSET]
st r59, [sp, ___callee_saved_stack_t_r59_OFFSET]
STR r58, sp, ___callee_saved_stack_t_r58_OFFSET
STR r59, sp, ___callee_saved_stack_t_r59_OFFSET
#endif
#ifdef CONFIG_FPU_SHARING
@@ -87,17 +88,17 @@
#endif
/* save stack pointer in struct k_thread */
st sp, [r2, _thread_offset_to_sp]
STR sp, r2, _thread_offset_to_sp
.endm
/* load the callee regs of thread (in r2)*/
.macro _load_callee_saved_regs
/* restore stack pointer from struct k_thread */
ld sp, [r2, _thread_offset_to_sp]
LDR sp, r2, _thread_offset_to_sp
#ifdef CONFIG_ARC_HAS_ACCL_REGS
ld r58, [sp, ___callee_saved_stack_t_r58_OFFSET]
ld r59, [sp, ___callee_saved_stack_t_r59_OFFSET]
LDR r58, sp, ___callee_saved_stack_t_r58_OFFSET
LDR r59, sp, ___callee_saved_stack_t_r59_OFFSET
#endif
#ifdef CONFIG_FPU_SHARING
@@ -142,30 +143,30 @@
#endif
#endif
ld_s r13, [sp, ___callee_saved_stack_t_r13_OFFSET]
ld_s r14, [sp, ___callee_saved_stack_t_r14_OFFSET]
ld_s r15, [sp, ___callee_saved_stack_t_r15_OFFSET]
ld r16, [sp, ___callee_saved_stack_t_r16_OFFSET]
ld r17, [sp, ___callee_saved_stack_t_r17_OFFSET]
ld r18, [sp, ___callee_saved_stack_t_r18_OFFSET]
ld r19, [sp, ___callee_saved_stack_t_r19_OFFSET]
ld r20, [sp, ___callee_saved_stack_t_r20_OFFSET]
ld r21, [sp, ___callee_saved_stack_t_r21_OFFSET]
ld r22, [sp, ___callee_saved_stack_t_r22_OFFSET]
ld r23, [sp, ___callee_saved_stack_t_r23_OFFSET]
ld r24, [sp, ___callee_saved_stack_t_r24_OFFSET]
ld r25, [sp, ___callee_saved_stack_t_r25_OFFSET]
ld r26, [sp, ___callee_saved_stack_t_r26_OFFSET]
ld fp, [sp, ___callee_saved_stack_t_fp_OFFSET]
ld r30, [sp, ___callee_saved_stack_t_r30_OFFSET]
LDR r13, sp, ___callee_saved_stack_t_r13_OFFSET
LDR r14, sp, ___callee_saved_stack_t_r14_OFFSET
LDR r15, sp, ___callee_saved_stack_t_r15_OFFSET
LDR r16, sp, ___callee_saved_stack_t_r16_OFFSET
LDR r17, sp, ___callee_saved_stack_t_r17_OFFSET
LDR r18, sp, ___callee_saved_stack_t_r18_OFFSET
LDR r19, sp, ___callee_saved_stack_t_r19_OFFSET
LDR r20, sp, ___callee_saved_stack_t_r20_OFFSET
LDR r21, sp, ___callee_saved_stack_t_r21_OFFSET
LDR r22, sp, ___callee_saved_stack_t_r22_OFFSET
LDR r23, sp, ___callee_saved_stack_t_r23_OFFSET
LDR r24, sp, ___callee_saved_stack_t_r24_OFFSET
LDR r25, sp, ___callee_saved_stack_t_r25_OFFSET
LDR r26, sp, ___callee_saved_stack_t_r26_OFFSET
LDR fp, sp, ___callee_saved_stack_t_fp_OFFSET
LDR r30, sp, ___callee_saved_stack_t_r30_OFFSET
add_s sp, sp, ___callee_saved_stack_t_SIZEOF
ADDR sp, sp, ___callee_saved_stack_t_SIZEOF
.endm
/* discard callee regs */
.macro _discard_callee_saved_regs
add_s sp, sp, ___callee_saved_stack_t_SIZEOF
ADDR sp, sp, ___callee_saved_stack_t_SIZEOF
.endm
/*
@@ -174,33 +175,33 @@
*/
.macro _create_irq_stack_frame
sub_s sp, sp, ___isf_t_SIZEOF
SUBR sp, sp, ___isf_t_SIZEOF
st blink, [sp, ___isf_t_blink_OFFSET]
STR blink, sp, ___isf_t_blink_OFFSET
/* store these right away so we can use them if needed */
st_s r13, [sp, ___isf_t_r13_OFFSET]
st_s r12, [sp, ___isf_t_r12_OFFSET]
st r11, [sp, ___isf_t_r11_OFFSET]
st r10, [sp, ___isf_t_r10_OFFSET]
st r9, [sp, ___isf_t_r9_OFFSET]
st r8, [sp, ___isf_t_r8_OFFSET]
st r7, [sp, ___isf_t_r7_OFFSET]
st r6, [sp, ___isf_t_r6_OFFSET]
st r5, [sp, ___isf_t_r5_OFFSET]
st r4, [sp, ___isf_t_r4_OFFSET]
st_s r3, [sp, ___isf_t_r3_OFFSET]
st_s r2, [sp, ___isf_t_r2_OFFSET]
st_s r1, [sp, ___isf_t_r1_OFFSET]
st_s r0, [sp, ___isf_t_r0_OFFSET]
STR r13, sp, ___isf_t_r13_OFFSET
STR r12, sp, ___isf_t_r12_OFFSET
STR r11, sp, ___isf_t_r11_OFFSET
STR r10, sp, ___isf_t_r10_OFFSET
STR r9, sp, ___isf_t_r9_OFFSET
STR r8, sp, ___isf_t_r8_OFFSET
STR r7, sp, ___isf_t_r7_OFFSET
STR r6, sp, ___isf_t_r6_OFFSET
STR r5, sp, ___isf_t_r5_OFFSET
STR r4, sp, ___isf_t_r4_OFFSET
STR r3, sp, ___isf_t_r3_OFFSET
STR r2, sp, ___isf_t_r2_OFFSET
STR r1, sp, ___isf_t_r1_OFFSET
STR r0, sp, ___isf_t_r0_OFFSET
mov r0, lp_count
st_s r0, [sp, ___isf_t_lp_count_OFFSET]
lr r1, [_ARC_V2_LP_START]
lr r0, [_ARC_V2_LP_END]
st_s r1, [sp, ___isf_t_lp_start_OFFSET]
st_s r0, [sp, ___isf_t_lp_end_OFFSET]
MOVR r0, lp_count
STR r0, sp, ___isf_t_lp_count_OFFSET
LRR r1, [_ARC_V2_LP_START]
LRR r0, [_ARC_V2_LP_END]
STR r1, sp, ___isf_t_lp_start_OFFSET
STR r0, sp, ___isf_t_lp_end_OFFSET
#ifdef CONFIG_CODE_DENSITY
lr r1, [_ARC_V2_JLI_BASE]
@@ -219,7 +220,7 @@
*/
.macro _pop_irq_stack_frame
ld blink, [sp, ___isf_t_blink_OFFSET]
LDR blink, sp, ___isf_t_blink_OFFSET
#ifdef CONFIG_CODE_DENSITY
ld_s r1, [sp, ___isf_t_jli_base_OFFSET]
@@ -230,27 +231,27 @@
sr r2, [_ARC_V2_EI_BASE]
#endif
ld_s r0, [sp, ___isf_t_lp_count_OFFSET]
mov lp_count, r0
ld_s r1, [sp, ___isf_t_lp_start_OFFSET]
ld_s r0, [sp, ___isf_t_lp_end_OFFSET]
sr r1, [_ARC_V2_LP_START]
sr r0, [_ARC_V2_LP_END]
LDR r0, sp, ___isf_t_lp_count_OFFSET
MOVR lp_count, r0
LDR r1, sp, ___isf_t_lp_start_OFFSET
LDR r0, sp, ___isf_t_lp_end_OFFSET
SRR r1, [_ARC_V2_LP_START]
SRR r0, [_ARC_V2_LP_END]
ld_s r13, [sp, ___isf_t_r13_OFFSET]
ld_s r12, [sp, ___isf_t_r12_OFFSET]
ld r11, [sp, ___isf_t_r11_OFFSET]
ld r10, [sp, ___isf_t_r10_OFFSET]
ld r9, [sp, ___isf_t_r9_OFFSET]
ld r8, [sp, ___isf_t_r8_OFFSET]
ld r7, [sp, ___isf_t_r7_OFFSET]
ld r6, [sp, ___isf_t_r6_OFFSET]
ld r5, [sp, ___isf_t_r5_OFFSET]
ld r4, [sp, ___isf_t_r4_OFFSET]
ld_s r3, [sp, ___isf_t_r3_OFFSET]
ld_s r2, [sp, ___isf_t_r2_OFFSET]
ld_s r1, [sp, ___isf_t_r1_OFFSET]
ld_s r0, [sp, ___isf_t_r0_OFFSET]
LDR r13, sp, ___isf_t_r13_OFFSET
LDR r12, sp, ___isf_t_r12_OFFSET
LDR r11, sp, ___isf_t_r11_OFFSET
LDR r10, sp, ___isf_t_r10_OFFSET
LDR r9, sp, ___isf_t_r9_OFFSET
LDR r8, sp, ___isf_t_r8_OFFSET
LDR r7, sp, ___isf_t_r7_OFFSET
LDR r6, sp, ___isf_t_r6_OFFSET
LDR r5, sp, ___isf_t_r5_OFFSET
LDR r4, sp, ___isf_t_r4_OFFSET
LDR r3, sp, ___isf_t_r3_OFFSET
LDR r2, sp, ___isf_t_r2_OFFSET
LDR r1, sp, ___isf_t_r1_OFFSET
LDR r0, sp, ___isf_t_r0_OFFSET
/*
@ -262,7 +263,7 @@
* status32 differently depending on the execution context they are
* running in (arch_switch(), firq or exception).
*/
add_s sp, sp, ___isf_t_SIZEOF
ADDR sp, sp, ___isf_t_SIZEOF
.endm
@@ -307,7 +308,7 @@
ld.as MACRO_ARG(reg1), [_curr_cpu, MACRO_ARG(reg1)]
ld MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
mov MACRO_ARG(reg1), _kernel
MOVR MACRO_ARG(reg1), _kernel
ld MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
add MACRO_ARG(reg2), MACRO_ARG(reg2), 1
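
MACRO_ARG comes from tool-compat.h (included at the top of this header) and hides the different macro-argument reference syntax of the two toolchains. Roughly, as an assumption about that header (it is not part of this diff):

	#ifdef __CCAC__			/* MWDT assembler: bare argument name */
	#define MACRO_ARG(x)	x
	#else				/* GNU assembler: \-prefixed argument */
	#define MACRO_ARG(x)	\x
	#endif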
@@ -329,7 +330,7 @@
ld.as MACRO_ARG(reg1), [_curr_cpu, MACRO_ARG(reg1)]
ld MACRO_ARG(reg2), [MACRO_ARG(reg1), ___cpu_t_nested_OFFSET]
#else
mov MACRO_ARG(reg1), _kernel
MOVR MACRO_ARG(reg1), _kernel
ld MACRO_ARG(reg2), [MACRO_ARG(reg1), _kernel_offset_to_nested]
#endif
sub MACRO_ARG(reg2), MACRO_ARG(reg2), 1
@@ -374,21 +375,21 @@
ld.as MACRO_ARG(irq_sp), [_curr_cpu, MACRO_ARG(irq_sp)]
ld MACRO_ARG(irq_sp), [MACRO_ARG(irq_sp), ___cpu_t_irq_stack_OFFSET]
#else
mov MACRO_ARG(irq_sp), _kernel
ld MACRO_ARG(irq_sp), [MACRO_ARG(irq_sp), _kernel_offset_to_irq_stack]
MOVR MACRO_ARG(irq_sp), _kernel
LDR MACRO_ARG(irq_sp), MACRO_ARG(irq_sp), _kernel_offset_to_irq_stack
#endif
.endm
/* macro to push aux reg through reg */
.macro PUSHAX, reg, aux
lr MACRO_ARG(reg), [MACRO_ARG(aux)]
st.a MACRO_ARG(reg), [sp, -4]
LRR MACRO_ARG(reg), [MACRO_ARG(aux)]
PUSHR MACRO_ARG(reg)
.endm
/* macro to pop aux reg through reg */
.macro POPAX, reg, aux
ld.ab MACRO_ARG(reg), [sp, 4]
sr MACRO_ARG(reg), [MACRO_ARG(aux)]
POPR MACRO_ARG(reg)
SRR MACRO_ARG(reg), [MACRO_ARG(aux)]
.endm
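
Since PUSHR/POPR now supply the correct slot size, PUSHAX/POPAX work unchanged on both ISAs and no longer hard-code a 4-byte stack adjustment. Usage sketch (scratch and aux registers chosen for illustration):

	PUSHAX	r3, _ARC_V2_STATUS32	/* read STATUS32 through r3, push it */
	/* ... code that may clobber STATUS32 ... */
	POPAX	r3, _ARC_V2_STATUS32	/* pop it and write it back */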
@@ -479,10 +480,10 @@
/* macro to get next switch handle in assembly */
.macro _get_next_switch_handle
push_s r2
mov r0, sp
PUSHR r2
MOVR r0, sp
bl z_arch_get_next_switch_handle
pop_s r2
POPR r2
.endm
/* macro to disable stack checking in assembly, need a GPR