arch: remove the use of z_arch_esf_t completely from internal

Created `GEN_OFFSET_STRUCT` & `GEN_NAMED_OFFSET_STRUCT` macros that
work for `struct`s, and removed the use of `z_arch_esf_t`
completely.

Signed-off-by: Yong Cong Sin <ycsin@meta.com>
commit 6a3cb93d88
Author: Yong Cong Sin
Date: 2024-06-03 18:02:05 +08:00
Committed by: Maureen Helm
12 changed files with 195 additions and 190 deletions
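In effect, every offset symbol that used to be generated from the `z_arch_esf_t` typedef is now generated from the underlying struct tag. A minimal sketch of the naming change, using the `ra` member as an arbitrary example:

```c
/* Before: symbol derived from the z_arch_esf_t typedef */
GEN_OFFSET_SYM(z_arch_esf_t, ra);   /* emits __z_arch_esf_t_ra_OFFSET */

/* After: symbol derived from the struct tag, so the typedef can go away */
GEN_OFFSET_STRUCT(arch_esf, ra);    /* emits __struct_arch_esf_ra_OFFSET */
```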

@@ -14,7 +14,7 @@
#include <mips/regdef.h>
#include <mips/mipsregs.h>
-#define ESF_O(FIELD) __z_arch_esf_t_##FIELD##_OFFSET
+#define ESF_O(FIELD) __struct_arch_esf_##FIELD##_OFFSET
#define THREAD_O(FIELD) _thread_offset_to_##FIELD
/* Convenience macros for loading/storing register states. */
@@ -58,12 +58,12 @@
op v1, ESF_O(v1)(sp) ;
#define STORE_CALLER_SAVED() \
-addi sp, sp, -__z_arch_esf_t_SIZEOF ;\
+addi sp, sp, -__struct_arch_esf_SIZEOF ;\
DO_CALLER_SAVED(OP_STOREREG) ;
#define LOAD_CALLER_SAVED() \
DO_CALLER_SAVED(OP_LOADREG) ;\
-addi sp, sp, __z_arch_esf_t_SIZEOF ;
+addi sp, sp, __struct_arch_esf_SIZEOF ;
/* imports */
GTEXT(_Fault)

@@ -23,32 +23,32 @@ GEN_OFFSET_SYM(_callee_saved_t, s6);
GEN_OFFSET_SYM(_callee_saved_t, s7);
GEN_OFFSET_SYM(_callee_saved_t, s8);
-GEN_OFFSET_SYM(z_arch_esf_t, ra);
-GEN_OFFSET_SYM(z_arch_esf_t, gp);
-GEN_OFFSET_SYM(z_arch_esf_t, t0);
-GEN_OFFSET_SYM(z_arch_esf_t, t1);
-GEN_OFFSET_SYM(z_arch_esf_t, t2);
-GEN_OFFSET_SYM(z_arch_esf_t, t3);
-GEN_OFFSET_SYM(z_arch_esf_t, t4);
-GEN_OFFSET_SYM(z_arch_esf_t, t5);
-GEN_OFFSET_SYM(z_arch_esf_t, t6);
-GEN_OFFSET_SYM(z_arch_esf_t, t7);
-GEN_OFFSET_SYM(z_arch_esf_t, t8);
-GEN_OFFSET_SYM(z_arch_esf_t, t9);
-GEN_OFFSET_SYM(z_arch_esf_t, a0);
-GEN_OFFSET_SYM(z_arch_esf_t, a1);
-GEN_OFFSET_SYM(z_arch_esf_t, a2);
-GEN_OFFSET_SYM(z_arch_esf_t, a3);
-GEN_OFFSET_SYM(z_arch_esf_t, v0);
-GEN_OFFSET_SYM(z_arch_esf_t, v1);
-GEN_OFFSET_SYM(z_arch_esf_t, at);
-GEN_OFFSET_SYM(z_arch_esf_t, epc);
-GEN_OFFSET_SYM(z_arch_esf_t, badvaddr);
-GEN_OFFSET_SYM(z_arch_esf_t, hi);
-GEN_OFFSET_SYM(z_arch_esf_t, lo);
-GEN_OFFSET_SYM(z_arch_esf_t, status);
-GEN_OFFSET_SYM(z_arch_esf_t, cause);
+GEN_OFFSET_STRUCT(arch_esf, ra);
+GEN_OFFSET_STRUCT(arch_esf, gp);
+GEN_OFFSET_STRUCT(arch_esf, t0);
+GEN_OFFSET_STRUCT(arch_esf, t1);
+GEN_OFFSET_STRUCT(arch_esf, t2);
+GEN_OFFSET_STRUCT(arch_esf, t3);
+GEN_OFFSET_STRUCT(arch_esf, t4);
+GEN_OFFSET_STRUCT(arch_esf, t5);
+GEN_OFFSET_STRUCT(arch_esf, t6);
+GEN_OFFSET_STRUCT(arch_esf, t7);
+GEN_OFFSET_STRUCT(arch_esf, t8);
+GEN_OFFSET_STRUCT(arch_esf, t9);
+GEN_OFFSET_STRUCT(arch_esf, a0);
+GEN_OFFSET_STRUCT(arch_esf, a1);
+GEN_OFFSET_STRUCT(arch_esf, a2);
+GEN_OFFSET_STRUCT(arch_esf, a3);
+GEN_OFFSET_STRUCT(arch_esf, v0);
+GEN_OFFSET_STRUCT(arch_esf, v1);
+GEN_OFFSET_STRUCT(arch_esf, at);
+GEN_OFFSET_STRUCT(arch_esf, epc);
+GEN_OFFSET_STRUCT(arch_esf, badvaddr);
+GEN_OFFSET_STRUCT(arch_esf, hi);
+GEN_OFFSET_STRUCT(arch_esf, lo);
+GEN_OFFSET_STRUCT(arch_esf, status);
+GEN_OFFSET_STRUCT(arch_esf, cause);
-GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, STACK_ROUND_UP(sizeof(z_arch_esf_t)));
+GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, STACK_ROUND_UP(sizeof(struct arch_esf)));
GEN_ABS_SYM_END

@@ -35,35 +35,35 @@ GTEXT(_offload_routine)
*/
SECTION_FUNC(exception.entry, _exception)
/* Reserve thread stack space for saving context */
-subi sp, sp, __z_arch_esf_t_SIZEOF
+subi sp, sp, __struct_arch_esf_SIZEOF
/* Preserve all caller-saved registers onto the thread's stack */
-stw ra, __z_arch_esf_t_ra_OFFSET(sp)
-stw r1, __z_arch_esf_t_r1_OFFSET(sp)
-stw r2, __z_arch_esf_t_r2_OFFSET(sp)
-stw r3, __z_arch_esf_t_r3_OFFSET(sp)
-stw r4, __z_arch_esf_t_r4_OFFSET(sp)
-stw r5, __z_arch_esf_t_r5_OFFSET(sp)
-stw r6, __z_arch_esf_t_r6_OFFSET(sp)
-stw r7, __z_arch_esf_t_r7_OFFSET(sp)
-stw r8, __z_arch_esf_t_r8_OFFSET(sp)
-stw r9, __z_arch_esf_t_r9_OFFSET(sp)
-stw r10, __z_arch_esf_t_r10_OFFSET(sp)
-stw r11, __z_arch_esf_t_r11_OFFSET(sp)
-stw r12, __z_arch_esf_t_r12_OFFSET(sp)
-stw r13, __z_arch_esf_t_r13_OFFSET(sp)
-stw r14, __z_arch_esf_t_r14_OFFSET(sp)
-stw r15, __z_arch_esf_t_r15_OFFSET(sp)
+stw ra, __struct_arch_esf_ra_OFFSET(sp)
+stw r1, __struct_arch_esf_r1_OFFSET(sp)
+stw r2, __struct_arch_esf_r2_OFFSET(sp)
+stw r3, __struct_arch_esf_r3_OFFSET(sp)
+stw r4, __struct_arch_esf_r4_OFFSET(sp)
+stw r5, __struct_arch_esf_r5_OFFSET(sp)
+stw r6, __struct_arch_esf_r6_OFFSET(sp)
+stw r7, __struct_arch_esf_r7_OFFSET(sp)
+stw r8, __struct_arch_esf_r8_OFFSET(sp)
+stw r9, __struct_arch_esf_r9_OFFSET(sp)
+stw r10, __struct_arch_esf_r10_OFFSET(sp)
+stw r11, __struct_arch_esf_r11_OFFSET(sp)
+stw r12, __struct_arch_esf_r12_OFFSET(sp)
+stw r13, __struct_arch_esf_r13_OFFSET(sp)
+stw r14, __struct_arch_esf_r14_OFFSET(sp)
+stw r15, __struct_arch_esf_r15_OFFSET(sp)
/* Store value of estatus control register */
rdctl et, estatus
-stw et, __z_arch_esf_t_estatus_OFFSET(sp)
+stw et, __struct_arch_esf_estatus_OFFSET(sp)
/* ea-4 is the address of the instruction when the exception happened,
* put this in the stack frame as well
*/
addi r15, ea, -4
-stw r15, __z_arch_esf_t_instr_OFFSET(sp)
+stw r15, __struct_arch_esf_instr_OFFSET(sp)
/* Figure out whether we are here because of an interrupt or an
* exception. If an interrupt, switch stacks and enter IRQ handling
@@ -157,7 +157,7 @@ not_interrupt:
*
* We earlier put ea - 4 in the stack frame, replace it with just ea
*/
-stw ea, __z_arch_esf_t_instr_OFFSET(sp)
+stw ea, __struct_arch_esf_instr_OFFSET(sp)
#ifdef CONFIG_IRQ_OFFLOAD
/* Check the contents of _offload_routine. If non-NULL, jump into
@@ -193,35 +193,35 @@ _exception_exit:
* and return to the interrupted context */
/* Return address from the exception */
-ldw ea, __z_arch_esf_t_instr_OFFSET(sp)
+ldw ea, __struct_arch_esf_instr_OFFSET(sp)
/* Restore estatus
* XXX is this right??? */
-ldw r5, __z_arch_esf_t_estatus_OFFSET(sp)
+ldw r5, __struct_arch_esf_estatus_OFFSET(sp)
wrctl estatus, r5
/* Restore caller-saved registers */
-ldw ra, __z_arch_esf_t_ra_OFFSET(sp)
-ldw r1, __z_arch_esf_t_r1_OFFSET(sp)
-ldw r2, __z_arch_esf_t_r2_OFFSET(sp)
-ldw r3, __z_arch_esf_t_r3_OFFSET(sp)
-ldw r4, __z_arch_esf_t_r4_OFFSET(sp)
-ldw r5, __z_arch_esf_t_r5_OFFSET(sp)
-ldw r6, __z_arch_esf_t_r6_OFFSET(sp)
-ldw r7, __z_arch_esf_t_r7_OFFSET(sp)
-ldw r8, __z_arch_esf_t_r8_OFFSET(sp)
-ldw r9, __z_arch_esf_t_r9_OFFSET(sp)
-ldw r10, __z_arch_esf_t_r10_OFFSET(sp)
-ldw r11, __z_arch_esf_t_r11_OFFSET(sp)
-ldw r12, __z_arch_esf_t_r12_OFFSET(sp)
-ldw r13, __z_arch_esf_t_r13_OFFSET(sp)
-ldw r14, __z_arch_esf_t_r14_OFFSET(sp)
-ldw r15, __z_arch_esf_t_r15_OFFSET(sp)
+ldw ra, __struct_arch_esf_ra_OFFSET(sp)
+ldw r1, __struct_arch_esf_r1_OFFSET(sp)
+ldw r2, __struct_arch_esf_r2_OFFSET(sp)
+ldw r3, __struct_arch_esf_r3_OFFSET(sp)
+ldw r4, __struct_arch_esf_r4_OFFSET(sp)
+ldw r5, __struct_arch_esf_r5_OFFSET(sp)
+ldw r6, __struct_arch_esf_r6_OFFSET(sp)
+ldw r7, __struct_arch_esf_r7_OFFSET(sp)
+ldw r8, __struct_arch_esf_r8_OFFSET(sp)
+ldw r9, __struct_arch_esf_r9_OFFSET(sp)
+ldw r10, __struct_arch_esf_r10_OFFSET(sp)
+ldw r11, __struct_arch_esf_r11_OFFSET(sp)
+ldw r12, __struct_arch_esf_r12_OFFSET(sp)
+ldw r13, __struct_arch_esf_r13_OFFSET(sp)
+ldw r14, __struct_arch_esf_r14_OFFSET(sp)
+ldw r15, __struct_arch_esf_r15_OFFSET(sp)
/* Put the stack pointer back where it was when we entered
* exception state
*/
-addi sp, sp, __z_arch_esf_t_SIZEOF
+addi sp, sp, __struct_arch_esf_SIZEOF
/* All done, copy estatus into status and transfer to ea */
eret

@@ -44,24 +44,24 @@ GEN_OFFSET_SYM(_callee_saved_t, sp);
GEN_OFFSET_SYM(_callee_saved_t, key);
GEN_OFFSET_SYM(_callee_saved_t, retval);
-GEN_OFFSET_SYM(z_arch_esf_t, ra);
-GEN_OFFSET_SYM(z_arch_esf_t, r1);
-GEN_OFFSET_SYM(z_arch_esf_t, r2);
-GEN_OFFSET_SYM(z_arch_esf_t, r3);
-GEN_OFFSET_SYM(z_arch_esf_t, r4);
-GEN_OFFSET_SYM(z_arch_esf_t, r5);
-GEN_OFFSET_SYM(z_arch_esf_t, r6);
-GEN_OFFSET_SYM(z_arch_esf_t, r7);
-GEN_OFFSET_SYM(z_arch_esf_t, r8);
-GEN_OFFSET_SYM(z_arch_esf_t, r9);
-GEN_OFFSET_SYM(z_arch_esf_t, r10);
-GEN_OFFSET_SYM(z_arch_esf_t, r11);
-GEN_OFFSET_SYM(z_arch_esf_t, r12);
-GEN_OFFSET_SYM(z_arch_esf_t, r13);
-GEN_OFFSET_SYM(z_arch_esf_t, r14);
-GEN_OFFSET_SYM(z_arch_esf_t, r15);
-GEN_OFFSET_SYM(z_arch_esf_t, estatus);
-GEN_OFFSET_SYM(z_arch_esf_t, instr);
-GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, sizeof(z_arch_esf_t));
+GEN_OFFSET_STRUCT(arch_esf, ra);
+GEN_OFFSET_STRUCT(arch_esf, r1);
+GEN_OFFSET_STRUCT(arch_esf, r2);
+GEN_OFFSET_STRUCT(arch_esf, r3);
+GEN_OFFSET_STRUCT(arch_esf, r4);
+GEN_OFFSET_STRUCT(arch_esf, r5);
+GEN_OFFSET_STRUCT(arch_esf, r6);
+GEN_OFFSET_STRUCT(arch_esf, r7);
+GEN_OFFSET_STRUCT(arch_esf, r8);
+GEN_OFFSET_STRUCT(arch_esf, r9);
+GEN_OFFSET_STRUCT(arch_esf, r10);
+GEN_OFFSET_STRUCT(arch_esf, r11);
+GEN_OFFSET_STRUCT(arch_esf, r12);
+GEN_OFFSET_STRUCT(arch_esf, r13);
+GEN_OFFSET_STRUCT(arch_esf, r14);
+GEN_OFFSET_STRUCT(arch_esf, r15);
+GEN_OFFSET_STRUCT(arch_esf, estatus);
+GEN_OFFSET_STRUCT(arch_esf, instr);
+GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, sizeof(struct arch_esf));
GEN_ABS_SYM_END

@@ -24,22 +24,22 @@
/* Convenience macro for loading/storing register states. */
#define DO_CALLER_SAVED(op) \
-RV_E( op t0, __z_arch_esf_t_t0_OFFSET(sp) );\
-RV_E( op t1, __z_arch_esf_t_t1_OFFSET(sp) );\
-RV_E( op t2, __z_arch_esf_t_t2_OFFSET(sp) );\
-RV_I( op t3, __z_arch_esf_t_t3_OFFSET(sp) );\
-RV_I( op t4, __z_arch_esf_t_t4_OFFSET(sp) );\
-RV_I( op t5, __z_arch_esf_t_t5_OFFSET(sp) );\
-RV_I( op t6, __z_arch_esf_t_t6_OFFSET(sp) );\
-RV_E( op a0, __z_arch_esf_t_a0_OFFSET(sp) );\
-RV_E( op a1, __z_arch_esf_t_a1_OFFSET(sp) );\
-RV_E( op a2, __z_arch_esf_t_a2_OFFSET(sp) );\
-RV_E( op a3, __z_arch_esf_t_a3_OFFSET(sp) );\
-RV_E( op a4, __z_arch_esf_t_a4_OFFSET(sp) );\
-RV_E( op a5, __z_arch_esf_t_a5_OFFSET(sp) );\
-RV_I( op a6, __z_arch_esf_t_a6_OFFSET(sp) );\
-RV_I( op a7, __z_arch_esf_t_a7_OFFSET(sp) );\
-RV_E( op ra, __z_arch_esf_t_ra_OFFSET(sp) )
+RV_E( op t0, __struct_arch_esf_t0_OFFSET(sp) );\
+RV_E( op t1, __struct_arch_esf_t1_OFFSET(sp) );\
+RV_E( op t2, __struct_arch_esf_t2_OFFSET(sp) );\
+RV_I( op t3, __struct_arch_esf_t3_OFFSET(sp) );\
+RV_I( op t4, __struct_arch_esf_t4_OFFSET(sp) );\
+RV_I( op t5, __struct_arch_esf_t5_OFFSET(sp) );\
+RV_I( op t6, __struct_arch_esf_t6_OFFSET(sp) );\
+RV_E( op a0, __struct_arch_esf_a0_OFFSET(sp) );\
+RV_E( op a1, __struct_arch_esf_a1_OFFSET(sp) );\
+RV_E( op a2, __struct_arch_esf_a2_OFFSET(sp) );\
+RV_E( op a3, __struct_arch_esf_a3_OFFSET(sp) );\
+RV_E( op a4, __struct_arch_esf_a4_OFFSET(sp) );\
+RV_E( op a5, __struct_arch_esf_a5_OFFSET(sp) );\
+RV_I( op a6, __struct_arch_esf_a6_OFFSET(sp) );\
+RV_I( op a7, __struct_arch_esf_a7_OFFSET(sp) );\
+RV_E( op ra, __struct_arch_esf_ra_OFFSET(sp) )
#ifdef CONFIG_EXCEPTION_DEBUG
/* Convenience macro for storing callee saved register [s0 - s11] states. */
@@ -157,7 +157,7 @@ SECTION_FUNC(exception.entry, _isr_wrapper)
/* Save user stack value. Coming from user space, we know this
* can't overflow the privileged stack. The esf will be allocated
* later but it is safe to store our saved user sp here. */
-sr t0, (-__z_arch_esf_t_SIZEOF + __z_arch_esf_t_sp_OFFSET)(sp)
+sr t0, (-__struct_arch_esf_SIZEOF + __struct_arch_esf_sp_OFFSET)(sp)
/* Make sure tls pointer is sane */
lr t0, ___cpu_t_current_OFFSET(s0)
@@ -180,21 +180,21 @@ SECTION_FUNC(exception.entry, _isr_wrapper)
SOC_ISR_SW_STACKING
#else
/* Save caller-saved registers on current thread stack. */
-addi sp, sp, -__z_arch_esf_t_SIZEOF
+addi sp, sp, -__struct_arch_esf_SIZEOF
DO_CALLER_SAVED(sr) ;
#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */
/* Save s0 in the esf and load it with &_current_cpu. */
-sr s0, __z_arch_esf_t_s0_OFFSET(sp)
+sr s0, __struct_arch_esf_s0_OFFSET(sp)
get_current_cpu s0
/* Save MEPC register */
csrr t0, mepc
-sr t0, __z_arch_esf_t_mepc_OFFSET(sp)
+sr t0, __struct_arch_esf_mepc_OFFSET(sp)
/* Save MSTATUS register */
csrr t2, mstatus
-sr t2, __z_arch_esf_t_mstatus_OFFSET(sp)
+sr t2, __struct_arch_esf_mstatus_OFFSET(sp)
#if defined(CONFIG_FPU_SHARING)
/* determine if FPU access was disabled */
@@ -301,7 +301,7 @@ no_fp: /* increment _current->arch.exception_depth */
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
/* Handle context saving at SOC level. */
-addi a0, sp, __z_arch_esf_t_soc_context_OFFSET
+addi a0, sp, __struct_arch_esf_soc_context_OFFSET
jal ra, __soc_save_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
@@ -351,7 +351,7 @@ no_fp: /* increment _current->arch.exception_depth */
/*
* Call _Fault to handle exception.
-* Stack pointer is pointing to a z_arch_esf_t structure, pass it
+* Stack pointer is pointing to a struct arch_esf structure, pass it
* to _Fault (via register a0).
* If _Fault shall return, set return address to
* no_reschedule to restore stack.
@@ -370,9 +370,9 @@ is_kernel_syscall:
* It's safe to always increment by 4, even with compressed
* instructions, because the ecall instruction is always 4 bytes.
*/
-lr t0, __z_arch_esf_t_mepc_OFFSET(sp)
+lr t0, __struct_arch_esf_mepc_OFFSET(sp)
addi t0, t0, 4
-sr t0, __z_arch_esf_t_mepc_OFFSET(sp)
+sr t0, __struct_arch_esf_mepc_OFFSET(sp)
#ifdef CONFIG_PMP_STACK_GUARD
/* Re-activate PMP for m-mode */
@@ -383,7 +383,7 @@ is_kernel_syscall:
#endif
/* Determine what to do. Operation code is in t0. */
-lr t0, __z_arch_esf_t_t0_OFFSET(sp)
+lr t0, __struct_arch_esf_t0_OFFSET(sp)
.if RV_ECALL_RUNTIME_EXCEPT != 0; .err; .endif
beqz t0, do_fault
@@ -396,8 +396,8 @@
#ifdef CONFIG_RISCV_ALWAYS_SWITCH_THROUGH_ECALL
li t1, RV_ECALL_SCHEDULE
bne t0, t1, skip_schedule
-lr a0, __z_arch_esf_t_a0_OFFSET(sp)
-lr a1, __z_arch_esf_t_a1_OFFSET(sp)
+lr a0, __struct_arch_esf_a0_OFFSET(sp)
+lr a1, __struct_arch_esf_a1_OFFSET(sp)
j reschedule
skip_schedule:
#endif
@@ -408,7 +408,7 @@ skip_schedule:
do_fault:
/* Handle RV_ECALL_RUNTIME_EXCEPT. Retrieve reason in a0, esf in A1. */
-lr a0, __z_arch_esf_t_a0_OFFSET(sp)
+lr a0, __struct_arch_esf_a0_OFFSET(sp)
1: mv a1, sp
#ifdef CONFIG_EXCEPTION_DEBUG
@@ -431,8 +431,8 @@ do_irq_offload:
* Routine pointer is in saved a0, argument in saved a1
* so we load them with a1/a0 (reversed).
*/
-lr a1, __z_arch_esf_t_a0_OFFSET(sp)
-lr a0, __z_arch_esf_t_a1_OFFSET(sp)
+lr a1, __struct_arch_esf_a0_OFFSET(sp)
+lr a0, __struct_arch_esf_a1_OFFSET(sp)
/* Increment _current_cpu->nested */
lw t1, ___cpu_t_nested_OFFSET(s0)
@@ -474,18 +474,18 @@ is_user_syscall:
* Same as for is_kernel_syscall: increment saved MEPC by 4 to
* prevent triggering the same ecall again upon exiting the ISR.
*/
-lr t1, __z_arch_esf_t_mepc_OFFSET(sp)
+lr t1, __struct_arch_esf_mepc_OFFSET(sp)
addi t1, t1, 4
-sr t1, __z_arch_esf_t_mepc_OFFSET(sp)
+sr t1, __struct_arch_esf_mepc_OFFSET(sp)
/* Restore argument registers from user stack */
-lr a0, __z_arch_esf_t_a0_OFFSET(sp)
-lr a1, __z_arch_esf_t_a1_OFFSET(sp)
-lr a2, __z_arch_esf_t_a2_OFFSET(sp)
-lr a3, __z_arch_esf_t_a3_OFFSET(sp)
-lr a4, __z_arch_esf_t_a4_OFFSET(sp)
-lr a5, __z_arch_esf_t_a5_OFFSET(sp)
-lr t0, __z_arch_esf_t_t0_OFFSET(sp)
+lr a0, __struct_arch_esf_a0_OFFSET(sp)
+lr a1, __struct_arch_esf_a1_OFFSET(sp)
+lr a2, __struct_arch_esf_a2_OFFSET(sp)
+lr a3, __struct_arch_esf_a3_OFFSET(sp)
+lr a4, __struct_arch_esf_a4_OFFSET(sp)
+lr a5, __struct_arch_esf_a5_OFFSET(sp)
+lr t0, __struct_arch_esf_t0_OFFSET(sp)
#if defined(CONFIG_RISCV_ISA_RV32E)
/* Stack alignment for RV32E is 4 bytes */
addi sp, sp, -4
@@ -519,7 +519,7 @@ valid_syscall_id:
#endif /* CONFIG_RISCV_ISA_RV32E */
/* Update a0 (return value) on the stack */
-sr a0, __z_arch_esf_t_a0_OFFSET(sp)
+sr a0, __struct_arch_esf_a0_OFFSET(sp)
/* Disable IRQs again before leaving */
csrc mstatus, MSTATUS_IEN
@@ -534,7 +534,7 @@ is_interrupt:
* If we came from userspace then we need to reconfigure the
* PMP for kernel mode stack guard.
*/
-lr t0, __z_arch_esf_t_mstatus_OFFSET(sp)
+lr t0, __struct_arch_esf_mstatus_OFFSET(sp)
li t1, MSTATUS_MPP
and t0, t0, t1
bnez t0, 1f
@@ -665,7 +665,7 @@ no_reschedule:
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
/* Restore context at SOC level */
-addi a0, sp, __z_arch_esf_t_soc_context_OFFSET
+addi a0, sp, __struct_arch_esf_soc_context_OFFSET
jal ra, __soc_restore_context
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
@@ -683,8 +683,8 @@ fp_trap_exit:
#endif
/* Restore MEPC and MSTATUS registers */
-lr t0, __z_arch_esf_t_mepc_OFFSET(sp)
-lr t2, __z_arch_esf_t_mstatus_OFFSET(sp)
+lr t0, __struct_arch_esf_mepc_OFFSET(sp)
+lr t2, __struct_arch_esf_mstatus_OFFSET(sp)
csrw mepc, t0
csrw mstatus, t2
@@ -711,7 +711,7 @@ fp_trap_exit:
sb t1, %tprel_lo(is_user_mode)(t0)
/* preserve stack pointer for next exception entry */
-add t0, sp, __z_arch_esf_t_SIZEOF
+add t0, sp, __struct_arch_esf_SIZEOF
sr t0, _curr_cpu_arch_user_exc_sp(s0)
j 2f
@@ -720,13 +720,13 @@ fp_trap_exit:
* We are returning to kernel mode. Store the stack pointer to
* be re-loaded further down.
*/
-addi t0, sp, __z_arch_esf_t_SIZEOF
-sr t0, __z_arch_esf_t_sp_OFFSET(sp)
+addi t0, sp, __struct_arch_esf_SIZEOF
+sr t0, __struct_arch_esf_sp_OFFSET(sp)
2:
#endif
/* Restore s0 (it is no longer ours) */
-lr s0, __z_arch_esf_t_s0_OFFSET(sp)
+lr s0, __struct_arch_esf_s0_OFFSET(sp)
#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
SOC_ISR_SW_UNSTACKING
@@ -736,10 +736,10 @@ fp_trap_exit:
#ifdef CONFIG_USERSPACE
/* retrieve saved stack pointer */
-lr sp, __z_arch_esf_t_sp_OFFSET(sp)
+lr sp, __struct_arch_esf_sp_OFFSET(sp)
#else
/* remove esf from the stack */
-addi sp, sp, __z_arch_esf_t_SIZEOF
+addi sp, sp, __struct_arch_esf_SIZEOF
#endif
#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */

@@ -13,6 +13,7 @@
* structures.
*/
#include <zephyr/arch/exception.h>
+#include <zephyr/kernel.h>
#include <kernel_arch_data.h>
#include <gen_offset.h>
@@ -88,43 +89,43 @@ GEN_OFFSET_SYM(_thread_arch_t, exception_depth);
#endif /* CONFIG_FPU_SHARING */
/* esf member offsets */
-GEN_OFFSET_SYM(z_arch_esf_t, ra);
-GEN_OFFSET_SYM(z_arch_esf_t, t0);
-GEN_OFFSET_SYM(z_arch_esf_t, t1);
-GEN_OFFSET_SYM(z_arch_esf_t, t2);
-GEN_OFFSET_SYM(z_arch_esf_t, a0);
-GEN_OFFSET_SYM(z_arch_esf_t, a1);
-GEN_OFFSET_SYM(z_arch_esf_t, a2);
-GEN_OFFSET_SYM(z_arch_esf_t, a3);
-GEN_OFFSET_SYM(z_arch_esf_t, a4);
-GEN_OFFSET_SYM(z_arch_esf_t, a5);
+GEN_OFFSET_STRUCT(arch_esf, ra);
+GEN_OFFSET_STRUCT(arch_esf, t0);
+GEN_OFFSET_STRUCT(arch_esf, t1);
+GEN_OFFSET_STRUCT(arch_esf, t2);
+GEN_OFFSET_STRUCT(arch_esf, a0);
+GEN_OFFSET_STRUCT(arch_esf, a1);
+GEN_OFFSET_STRUCT(arch_esf, a2);
+GEN_OFFSET_STRUCT(arch_esf, a3);
+GEN_OFFSET_STRUCT(arch_esf, a4);
+GEN_OFFSET_STRUCT(arch_esf, a5);
#if !defined(CONFIG_RISCV_ISA_RV32E)
-GEN_OFFSET_SYM(z_arch_esf_t, t3);
-GEN_OFFSET_SYM(z_arch_esf_t, t4);
-GEN_OFFSET_SYM(z_arch_esf_t, t5);
-GEN_OFFSET_SYM(z_arch_esf_t, t6);
-GEN_OFFSET_SYM(z_arch_esf_t, a6);
-GEN_OFFSET_SYM(z_arch_esf_t, a7);
+GEN_OFFSET_STRUCT(arch_esf, t3);
+GEN_OFFSET_STRUCT(arch_esf, t4);
+GEN_OFFSET_STRUCT(arch_esf, t5);
+GEN_OFFSET_STRUCT(arch_esf, t6);
+GEN_OFFSET_STRUCT(arch_esf, a6);
+GEN_OFFSET_STRUCT(arch_esf, a7);
#endif /* !CONFIG_RISCV_ISA_RV32E */
-GEN_OFFSET_SYM(z_arch_esf_t, mepc);
-GEN_OFFSET_SYM(z_arch_esf_t, mstatus);
+GEN_OFFSET_STRUCT(arch_esf, mepc);
+GEN_OFFSET_STRUCT(arch_esf, mstatus);
-GEN_OFFSET_SYM(z_arch_esf_t, s0);
+GEN_OFFSET_STRUCT(arch_esf, s0);
#ifdef CONFIG_USERSPACE
-GEN_OFFSET_SYM(z_arch_esf_t, sp);
+GEN_OFFSET_STRUCT(arch_esf, sp);
#endif
#if defined(CONFIG_RISCV_SOC_CONTEXT_SAVE)
-GEN_OFFSET_SYM(z_arch_esf_t, soc_context);
+GEN_OFFSET_STRUCT(arch_esf, soc_context);
#endif
#if defined(CONFIG_RISCV_SOC_OFFSETS)
GEN_SOC_OFFSET_SYMS();
#endif
-GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, sizeof(z_arch_esf_t));
+GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, sizeof(struct arch_esf));
#ifdef CONFIG_EXCEPTION_DEBUG
GEN_ABSOLUTE_SYM(__callee_saved_t_SIZEOF, ROUND_UP(sizeof(_callee_saved_t), ARCH_STACK_PTR_ALIGN));

@@ -72,7 +72,7 @@ SECTION_FUNC(TEXT, __sparc_trap_except_reason)
mov %l5, %g3
/* Allocate an ABI stack frame and exception stack frame */
-sub %fp, 96 + __z_arch_esf_t_SIZEOF, %sp
+sub %fp, 96 + __struct_arch_esf_SIZEOF, %sp
/*
* %fp: %sp of interrupted task
* %sp: %sp of interrupted task - ABI_frame - esf
@@ -81,19 +81,19 @@
mov %l7, %o0
/* Fill in the content of the exception stack frame */
#if defined(CONFIG_EXTRA_EXCEPTION_INFO)
-std %i0, [%sp + 96 + __z_arch_esf_t_out_OFFSET + 0x00]
-std %i2, [%sp + 96 + __z_arch_esf_t_out_OFFSET + 0x08]
-std %i4, [%sp + 96 + __z_arch_esf_t_out_OFFSET + 0x10]
-std %i6, [%sp + 96 + __z_arch_esf_t_out_OFFSET + 0x18]
-std %g0, [%sp + 96 + __z_arch_esf_t_global_OFFSET + 0x00]
-std %g2, [%sp + 96 + __z_arch_esf_t_global_OFFSET + 0x08]
-std %g4, [%sp + 96 + __z_arch_esf_t_global_OFFSET + 0x10]
-std %g6, [%sp + 96 + __z_arch_esf_t_global_OFFSET + 0x18]
+std %i0, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x00]
+std %i2, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x08]
+std %i4, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x10]
+std %i6, [%sp + 96 + __struct_arch_esf_out_OFFSET + 0x18]
+std %g0, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x00]
+std %g2, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x08]
+std %g4, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x10]
+std %g6, [%sp + 96 + __struct_arch_esf_global_OFFSET + 0x18]
#endif
-std %l0, [%sp + 96 + __z_arch_esf_t_psr_OFFSET] /* psr pc */
-std %l2, [%sp + 96 + __z_arch_esf_t_npc_OFFSET] /* npc wim */
+std %l0, [%sp + 96 + __struct_arch_esf_psr_OFFSET] /* psr pc */
+std %l2, [%sp + 96 + __struct_arch_esf_npc_OFFSET] /* npc wim */
rd %y, %l7
-std %l6, [%sp + 96 + __z_arch_esf_t_tbr_OFFSET] /* tbr y */
+std %l6, [%sp + 96 + __struct_arch_esf_tbr_OFFSET] /* tbr y */
/* Enable traps, raise PIL to mask all maskable interrupts. */
or %l0, PSR_PIL, %o2

@@ -31,11 +31,11 @@ GEN_OFFSET_SYM(_callee_saved_t, i6);
GEN_OFFSET_SYM(_callee_saved_t, o6);
/* esf member offsets */
-GEN_OFFSET_SYM(z_arch_esf_t, out);
-GEN_OFFSET_SYM(z_arch_esf_t, global);
-GEN_OFFSET_SYM(z_arch_esf_t, npc);
-GEN_OFFSET_SYM(z_arch_esf_t, psr);
-GEN_OFFSET_SYM(z_arch_esf_t, tbr);
-GEN_ABSOLUTE_SYM(__z_arch_esf_t_SIZEOF, STACK_ROUND_UP(sizeof(z_arch_esf_t)));
+GEN_OFFSET_STRUCT(arch_esf, out);
+GEN_OFFSET_STRUCT(arch_esf, global);
+GEN_OFFSET_STRUCT(arch_esf, npc);
+GEN_OFFSET_STRUCT(arch_esf, psr);
+GEN_OFFSET_STRUCT(arch_esf, tbr);
+GEN_ABSOLUTE_SYM(__struct_arch_esf_SIZEOF, sizeof(struct arch_esf));
GEN_ABS_SYM_END

@@ -161,12 +161,12 @@ SECTION_FUNC(PINNED_TEXT, _exception_enter)
/* ESP is still pointing to the ESF at this point */
-testl $0x200, __z_arch_esf_t_eflags_OFFSET(%esp)
+testl $0x200, __struct_arch_esf_eflags_OFFSET(%esp)
je allDone
sti
allDone:
-pushl %esp /* push z_arch_esf_t * parameter */
+pushl %esp /* push struct arch_esf * parameter */
call *%ecx /* call exception handler */
addl $0x4, %esp

@@ -52,6 +52,6 @@ GEN_ABSOLUTE_SYM(_K_THREAD_NO_FLOAT_SIZEOF,
GEN_OFFSET_SYM(_callee_saved_t, esp);
-/* z_arch_esf_t structure member offsets */
-GEN_OFFSET_SYM(z_arch_esf_t, eflags);
+/* struct arch_esf structure member offsets */
+GEN_OFFSET_STRUCT(arch_esf, eflags);
#endif /* _X86_OFFSETS_INC_ */

@@ -79,14 +79,18 @@
#include <zephyr/toolchain.h>
#include <stddef.h>
-typedef struct arch_esf z_arch_esf_t;
/* definition of the GEN_OFFSET_SYM() macros is toolchain independent */
#define GEN_OFFSET_SYM(S, M) \
GEN_ABSOLUTE_SYM(__##S##_##M##_##OFFSET, offsetof(S, M))
+#define GEN_OFFSET_STRUCT(S, M) \
+GEN_ABSOLUTE_SYM(__struct_##S##_##M##_##OFFSET, offsetof(struct S, M))
#define GEN_NAMED_OFFSET_SYM(S, M, N) \
GEN_ABSOLUTE_SYM(__##S##_##N##_##OFFSET, offsetof(S, M))
+#define GEN_NAMED_OFFSET_STRUCT(S, M, N) \
+GEN_ABSOLUTE_SYM(__struct_##S##_##N##_##OFFSET, offsetof(struct S, M))
#endif /* ZEPHYR_KERNEL_INCLUDE_GEN_OFFSET_H_ */
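As a sketch of how the new macros expand (`arch_esf` and `mepc` are taken from the diffs above; the `pc` symbol name in the named variant is hypothetical, chosen only for illustration):

```c
/* Plain variant: the generated symbol name matches the member name. */
GEN_OFFSET_STRUCT(arch_esf, mepc);
/* expands, per the definition above, to:
 *   GEN_ABSOLUTE_SYM(__struct_arch_esf_mepc_OFFSET,
 *                    offsetof(struct arch_esf, mepc))
 */

/* Named variant: the third argument names the symbol (hypothetical `pc`
 * here), while the second argument still selects the struct member. */
GEN_NAMED_OFFSET_STRUCT(arch_esf, mepc, pc);
/* expands to:
 *   GEN_ABSOLUTE_SYM(__struct_arch_esf_pc_OFFSET,
 *                    offsetof(struct arch_esf, mepc))
 */
```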

@@ -73,7 +73,7 @@
/*
* Size of the SW managed part of the ESF in case of exception
*/
-#define ESF_SW_EXC_SIZEOF (__z_arch_esf_t_SIZEOF - ESF_HW_SIZEOF)
+#define ESF_SW_EXC_SIZEOF (__struct_arch_esf_SIZEOF - ESF_HW_SIZEOF)
/*
* Size of the SW managed part of the ESF in case of interrupt
@@ -90,17 +90,17 @@
#define MEPC_SP_ALIGN_BIT_MASK (0x1UL)
#define STORE_SP_ALIGN_BIT_FROM_MEPC \
-addi t1, sp, __z_arch_esf_t_soc_context_OFFSET; \
-lr t0, __z_arch_esf_t_mepc_OFFSET(sp); \
+addi t1, sp, __struct_arch_esf_soc_context_OFFSET; \
+lr t0, __struct_arch_esf_mepc_OFFSET(sp); \
andi t0, t0, MEPC_SP_ALIGN_BIT_MASK; \
sr t0, __soc_esf_t_sp_align_OFFSET(t1)
#define RESTORE_SP_ALIGN_BIT_TO_MEPC \
-addi t1, sp, __z_arch_esf_t_soc_context_OFFSET; \
+addi t1, sp, __struct_arch_esf_soc_context_OFFSET; \
lr t0, __soc_esf_t_sp_align_OFFSET(t1); \
-lr t1, __z_arch_esf_t_mepc_OFFSET(sp); \
+lr t1, __struct_arch_esf_mepc_OFFSET(sp); \
or t2, t1, t0; \
-sr t2, __z_arch_esf_t_mepc_OFFSET(sp)
+sr t2, __struct_arch_esf_mepc_OFFSET(sp)
#define SOC_ISR_SW_STACKING \
csrw mscratch, t0; \