riscv: make core code 64-bit compatible

There are two aspects to this: CPU registers are twice as big, and the
load and store instructions must use the 'd' suffix instead of the 'w'
one. To abstract register differences, we simply use a ulong_t instead
of u32_t given that RISC-V is either ILP32 or LP64. And the relevant
lw/sw instructions are replaced by LR/SR (load/store register) that get
defined as either lw/sw or ld/sd. Finally a few constants to deal with
register offsets are also provided.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
This commit is contained in:
Nicolas Pitre 2019-07-24 16:21:58 -04:00 committed by Andrew Boie
commit 0440a815a9
10 changed files with 234 additions and 212 deletions

View file

@@ -26,45 +26,45 @@ SECTION_FUNC(exception.other, __swap)
#ifdef CONFIG_EXECUTION_BENCHMARKING
addi sp, sp, -__z_arch_esf_t_SIZEOF
sw ra, __z_arch_esf_t_ra_OFFSET(sp)
sw gp, __z_arch_esf_t_gp_OFFSET(sp)
sw tp, __z_arch_esf_t_tp_OFFSET(sp)
sw t0, __z_arch_esf_t_t0_OFFSET(sp)
sw t1, __z_arch_esf_t_t1_OFFSET(sp)
sw t2, __z_arch_esf_t_t2_OFFSET(sp)
sw t3, __z_arch_esf_t_t3_OFFSET(sp)
sw t4, __z_arch_esf_t_t4_OFFSET(sp)
sw t5, __z_arch_esf_t_t5_OFFSET(sp)
sw t6, __z_arch_esf_t_t6_OFFSET(sp)
sw a0, __z_arch_esf_t_a0_OFFSET(sp)
sw a1, __z_arch_esf_t_a1_OFFSET(sp)
sw a2, __z_arch_esf_t_a2_OFFSET(sp)
sw a3, __z_arch_esf_t_a3_OFFSET(sp)
sw a4, __z_arch_esf_t_a4_OFFSET(sp)
sw a5, __z_arch_esf_t_a5_OFFSET(sp)
sw a6, __z_arch_esf_t_a6_OFFSET(sp)
sw a7, __z_arch_esf_t_a7_OFFSET(sp)
SR ra, __z_arch_esf_t_ra_OFFSET(sp)
SR gp, __z_arch_esf_t_gp_OFFSET(sp)
SR tp, __z_arch_esf_t_tp_OFFSET(sp)
SR t0, __z_arch_esf_t_t0_OFFSET(sp)
SR t1, __z_arch_esf_t_t1_OFFSET(sp)
SR t2, __z_arch_esf_t_t2_OFFSET(sp)
SR t3, __z_arch_esf_t_t3_OFFSET(sp)
SR t4, __z_arch_esf_t_t4_OFFSET(sp)
SR t5, __z_arch_esf_t_t5_OFFSET(sp)
SR t6, __z_arch_esf_t_t6_OFFSET(sp)
SR a0, __z_arch_esf_t_a0_OFFSET(sp)
SR a1, __z_arch_esf_t_a1_OFFSET(sp)
SR a2, __z_arch_esf_t_a2_OFFSET(sp)
SR a3, __z_arch_esf_t_a3_OFFSET(sp)
SR a4, __z_arch_esf_t_a4_OFFSET(sp)
SR a5, __z_arch_esf_t_a5_OFFSET(sp)
SR a6, __z_arch_esf_t_a6_OFFSET(sp)
SR a7, __z_arch_esf_t_a7_OFFSET(sp)
call read_timer_start_of_swap
lw ra, __z_arch_esf_t_ra_OFFSET(sp)
lw gp, __z_arch_esf_t_gp_OFFSET(sp)
lw tp, __z_arch_esf_t_tp_OFFSET(sp)
lw t0, __z_arch_esf_t_t0_OFFSET(sp)
lw t1, __z_arch_esf_t_t1_OFFSET(sp)
lw t2, __z_arch_esf_t_t2_OFFSET(sp)
lw t3, __z_arch_esf_t_t3_OFFSET(sp)
lw t4, __z_arch_esf_t_t4_OFFSET(sp)
lw t5, __z_arch_esf_t_t5_OFFSET(sp)
lw t6, __z_arch_esf_t_t6_OFFSET(sp)
lw a0, __z_arch_esf_t_a0_OFFSET(sp)
lw a1, __z_arch_esf_t_a1_OFFSET(sp)
lw a2, __z_arch_esf_t_a2_OFFSET(sp)
lw a3, __z_arch_esf_t_a3_OFFSET(sp)
lw a4, __z_arch_esf_t_a4_OFFSET(sp)
lw a5, __z_arch_esf_t_a5_OFFSET(sp)
lw a6, __z_arch_esf_t_a6_OFFSET(sp)
lw a7, __z_arch_esf_t_a7_OFFSET(sp)
LR ra, __z_arch_esf_t_ra_OFFSET(sp)
LR gp, __z_arch_esf_t_gp_OFFSET(sp)
LR tp, __z_arch_esf_t_tp_OFFSET(sp)
LR t0, __z_arch_esf_t_t0_OFFSET(sp)
LR t1, __z_arch_esf_t_t1_OFFSET(sp)
LR t2, __z_arch_esf_t_t2_OFFSET(sp)
LR t3, __z_arch_esf_t_t3_OFFSET(sp)
LR t4, __z_arch_esf_t_t4_OFFSET(sp)
LR t5, __z_arch_esf_t_t5_OFFSET(sp)
LR t6, __z_arch_esf_t_t6_OFFSET(sp)
LR a0, __z_arch_esf_t_a0_OFFSET(sp)
LR a1, __z_arch_esf_t_a1_OFFSET(sp)
LR a2, __z_arch_esf_t_a2_OFFSET(sp)
LR a3, __z_arch_esf_t_a3_OFFSET(sp)
LR a4, __z_arch_esf_t_a4_OFFSET(sp)
LR a5, __z_arch_esf_t_a5_OFFSET(sp)
LR a6, __z_arch_esf_t_a6_OFFSET(sp)
LR a7, __z_arch_esf_t_a7_OFFSET(sp)
/* Release stack space */
addi sp, sp, __z_arch_esf_t_SIZEOF
@@ -83,7 +83,7 @@ SECTION_FUNC(exception.other, __swap)
la t0, _kernel
/* Get pointer to _kernel.current */
lw t1, _kernel_offset_to_current(t0)
LR t1, _kernel_offset_to_current(t0)
/* Load return value of __swap function in temp register t2 */
lw t2, _thread_offset_to_swap_return_value(t1)