riscv: implement arch_switch()

The move to arch_switch() is a prerequisite for SMP support.

Make it optimal without the need for an ECALL roundtrip on every
context switch. Performance numbers from tests/benchmarks/sched:

Before:
unpend  107 ready  102 switch  188 pend  218 tot  615 (avg  615)

After:
unpend  107 ready  102 switch  170 pend  217 tot  596 (avg  595)

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
This commit is contained in:
Nicolas Pitre 2022-03-07 17:01:36 -05:00 committed by Anas Nashif
commit ce8dabfe9e
12 changed files with 197 additions and 234 deletions

View file

@@ -109,6 +109,8 @@ config RISCV
select ARCH_SUPPORTS_COREDUMP select ARCH_SUPPORTS_COREDUMP
select ARCH_HAS_THREAD_LOCAL_STORAGE select ARCH_HAS_THREAD_LOCAL_STORAGE
select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
select USE_SWITCH_SUPPORTED
select USE_SWITCH
select SCHED_IPI_SUPPORTED if SMP select SCHED_IPI_SUPPORTED if SMP
imply XIP imply XIP
help help

View file

@@ -12,7 +12,7 @@ zephyr_library_sources(
prep_c.c prep_c.c
reboot.c reboot.c
reset.S reset.S
swap.S switch.S
smp.c smp.c
thread.c thread.c
) )

View file

@@ -41,35 +41,6 @@
op fa6, __z_arch_esf_t_fa6_OFFSET(reg) ;\ op fa6, __z_arch_esf_t_fa6_OFFSET(reg) ;\
op fa7, __z_arch_esf_t_fa7_OFFSET(reg) ; op fa7, __z_arch_esf_t_fa7_OFFSET(reg) ;
#define DO_FP_CALLEE_SAVED(op, reg) \
op fs0, _thread_offset_to_fs0(reg) ;\
op fs1, _thread_offset_to_fs1(reg) ;\
op fs2, _thread_offset_to_fs2(reg) ;\
op fs3, _thread_offset_to_fs3(reg) ;\
op fs4, _thread_offset_to_fs4(reg) ;\
op fs5, _thread_offset_to_fs5(reg) ;\
op fs6, _thread_offset_to_fs6(reg) ;\
op fs7, _thread_offset_to_fs7(reg) ;\
op fs8, _thread_offset_to_fs8(reg) ;\
op fs9, _thread_offset_to_fs9(reg) ;\
op fs10, _thread_offset_to_fs10(reg) ;\
op fs11, _thread_offset_to_fs11(reg) ;
#define DO_CALLEE_SAVED(op, reg) \
op tp, _thread_offset_to_tp(reg) ;\
op s0, _thread_offset_to_s0(reg) ;\
op s1, _thread_offset_to_s1(reg) ;\
op s2, _thread_offset_to_s2(reg) ;\
op s3, _thread_offset_to_s3(reg) ;\
op s4, _thread_offset_to_s4(reg) ;\
op s5, _thread_offset_to_s5(reg) ;\
op s6, _thread_offset_to_s6(reg) ;\
op s7, _thread_offset_to_s7(reg) ;\
op s8, _thread_offset_to_s8(reg) ;\
op s9, _thread_offset_to_s9(reg) ;\
op s10, _thread_offset_to_s10(reg) ;\
op s11, _thread_offset_to_s11(reg) ;
#define DO_CALLER_SAVED(op) \ #define DO_CALLER_SAVED(op) \
op ra, __z_arch_esf_t_ra_OFFSET(sp) ;\ op ra, __z_arch_esf_t_ra_OFFSET(sp) ;\
op t0, __z_arch_esf_t_t0_OFFSET(sp) ;\ op t0, __z_arch_esf_t_t0_OFFSET(sp) ;\
@@ -101,25 +72,16 @@ GTEXT(__soc_restore_context)
#endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */ #endif /* CONFIG_RISCV_SOC_CONTEXT_SAVE */
GTEXT(z_riscv_fatal_error) GTEXT(z_riscv_fatal_error)
GTEXT(_k_neg_eagain) GTEXT(z_get_next_switch_handle)
GTEXT(_is_next_thread_current) GTEXT(z_riscv_switch)
GTEXT(z_get_next_ready_thread) GTEXT(z_riscv_thread_start)
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
GTEXT(z_thread_mark_switched_in)
GTEXT(z_thread_mark_switched_out)
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
GTEXT(sys_trace_isr_enter) GTEXT(sys_trace_isr_enter)
#endif #endif
#endif
#ifdef CONFIG_USERSPACE #ifdef CONFIG_USERSPACE
GDATA(_k_syscall_table) GDATA(_k_syscall_table)
GTEXT(z_riscv_configure_user_allowed_stack)
#endif
#ifdef CONFIG_PMP_STACK_GUARD
GTEXT(z_riscv_configure_stack_guard)
#endif #endif
/* exports */ /* exports */
@@ -291,21 +253,15 @@ is_kernel_syscall:
/* Determine what to do. Operation code is in a7. */ /* Determine what to do. Operation code is in a7. */
lr a7, __z_arch_esf_t_a7_OFFSET(sp) lr a7, __z_arch_esf_t_a7_OFFSET(sp)
ASSUME_EQUAL(RV_ECALL_CONTEXT_SWITCH, 0) ASSUME_EQUAL(RV_ECALL_RUNTIME_EXCEPT, 0)
beqz a7, reschedule beqz a7, do_fault
#if defined(CONFIG_IRQ_OFFLOAD) #if defined(CONFIG_IRQ_OFFLOAD)
addi a7, a7, -1 addi a7, a7, -1
ASSUME_EQUAL(RV_ECALL_IRQ_OFFLOAD, 1) ASSUME_EQUAL(RV_ECALL_IRQ_OFFLOAD, 1)
beqz a7, do_irq_offload beqz a7, do_irq_offload
addi a7, a7, -1
#else
addi a7, a7, -2
#endif #endif
ASSUME_EQUAL(RV_ECALL_RUNTIME_EXCEPT, 2)
beqz a7, do_fault
/* default fault code is K_ERR_KERNEL_OOPS */ /* default fault code is K_ERR_KERNEL_OOPS */
li a0, 3 li a0, 3
j 1f j 1f
@@ -467,102 +423,32 @@ irq_done:
#endif #endif
reschedule: reschedule:
/* Get reference to _kernel */
la t1, _kernel
/*
* Check if next thread to schedule is current thread.
* If yes do not perform a reschedule
*/
lr t2, _kernel_offset_to_current(t1)
lr t3, _kernel_offset_to_ready_q_cache(t1)
beq t3, t2, no_reschedule
#if CONFIG_INSTRUMENT_THREAD_SWITCHING
call z_thread_mark_switched_out
#endif
/* Get reference to _kernel */
la t0, _kernel
/* Get pointer to _kernel.current */ /* Get pointer to _kernel.current */
lr t1, _kernel_offset_to_current(t0)
/*
* Save callee-saved registers of current kernel thread
* prior to handle context-switching
*/
DO_CALLEE_SAVED(sr, t1)
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* Assess whether floating-point registers need to be saved. */
lb t2, _thread_offset_to_user_options(t1)
andi t2, t2, K_FP_REGS
beqz t2, skip_store_fp_callee_saved
frcsr t2
sw t2, _thread_offset_to_fcsr(t1)
DO_FP_CALLEE_SAVED(fsr, t1)
skip_store_fp_callee_saved:
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
/*
* Save stack pointer of current thread and set the default return value
* of z_swap to _k_neg_eagain for the thread.
*/
sr sp, _thread_offset_to_sp(t1)
la t2, _k_neg_eagain
lw t3, 0(t2)
sw t3, _thread_offset_to_swap_return_value(t1)
/* Get next thread to schedule. */
lr t1, _kernel_offset_to_ready_q_cache(t0)
/* Set _kernel.current to new thread loaded in t1 */
sr t1, _kernel_offset_to_current(t0)
/* Switch to new thread stack */
lr sp, _thread_offset_to_sp(t1)
/* Restore callee-saved registers of new thread */
DO_CALLEE_SAVED(lr, t1)
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* Determine if we need to restore floating-point registers. */
lb t2, _thread_offset_to_user_options(t1)
andi t2, t2, K_FP_REGS
beqz t2, skip_load_fp_callee_saved
/*
* If we are switching from a thread with floating-point disabled the
* mstatus FS bits will still be cleared, which can cause an illegal
* instruction fault. Set the FS state before restoring the registers.
* mstatus will be restored later on.
*/
li t2, MSTATUS_FS_INIT
csrs mstatus, t2
lw t2, _thread_offset_to_fcsr(t1)
fscsr t2
DO_FP_CALLEE_SAVED(flr, t1)
skip_load_fp_callee_saved:
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#ifdef CONFIG_PMP_STACK_GUARD
mv a0, t1 /* kernel current */
jal ra, z_riscv_configure_stack_guard
#endif /* CONFIG_PMP_STACK_GUARD */
#ifdef CONFIG_USERSPACE
la t0, _kernel la t0, _kernel
lr a0, _kernel_offset_to_current(t0) lr a1, _kernel_offset_to_current(t0)
jal ra, z_riscv_configure_user_allowed_stack
#endif /* CONFIG_USERSPACE */
#if CONFIG_INSTRUMENT_THREAD_SWITCHING /*
call z_thread_mark_switched_in * Get next thread to schedule with z_get_next_switch_handle().
#endif * We pass it a NULL as we didn't save the whole thread context yet.
* If no scheduling is necessary then NULL will be returned.
*/
addi sp, sp, -16
sr a1, 0(sp)
mv a0, zero
call z_get_next_switch_handle
lr a1, 0(sp)
addi sp, sp, 16
beqz a0, no_reschedule
/*
* Perform context switch:
* a0 = new thread
* a1 = old thread
*/
call z_riscv_switch
z_riscv_thread_start:
no_reschedule: no_reschedule:
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE

View file

@@ -25,14 +25,9 @@
#include <soc_offsets.h> #include <soc_offsets.h>
#endif #endif
/* thread_arch_t member offsets */
GEN_OFFSET_SYM(_thread_arch_t, swap_return_value);
#if defined(CONFIG_USERSPACE)
GEN_OFFSET_SYM(_thread_arch_t, priv_stack_start);
#endif
/* struct _callee_saved member offsets */ /* struct _callee_saved member offsets */
GEN_OFFSET_SYM(_callee_saved_t, sp); GEN_OFFSET_SYM(_callee_saved_t, sp);
GEN_OFFSET_SYM(_callee_saved_t, ra);
GEN_OFFSET_SYM(_callee_saved_t, tp); GEN_OFFSET_SYM(_callee_saved_t, tp);
GEN_OFFSET_SYM(_callee_saved_t, s0); GEN_OFFSET_SYM(_callee_saved_t, s0);
GEN_OFFSET_SYM(_callee_saved_t, s1); GEN_OFFSET_SYM(_callee_saved_t, s1);

View file

@@ -1,59 +0,0 @@
/*
* Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <toolchain.h>
#include <linker/sections.h>
#include <offsets_short.h>
#include <arch/cpu.h>
#include <arch/riscv/syscall.h>
#include "asm_macros.inc"
/* exports */
GTEXT(arch_swap)
/* Use ABI name of registers for the sake of simplicity */
/*
* unsigned int arch_swap(unsigned int key)
*
* Always called with interrupts locked
* key is stored in a0 register
*/
SECTION_FUNC(exception.other, arch_swap)
/* Make a system call to perform context switch */
li a7, RV_ECALL_CONTEXT_SWITCH
ecall
/*
* when thread is rescheduled, unlock irq and return.
* Restored register a0 contains IRQ lock state of thread.
*
* Prior to unlocking irq, load return value of
* arch_swap to temp register t2 (from
* _thread_offset_to_swap_return_value). Normally, it should be -EAGAIN,
* unless someone has previously called arch_thread_return_value_set(..).
*/
la t0, _kernel
/* Get pointer to _kernel.current */
lr t1, _kernel_offset_to_current(t0)
/* Load return value of arch_swap function in temp register t2 */
lw t2, _thread_offset_to_swap_return_value(t1)
/*
* Unlock irq, following IRQ lock state in a0 register.
* Use atomic instruction csrrs to do so.
*/
andi a0, a0, MSTATUS_IEN
csrs mstatus, a0
/* Set value of return register a0 to value of register t2 */
mv a0, t2
/* Return */
ret

126
arch/riscv/core/switch.S Normal file
View file

@@ -0,0 +1,126 @@
/*
* Copyright (c) 2022 BayLibre, SAS
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <toolchain.h>
#include <linker/sections.h>
#include <kernel.h>
#include <sys/util.h>
#include <offsets_short.h>
#include <arch/cpu.h>
#include "asm_macros.inc"
/* Convenience macros for loading/storing register states. */
/*
 * DO_CALLEE_SAVED(op, reg): apply "op" (a load/store mnemonic, e.g. the
 * lr/sr width-agnostic macros used at the call sites below) to every
 * integer callee-saved register, each addressed via its
 * _thread_offset_to_* slot relative to the thread pointer held in "reg".
 */
#define DO_CALLEE_SAVED(op, reg) \
op ra, _thread_offset_to_ra(reg) ;\
op tp, _thread_offset_to_tp(reg) ;\
op s0, _thread_offset_to_s0(reg) ;\
op s1, _thread_offset_to_s1(reg) ;\
op s2, _thread_offset_to_s2(reg) ;\
op s3, _thread_offset_to_s3(reg) ;\
op s4, _thread_offset_to_s4(reg) ;\
op s5, _thread_offset_to_s5(reg) ;\
op s6, _thread_offset_to_s6(reg) ;\
op s7, _thread_offset_to_s7(reg) ;\
op s8, _thread_offset_to_s8(reg) ;\
op s9, _thread_offset_to_s9(reg) ;\
op s10, _thread_offset_to_s10(reg) ;\
op s11, _thread_offset_to_s11(reg)
/*
 * DO_FP_CALLEE_SAVED(op, reg): same idea for the floating-point
 * callee-saved registers fs0-fs11 ("op" is fsr on save, flr on restore
 * at the call sites below).
 */
#define DO_FP_CALLEE_SAVED(op, reg) \
op fs0, _thread_offset_to_fs0(reg) ;\
op fs1, _thread_offset_to_fs1(reg) ;\
op fs2, _thread_offset_to_fs2(reg) ;\
op fs3, _thread_offset_to_fs3(reg) ;\
op fs4, _thread_offset_to_fs4(reg) ;\
op fs5, _thread_offset_to_fs5(reg) ;\
op fs6, _thread_offset_to_fs6(reg) ;\
op fs7, _thread_offset_to_fs7(reg) ;\
op fs8, _thread_offset_to_fs8(reg) ;\
op fs9, _thread_offset_to_fs9(reg) ;\
op fs10, _thread_offset_to_fs10(reg) ;\
op fs11, _thread_offset_to_fs11(reg)
GTEXT(z_riscv_switch)
GTEXT(z_thread_mark_switched_in)
GTEXT(z_riscv_configure_stack_guard)
/*
 * void z_riscv_switch(k_thread_t *switch_to, k_thread_t *switch_from)
 *
 * In:  a0 = switch_to   (thread to be resumed)
 *      a1 = switch_from (thread being suspended)
 *
 * Saves switch_from's callee-saved context into its k_thread, publishes
 * its switch handle, then loads switch_to's context and returns through
 * the ra restored from switch_to. For a freshly created thread that ra
 * is z_riscv_thread_start (set up in arch_new_thread()), so "ret" below
 * starts the new thread.
 *
 * Scratch: t0, t1. s0 is used as a call-preserving temporary in the
 * optional sections, which is safe only because DO_CALLEE_SAVED(lr, a0)
 * later reloads s0 from the new thread's saved context.
 */
SECTION_FUNC(TEXT, z_riscv_switch)
/* Save the old thread's callee-saved registers */
DO_CALLEE_SAVED(sr, a1)
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* Assess whether floating-point registers need to be saved. */
lb t0, _thread_offset_to_user_options(a1)
andi t0, t0, K_FP_REGS
beqz t0, skip_store_fp_callee_saved
/* FP-using thread: save fcsr plus fs0-fs11. */
frcsr t0
sw t0, _thread_offset_to_fcsr(a1)
DO_FP_CALLEE_SAVED(fsr, a1)
skip_store_fp_callee_saved:
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
/* Save the old thread's stack pointer */
sr sp, _thread_offset_to_sp(a1)
/* Set thread->switch_handle = thread to mark completion */
sr a1, ___thread_t_switch_handle_OFFSET(a1)
/* Get the new thread's stack pointer */
lr sp, _thread_offset_to_sp(a0)
#ifdef CONFIG_PMP_STACK_GUARD
/* Preserve a0 across following call. s0 is not yet restored. */
/* a0 (the new thread) is also the argument to the call. */
mv s0, a0
call z_riscv_configure_stack_guard
mv a0, s0
#endif
#ifdef CONFIG_USERSPACE
/* Only threads with the K_USER option need their stack configured. */
lb t0, _thread_offset_to_user_options(a0)
andi t0, t0, K_USER
beqz t0, not_user_task
/* Keep the new thread in s0: the call may clobber a0. */
mv s0, a0
call z_riscv_configure_user_allowed_stack
mv a0, s0
not_user_task:
#endif
#if CONFIG_INSTRUMENT_THREAD_SWITCHING
/* Tracing hook; again shelter a0 in s0 across the call. */
mv s0, a0
call z_thread_mark_switched_in
mv a0, s0
#endif
/* Restore the new thread's callee-saved registers */
DO_CALLEE_SAVED(lr, a0)
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/* Determine if we need to restore floating-point registers. */
lb t0, _thread_offset_to_user_options(a0)
/* t1 = FS field mask, used by both the enable and disable paths. */
li t1, MSTATUS_FS_INIT
andi t0, t0, K_FP_REGS
beqz t0, no_fp
/* Enable floating point access */
/* (must happen before touching FP regs to avoid an illegal-insn trap) */
csrs mstatus, t1
/* Restore FP regs */
lw t1, _thread_offset_to_fcsr(a0)
fscsr t1
DO_FP_CALLEE_SAVED(flr, a0)
/* Skip the FP-disable path. */
j 1f
no_fp:
/* Disable floating point access */
csrc mstatus, t1
1:
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
ret

View file

@@ -23,6 +23,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
char *stack_ptr, k_thread_entry_t entry, char *stack_ptr, k_thread_entry_t entry,
void *p1, void *p2, void *p3) void *p1, void *p2, void *p3)
{ {
extern void z_riscv_thread_start(void);
struct __esf *stack_init; struct __esf *stack_init;
#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE #ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
@@ -53,8 +54,8 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* and restored prior to returning from the interrupt/exception. * and restored prior to returning from the interrupt/exception.
* This shall allow to handle nested interrupts. * This shall allow to handle nested interrupts.
* *
* Given that context switching is performed via a system call exception * Given that thread startup happens through the exception exit
* within the RISCV architecture implementation, initially set: * path, initially set:
* 1) MSTATUS to MSTATUS_DEF_RESTORE in the thread stack to enable * 1) MSTATUS to MSTATUS_DEF_RESTORE in the thread stack to enable
* interrupts when the newly created thread will be scheduled; * interrupts when the newly created thread will be scheduled;
* 2) MEPC to the address of the z_thread_entry in the thread * 2) MEPC to the address of the z_thread_entry in the thread
@@ -121,6 +122,12 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
#endif #endif
thread->callee_saved.sp = (ulong_t)stack_init; thread->callee_saved.sp = (ulong_t)stack_init;
/* where to go when returning from z_riscv_switch() */
thread->callee_saved.ra = (ulong_t)z_riscv_thread_start;
/* our switch handle is the thread pointer itself */
thread->switch_handle = thread;
} }
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING) #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)

View file

@@ -38,9 +38,14 @@ static ALWAYS_INLINE void arch_kernel_init(void)
} }
static ALWAYS_INLINE void static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value) arch_switch(void *switch_to, void **switched_from)
{ {
thread->arch.swap_return_value = value; extern void z_riscv_switch(struct k_thread *new, struct k_thread *old);
struct k_thread *new = switch_to;
struct k_thread *old = CONTAINER_OF(switched_from, struct k_thread,
switch_handle);
z_riscv_switch(new, old);
} }
FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason, FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,

View file

@@ -20,6 +20,9 @@
#define _thread_offset_to_sp \ #define _thread_offset_to_sp \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET) (___thread_t_callee_saved_OFFSET + ___callee_saved_t_sp_OFFSET)
#define _thread_offset_to_ra \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_ra_OFFSET)
#define _thread_offset_to_tp \ #define _thread_offset_to_tp \
(___thread_t_callee_saved_OFFSET + ___callee_saved_t_tp_OFFSET) (___thread_t_callee_saved_OFFSET + ___callee_saved_t_tp_OFFSET)

View file

@@ -19,9 +19,8 @@
/* /*
* Privileged mode system calls * Privileged mode system calls
*/ */
#define RV_ECALL_CONTEXT_SWITCH 0 #define RV_ECALL_RUNTIME_EXCEPT 0
#define RV_ECALL_IRQ_OFFLOAD 1 #define RV_ECALL_IRQ_OFFLOAD 1
#define RV_ECALL_RUNTIME_EXCEPT 2
#ifndef _ASMLANGUAGE #ifndef _ASMLANGUAGE

View file

@@ -52,10 +52,11 @@
/* /*
* The following structure defines the list of registers that need to be * The following structure defines the list of registers that need to be
* saved/restored when a cooperative context switch occurs. * saved/restored when a context switch occurs.
*/ */
struct _callee_saved { struct _callee_saved {
ulong_t sp; /* Stack pointer, (x2 register) */ ulong_t sp; /* Stack pointer, (x2 register) */
ulong_t ra; /* return address */
ulong_t tp; /* thread pointer */ ulong_t tp; /* thread pointer */
ulong_t s0; /* saved register/frame pointer */ ulong_t s0; /* saved register/frame pointer */
@@ -90,8 +91,6 @@ struct _callee_saved
typedef struct _callee_saved _callee_saved_t; typedef struct _callee_saved _callee_saved_t;
struct _thread_arch { struct _thread_arch {
uint32_t swap_return_value; /* Return value of z_swap() */
#ifdef CONFIG_PMP_STACK_GUARD #ifdef CONFIG_PMP_STACK_GUARD
ulong_t s_pmpcfg[PMP_CFG_CSR_NUM_FOR_STACK_GUARD]; ulong_t s_pmpcfg[PMP_CFG_CSR_NUM_FOR_STACK_GUARD];
ulong_t s_pmpaddr[PMP_REGION_NUM_FOR_STACK_GUARD]; ulong_t s_pmpaddr[PMP_REGION_NUM_FOR_STACK_GUARD];

View file

@@ -141,9 +141,9 @@ ZTEST_SUITE(test_log_stack, NULL, NULL, NULL, NULL, after);
#elif defined(CONFIG_RISCV) && !defined(CONFIG_64BIT) && !defined(CONFIG_SMP) #elif defined(CONFIG_RISCV) && !defined(CONFIG_64BIT) && !defined(CONFIG_SMP)
#if !defined(CONFIG_LOG_MODE_IMMEDIATE) && !defined(CONFIG_NO_OPTIMIZATIONS) #if !defined(CONFIG_LOG_MODE_IMMEDIATE) && !defined(CONFIG_NO_OPTIMIZATIONS)
#define SIMPLE_USAGE 24 #define SIMPLE_USAGE 36
#define HEXDUMP_USAGE 60 #define HEXDUMP_USAGE 84
#define MORE_ARGS_USAGE 40 #define MORE_ARGS_USAGE 52
#elif defined(CONFIG_LOG_MODE_IMMEDIATE) && !defined(CONFIG_NO_OPTIMIZATIONS) #elif defined(CONFIG_LOG_MODE_IMMEDIATE) && !defined(CONFIG_NO_OPTIMIZATIONS)
#define SIMPLE_USAGE 456 #define SIMPLE_USAGE 456
#define HEXDUMP_USAGE 456 #define HEXDUMP_USAGE 456
@@ -181,33 +181,33 @@ ZTEST_SUITE(test_log_stack, NULL, NULL, NULL, NULL, after);
#elif defined(CONFIG_RISCV) && defined(CONFIG_64BIT) && defined(CONFIG_SMP) #elif defined(CONFIG_RISCV) && defined(CONFIG_64BIT) && defined(CONFIG_SMP)
#if !defined(CONFIG_LOG_MODE_IMMEDIATE) && !defined(CONFIG_NO_OPTIMIZATIONS) #if !defined(CONFIG_LOG_MODE_IMMEDIATE) && !defined(CONFIG_NO_OPTIMIZATIONS)
#define SIMPLE_USAGE 24 #define SIMPLE_USAGE 80
#define HEXDUMP_USAGE 60 #define HEXDUMP_USAGE 96
#define MORE_ARGS_USAGE 48 #define MORE_ARGS_USAGE 112
#elif defined(CONFIG_LOG_MODE_IMMEDIATE) && !defined(CONFIG_NO_OPTIMIZATIONS) #elif defined(CONFIG_LOG_MODE_IMMEDIATE) && !defined(CONFIG_NO_OPTIMIZATIONS)
#define SIMPLE_USAGE 656 #define SIMPLE_USAGE 688
#define HEXDUMP_USAGE 656 #define HEXDUMP_USAGE 688
#define MORE_ARGS_USAGE 688 #define MORE_ARGS_USAGE 720
#elif !defined(CONFIG_LOG_MODE_IMMEDIATE) && defined(CONFIG_NO_OPTIMIZATIONS) #elif !defined(CONFIG_LOG_MODE_IMMEDIATE) && defined(CONFIG_NO_OPTIMIZATIONS)
#define SIMPLE_USAGE 240 #define SIMPLE_USAGE 272
#define HEXDUMP_USAGE 240 #define HEXDUMP_USAGE 272
#define MORE_ARGS_USAGE 240 #define MORE_ARGS_USAGE 272
#elif defined(CONFIG_LOG_MODE_IMMEDIATE) && defined(CONFIG_NO_OPTIMIZATIONS) #elif defined(CONFIG_LOG_MODE_IMMEDIATE) && defined(CONFIG_NO_OPTIMIZATIONS)
#define SIMPLE_USAGE 992 #define SIMPLE_USAGE 992
#define HEXDUMP_USAGE 1408 #define HEXDUMP_USAGE 992
#define MORE_ARGS_USAGE 1024 #define MORE_ARGS_USAGE 1024
#endif #endif
#elif defined(CONFIG_RISCV) && defined(CONFIG_64BIT) #elif defined(CONFIG_RISCV) && defined(CONFIG_64BIT)
#if !defined(CONFIG_LOG_MODE_IMMEDIATE) && !defined(CONFIG_NO_OPTIMIZATIONS) #if !defined(CONFIG_LOG_MODE_IMMEDIATE) && !defined(CONFIG_NO_OPTIMIZATIONS)
#define SIMPLE_USAGE 16 #define SIMPLE_USAGE 104
#define HEXDUMP_USAGE 32 #define HEXDUMP_USAGE 144
#define MORE_ARGS_USAGE 48 #define MORE_ARGS_USAGE 136
#elif defined(CONFIG_LOG_MODE_IMMEDIATE) && !defined(CONFIG_NO_OPTIMIZATIONS) #elif defined(CONFIG_LOG_MODE_IMMEDIATE) && !defined(CONFIG_NO_OPTIMIZATIONS)
#define SIMPLE_USAGE 616 #define SIMPLE_USAGE 704
#define HEXDUMP_USAGE 616 #define HEXDUMP_USAGE 704
#define MORE_ARGS_USAGE 648 #define MORE_ARGS_USAGE 736
#elif !defined(CONFIG_LOG_MODE_IMMEDIATE) && defined(CONFIG_NO_OPTIMIZATIONS) #elif !defined(CONFIG_LOG_MODE_IMMEDIATE) && defined(CONFIG_NO_OPTIMIZATIONS)
#define SIMPLE_USAGE 272 #define SIMPLE_USAGE 272
#define HEXDUMP_USAGE 272 #define HEXDUMP_USAGE 272