arch: riscv: implement frame-pointer based stack unwinding

Influenced heavily by the RISCV64 stack unwinding
implementation in the Linux kernel.

`CONFIG_RISCV_EXCEPTION_STACK_TRACE` can be enabled by
configuring the following Kconfigs:

```prj.conf
CONFIG_DEBUG_INFO=y
CONFIG_EXCEPTION_STACK_TRACE=y
CONFIG_OVERRIDE_FRAME_POINTER_DEFAULT=y
CONFIG_OMIT_FRAME_POINTER=n
```
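
As a rough usage sketch (not part of this change): with the `prj.conf` options above, any fatal error on a RISC-V target should now end up in `z_riscv_fatal_error()` and print a call trace. The snippet below, with purely illustrative function names, simply panics a few calls deep so there is something to unwind:

```c
/* Illustrative only: trigger a fatal error so the new unwinder runs.
 * Build with the prj.conf options shown above on a RISC-V target.
 */
#include <zephyr/kernel.h>

static void level2(void)
{
	/* k_panic() raises a fatal error; with CONFIG_RISCV_EXCEPTION_STACK_TRACE=y
	 * the handler is expected to log "call trace:" followed by fp/ra pairs
	 * corresponding to main() -> level1() -> level2().
	 */
	k_panic();
}

static void level1(void)
{
	level2();
}

int main(void)
{
	level1();

	return 0;
}
```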

Signed-off-by: Yong Cong Sin <ycsin@meta.com>
commit 7398831884
Author:    Yong Cong Sin <ycsin@meta.com>
Committer: Anas Nashif
Date:      2024-04-02 16:57:04 +08:00

2 changed files with 92 additions and 0 deletions

@@ -38,6 +38,24 @@ config RISCV_ALWAYS_SWITCH_THROUGH_ECALL
	  and most people should say n here to minimize context switching
	  overhead.

config RISCV_ENABLE_FRAME_POINTER
	bool
	default y
	depends on OVERRIDE_FRAME_POINTER_DEFAULT && !OMIT_FRAME_POINTER
	help
	  Hidden option to simplify access to OVERRIDE_FRAME_POINTER_DEFAULT
	  and OMIT_FRAME_POINTER. It is automatically enabled when the frame
	  pointer unwinding is enabled.

config RISCV_EXCEPTION_STACK_TRACE
	bool
	default y
	depends on RISCV_ENABLE_FRAME_POINTER
	depends on EXCEPTION_STACK_TRACE
	imply THREAD_STACK_INFO
	help
	  Internal config to enable runtime stack traces on fatal exceptions.

menu "RISCV Processor Options"

config INCLUDE_RESET_VECTOR

@@ -28,6 +28,77 @@ static const struct z_exc_handle exceptions[] = {
#define NO_REG " "
#endif

#ifdef CONFIG_RISCV_EXCEPTION_STACK_TRACE
#define MAX_STACK_FRAMES 8

struct stackframe {
	uintptr_t fp;
	uintptr_t ra;
};

static bool in_stack_bound(uintptr_t addr)
{
#ifdef CONFIG_THREAD_STACK_INFO
	uintptr_t start, end;

	if (_current == NULL || arch_is_in_isr()) {
		/* We were servicing an interrupt */
		int cpu_id;

#ifdef CONFIG_SMP
		cpu_id = arch_curr_cpu()->id;
#else
		cpu_id = 0;
#endif

		start = (uintptr_t)K_KERNEL_STACK_BUFFER(z_interrupt_stacks[cpu_id]);
		end = start + CONFIG_ISR_STACK_SIZE;
#ifdef CONFIG_USERSPACE
		/* TODO: handle user threads */
#endif
	} else {
		start = _current->stack_info.start;
		end = Z_STACK_PTR_ALIGN(_current->stack_info.start + _current->stack_info.size);
	}

	return (addr >= start) && (addr < end);
#else
	ARG_UNUSED(addr);

	return true;
#endif /* CONFIG_THREAD_STACK_INFO */
}

static inline bool in_text_region(uintptr_t addr)
{
	extern uintptr_t __text_region_start, __text_region_end;

	return (addr >= (uintptr_t)&__text_region_start) && (addr < (uintptr_t)&__text_region_end);
}

static void unwind_stack(const z_arch_esf_t *esf)
{
	uintptr_t fp = esf->s0;
	uintptr_t ra;
	struct stackframe *frame;

	LOG_ERR("call trace:");

	for (int i = 0; (i < MAX_STACK_FRAMES) && (fp != 0U) && in_stack_bound((uintptr_t)fp);) {
		frame = (struct stackframe *)fp - 1;
		ra = frame->ra;
		if (in_text_region(ra)) {
			LOG_ERR(" %2d: fp: " PR_REG " ra: " PR_REG, i, (uintptr_t)fp, ra);
			/*
			 * Increment the iterator only if `ra` is within the text region to get the
			 * most out of it
			 */
			i++;
		}
		fp = frame->fp;
	}
}
#endif /* CONFIG_RISCV_EXCEPTION_STACK_TRACE */
FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
				       const z_arch_esf_t *esf)
{

@@ -54,6 +125,9 @@ FUNC_NORETURN void z_riscv_fatal_error(unsigned int reason,
		LOG_ERR(" mepc: " PR_REG, esf->mepc);
		LOG_ERR("mstatus: " PR_REG, esf->mstatus);
		LOG_ERR("");

#ifdef CONFIG_RISCV_EXCEPTION_STACK_TRACE
		unwind_stack(esf);
#endif /* CONFIG_RISCV_EXCEPTION_STACK_TRACE */
	}
#endif /* CONFIG_EXCEPTION_DEBUG */

	z_fatal_error(reason, esf);