arch: x86: implement arch_stack_walk()

Currently, only `esf`-based unwinding is supported.

Then, update the exception stack unwinding to use
`arch_stack_walk()`, and update the Kconfig options and the
testcase accordingly.

Signed-off-by: Yong Cong Sin <ycsin@meta.com>
Signed-off-by: Yong Cong Sin <yongcong.sin@gmail.com>
Commit 06a8c35316 by Yong Cong Sin, 2024-08-14 17:08:28 +08:00
(committed by Anas Nashif)
5 changed files with 80 additions and 41 deletions
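
For context, a minimal sketch of how a consumer might drive the new hook. The names here are hypothetical, and the `stack_trace_callback_fn` signature `bool (*)(void *cookie, unsigned long addr)` is assumed from Zephyr's arch interface; returning false stops the walk, mirroring the `cb()` contract in `walk_stackframe()` in the diff below:

	#include <zephyr/kernel.h>

	/* Hypothetical cookie type: collect up to 8 return addresses. */
	struct trace_buf {
		unsigned long addr[8];
		size_t n;
	};

	static bool collect_frame(void *cookie, unsigned long addr)
	{
		struct trace_buf *buf = cookie;

		buf->addr[buf->n++] = addr;

		/* Returning false tells the unwinder to stop early. */
		return buf->n < ARRAY_SIZE(buf->addr);
	}

	static void collect_exception_trace(const struct arch_esf *esf)
	{
		struct trace_buf buf = { .n = 0 };

		/* thread is ignored by this x86 implementation; only
		 * esf-based unwinding is supported, so esf must be valid.
		 */
		arch_stack_walk(collect_frame, &buf, NULL, esf);
	}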


@@ -163,14 +163,15 @@ config X86_DYNAMIC_IRQ_STUBS

 endmenu

-config X86_EXCEPTION_STACK_TRACE
+config ARCH_HAS_STACKWALK
 	bool
 	default y
 	select DEBUG_INFO
 	select THREAD_STACK_INFO
 	depends on !OMIT_FRAME_POINTER
 	help
-	  Internal config to enable runtime stack traces on fatal exceptions.
+	  Internal config to indicate that the arch_stack_walk() API is implemented
+	  and it can be enabled.

 config X86_USE_THREAD_LOCAL_STORAGE
 	bool


@@ -29,7 +29,7 @@ config X86_EXCEPTION_STACK_SIZE
 	  support limited call-tree depth and must fit into the low core,
 	  so they are typically smaller than the ISR stacks.

-config X86_EXCEPTION_STACK_TRACE
+config ARCH_HAS_STACKWALK
 	bool
 	default y
 	select DEBUG_INFO
@@ -37,7 +37,8 @@ config X86_EXCEPTION_STACK_TRACE
 	depends on !OMIT_FRAME_POINTER
 	depends on NO_OPTIMIZATIONS
 	help
-	  Internal config to enable runtime stack traces on fatal exceptions.
+	  Internal config to indicate that the arch_stack_walk() API is implemented
+	  and it can be enabled.

 config SCHED_IPI_VECTOR
 	int "IDT vector to use for scheduler IPI"


@@ -84,7 +84,7 @@ bool z_x86_check_stack_bounds(uintptr_t addr, size_t size, uint16_t cs)

 	return (addr <= start) || (addr + size > end);
 }
-#endif
+#endif /* CONFIG_THREAD_STACK_INFO */

 #ifdef CONFIG_THREAD_STACK_MEM_MAPPED
 /**
@@ -120,18 +120,9 @@ bool z_x86_check_guard_page(uintptr_t addr)
 }
 #endif /* CONFIG_THREAD_STACK_MEM_MAPPED */

-#ifdef CONFIG_EXCEPTION_DEBUG
-static inline uintptr_t esf_get_code(const struct arch_esf *esf)
-{
-#ifdef CONFIG_X86_64
-	return esf->code;
-#else
-	return esf->errorCode;
-#endif
-}
-
-#if defined(CONFIG_EXCEPTION_STACK_TRACE)
+#if defined(CONFIG_ARCH_STACKWALK)
+typedef bool (*x86_stacktrace_cb)(void *cookie, unsigned long addr, unsigned long arg);
+
 struct stack_frame {
 	uintptr_t next;
 	uintptr_t ret_addr;
@@ -140,20 +131,31 @@ struct stack_frame {
 #endif
 };

-#define MAX_STACK_FRAMES CONFIG_EXCEPTION_STACK_TRACE_MAX_FRAMES
-
-__pinned_func
-static void unwind_stack(uintptr_t base_ptr, uint16_t cs)
+__pinned_func static void walk_stackframe(x86_stacktrace_cb cb, void *cookie,
+					  const struct arch_esf *esf, int max_frames)
 {
+	uintptr_t base_ptr;
+	uint16_t cs;
 	struct stack_frame *frame;
 	int i;

+	if (esf != NULL) {
+#ifdef CONFIG_X86_64
+		base_ptr = esf->rbp;
+#else /* x86 32-bit */
+		base_ptr = esf->ebp;
+#endif /* CONFIG_X86_64 */
+		cs = esf->cs;
+	} else {
+		return;
+	}
+
 	if (base_ptr == 0U) {
 		LOG_ERR("NULL base ptr");
 		return;
 	}

-	for (i = 0; i < MAX_STACK_FRAMES; i++) {
+	for (i = 0; i < max_frames; i++) {
 		if (base_ptr % sizeof(base_ptr) != 0U) {
 			LOG_ERR("unaligned frame ptr");
 			return;
@@ -178,16 +180,59 @@ static void unwind_stack(uintptr_t base_ptr, uint16_t cs)
 		if (frame->ret_addr == 0U) {
 			break;
 		}
-#ifdef CONFIG_X86_64
-		LOG_ERR(" 0x%016lx", frame->ret_addr);
-#else
-		LOG_ERR(" 0x%08lx (0x%lx)", frame->ret_addr, frame->args);
-#endif
+
+		if (!cb(cookie, frame->ret_addr,
+			COND_CODE_1(CONFIG_X86_64, (0), (frame->args)))) {
+			break;
+		}
+
 		base_ptr = frame->next;
 	}
 }
+
+void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
+		     const struct k_thread *thread, const struct arch_esf *esf)
+{
+	ARG_UNUSED(thread);
+
+	walk_stackframe((x86_stacktrace_cb)callback_fn, cookie, esf,
+			CONFIG_ARCH_STACKWALK_MAX_FRAMES);
+}
+#endif /* CONFIG_ARCH_STACKWALK */
+
+#if defined(CONFIG_EXCEPTION_STACK_TRACE)
+static bool print_trace_address(void *arg, unsigned long addr, unsigned long args)
+{
+	int *i = arg;
+
+#ifdef CONFIG_X86_64
+	LOG_ERR(" %d: 0x%016lx", (*i)++, addr);
+#else
+	LOG_ERR(" %d: 0x%08lx (0x%lx)", (*i)++, addr, args);
+#endif
+
+	return true;
+}
+
+__pinned_func
+static void unwind_stack(const struct arch_esf *esf)
+{
+	int i = 0;
+
+	walk_stackframe(print_trace_address, &i, esf, CONFIG_ARCH_STACKWALK_MAX_FRAMES);
+}
 #endif /* CONFIG_EXCEPTION_STACK_TRACE */

+#ifdef CONFIG_EXCEPTION_DEBUG
+static inline uintptr_t esf_get_code(const struct arch_esf *esf)
+{
+#ifdef CONFIG_X86_64
+	return esf->code;
+#else
+	return esf->errorCode;
+#endif
+}
+
 static inline uintptr_t get_cr3(const struct arch_esf *esf)
 {
 #if defined(CONFIG_USERSPACE) && defined(CONFIG_X86_KPTI)
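
A note on the `COND_CODE_1()` call inside `walk_stackframe()` above: it is Zephyr's compile-time token-selection macro, used here so that the 32-bit-only `args` field of `struct stack_frame` is never even named when building for x86_64. Roughly, the call reduces to the following (a sketch of the effect, not the literal expansion):

	#ifdef CONFIG_X86_64
		/* 64-bit frames carry no args slot; pass 0 to the callback. */
		ok = cb(cookie, frame->ret_addr, 0);
	#else
		/* 32-bit frames also expose the saved argument word. */
		ok = cb(cookie, frame->ret_addr, frame->args);
	#endif
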
@@ -226,13 +271,7 @@ static void dump_regs(const struct arch_esf *esf)
 	LOG_ERR("RSP: 0x%016lx RFLAGS: 0x%016lx CS: 0x%04lx CR3: 0x%016lx",
 		esf->rsp, esf->rflags, esf->cs & 0xFFFFU, get_cr3(esf));

-#ifdef CONFIG_EXCEPTION_STACK_TRACE
-	LOG_ERR("call trace:");
-#endif
 	LOG_ERR("RIP: 0x%016lx", esf->rip);
-#ifdef CONFIG_EXCEPTION_STACK_TRACE
-	unwind_stack(esf->rbp, esf->cs);
-#endif
 }
 #else /* 32-bit */
 __pinned_func
@@ -245,13 +284,7 @@ static void dump_regs(const struct arch_esf *esf)
 	LOG_ERR("EFLAGS: 0x%08x CS: 0x%04x CR3: 0x%08lx", esf->eflags,
 		esf->cs & 0xFFFFU, get_cr3(esf));

-#ifdef CONFIG_EXCEPTION_STACK_TRACE
-	LOG_ERR("call trace:");
-#endif
 	LOG_ERR("EIP: 0x%08x", esf->eip);
-#ifdef CONFIG_EXCEPTION_STACK_TRACE
-	unwind_stack(esf->ebp, esf->cs);
-#endif
 }
 #endif /* CONFIG_X86_64 */
@@ -368,6 +401,10 @@ FUNC_NORETURN void z_x86_fatal_error(unsigned int reason,
 #ifdef CONFIG_EXCEPTION_DEBUG
 	dump_regs(esf);
 #endif
+#ifdef CONFIG_EXCEPTION_STACK_TRACE
+	LOG_ERR("call trace:");
+	unwind_stack(esf);
+#endif /* CONFIG_EXCEPTION_STACK_TRACE */
 #if defined(CONFIG_ASSERT) && defined(CONFIG_X86_64)
 	if (esf->rip == 0xb9) {
 		/* See implementation of __resume in locore.S. This is


@@ -381,8 +381,7 @@ config DEBUG_INFO
 config EXCEPTION_STACK_TRACE
 	bool "Attempt to print stack traces upon exceptions"
 	default y
-	depends on (X86_EXCEPTION_STACK_TRACE || \
-		    ARM64_EXCEPTION_STACK_TRACE) || ARCH_STACKWALK
+	depends on ARM64_EXCEPTION_STACK_TRACE || ARCH_STACKWALK
 	help
 	  If the architecture fatal handling code supports it, attempt to
 	  print a stack trace of function memory addresses when an


@@ -42,7 +42,8 @@ tests:
       type: multi_line
       regex:
         - "E: call trace:"
-        - "E: (E|R)IP: \\w+"
+        - "E: 0: \\w+"
+        - "E: 1: \\w+"
   arch.common.stack_unwind.arm64:
     arch_allow:
       - arm64
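
With the numbered format from `print_trace_address()`, the output these regexes match would look roughly like the following (illustrative addresses; the exact spacing depends on the LOG_ERR format strings above):

	E: call trace:
	E: 0: 0x0000000000104c2e
	E: 1: 0x0000000000100b51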