arch: reorg the dependencies around (exception) stack trace
This commit introduces a new ARCH_STACKWALK Kconfig which determines if `arch_stack_walk()` is available, should the arch support it. Starting from RISCV, this will be able to converge the exception stack trace implementation & stack walking features. The existing exception stack trace implementation will be updated later. Eventually we will end up with the following: 1. If an arch implements `arch_stack_walk()`, `ARCH_HAS_STACKWALK` should be selected. 2. If the above is enabled, `ARCH_SUPPORTS_STACKWALK` indicates if the dependencies are met for the arch to enable stack walking. This Kconfig replaces `<arch>_EXCEPTION_STACK_TRACE`. 3. If the above is enabled, then `ARCH_STACKWALK` determines if `arch_stack_walk()` should be compiled. 4. `EXCEPTION_STACK_TRACE` should build on top of `ARCH_STACKWALK`; stack traces will be printed when it is enabled. 5. `EXCEPTION_STACK_TRACE_MAX_FRAMES` will be removed, as it is replaced by `ARCH_STACKWALK_MAX_FRAMES`. Signed-off-by: Yong Cong Sin <ycsin@meta.com> Signed-off-by: Yong Cong Sin <yongcong.sin@gmail.com>
This commit is contained in:
parent
e0770d4557
commit
6fae891c7e
7 changed files with 51 additions and 49 deletions
15
arch/Kconfig
15
arch/Kconfig
|
@ -114,7 +114,6 @@ config RISCV
|
|||
select ARCH_SUPPORTS_ROM_START if !SOC_FAMILY_ESPRESSIF_ESP32
|
||||
select ARCH_HAS_CODE_DATA_RELOCATION
|
||||
select ARCH_HAS_THREAD_LOCAL_STORAGE
|
||||
select ARCH_HAS_STACKWALK
|
||||
select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
|
||||
select USE_SWITCH_SUPPORTED
|
||||
select USE_SWITCH
|
||||
|
@ -415,10 +414,17 @@ config FRAME_POINTER
|
|||
Select Y here to gain precise stack traces at the expense of slightly
|
||||
increased size and decreased speed.
|
||||
|
||||
config ARCH_STACKWALK
|
||||
bool "Compile the stack walking function"
|
||||
default y
|
||||
depends on ARCH_HAS_STACKWALK
|
||||
help
|
||||
Select Y here to compile the `arch_stack_walk()` function
|
||||
|
||||
config ARCH_STACKWALK_MAX_FRAMES
|
||||
int "Max depth for stack walk function"
|
||||
default 8
|
||||
depends on ARCH_HAS_STACKWALK
|
||||
depends on ARCH_STACKWALK
|
||||
help
|
||||
Depending on implementation, this can place a hard limit on the depths of the stack
|
||||
for the stack walk function to examine.
|
||||
|
@ -671,11 +677,6 @@ config ARCH_HAS_EXTRA_EXCEPTION_INFO
|
|||
config ARCH_HAS_GDBSTUB
|
||||
bool
|
||||
|
||||
config ARCH_HAS_STACKWALK
|
||||
bool
|
||||
help
|
||||
This is selected when the architecture implemented the arch_stack_walk() API.
|
||||
|
||||
config ARCH_HAS_COHERENCE
|
||||
bool
|
||||
help
|
||||
|
|
|
@ -37,13 +37,6 @@ config RISCV_ALWAYS_SWITCH_THROUGH_ECALL
|
|||
and most people should say n here to minimize context switching
|
||||
overhead.
|
||||
|
||||
config RISCV_EXCEPTION_STACK_TRACE
|
||||
bool
|
||||
default y
|
||||
imply THREAD_STACK_INFO
|
||||
help
|
||||
Internal config to enable runtime stack traces on fatal exceptions.
|
||||
|
||||
menu "RISCV Processor Options"
|
||||
|
||||
config INCLUDE_RESET_VECTOR
|
||||
|
@ -383,6 +376,14 @@ config GEN_IRQ_VECTOR_TABLE
|
|||
config ARCH_HAS_SINGLE_THREAD_SUPPORT
|
||||
default y if !SMP
|
||||
|
||||
config ARCH_HAS_STACKWALK
|
||||
bool
|
||||
default y
|
||||
imply THREAD_STACK_INFO
|
||||
help
|
||||
Internal config to indicate that the arch_stack_walk() API is implemented
|
||||
and it can be enabled.
|
||||
|
||||
rsource "Kconfig.isa"
|
||||
|
||||
endmenu
|
||||
|
|
|
@ -25,5 +25,5 @@ zephyr_library_sources_ifdef(CONFIG_RISCV_PMP pmp.c pmp.S)
|
|||
zephyr_library_sources_ifdef(CONFIG_THREAD_LOCAL_STORAGE tls.c)
|
||||
zephyr_library_sources_ifdef(CONFIG_USERSPACE userspace.S)
|
||||
zephyr_library_sources_ifdef(CONFIG_SEMIHOST semihost.c)
|
||||
zephyr_library_sources_ifdef(CONFIG_EXCEPTION_STACK_TRACE stacktrace.c)
|
||||
zephyr_library_sources_ifdef(CONFIG_ARCH_STACKWALK stacktrace.c)
|
||||
zephyr_linker_sources(ROM_START SORT_KEY 0x0vectors vector_table.ld)
|
||||
|
|
|
@ -98,12 +98,12 @@ FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const struct arc
|
|||
#endif /* CONFIG_RISCV_ISA_RV32E */
|
||||
LOG_ERR("");
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_EXCEPTION_STACK_TRACE)) {
|
||||
z_riscv_unwind_stack(esf, csf);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_EXCEPTION_DEBUG */
|
||||
|
||||
#ifdef CONFIG_EXCEPTION_STACK_TRACE
|
||||
z_riscv_unwind_stack(esf, csf);
|
||||
#endif /* CONFIG_EXCEPTION_STACK_TRACE */
|
||||
|
||||
z_fatal_error(reason, esf);
|
||||
CODE_UNREACHABLE;
|
||||
}
|
||||
|
|
|
@ -14,8 +14,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
|
|||
|
||||
uintptr_t z_riscv_get_sp_before_exc(const struct arch_esf *esf);
|
||||
|
||||
#define MAX_STACK_FRAMES \
|
||||
MAX(CONFIG_EXCEPTION_STACK_TRACE_MAX_FRAMES, CONFIG_ARCH_STACKWALK_MAX_FRAMES)
|
||||
#define MAX_STACK_FRAMES CONFIG_ARCH_STACKWALK_MAX_FRAMES
|
||||
|
||||
struct stackframe {
|
||||
uintptr_t fp;
|
||||
|
@ -87,26 +86,6 @@ static bool in_stack_bound(uintptr_t addr, const struct k_thread *const thread,
|
|||
return in_kernel_thread_stack_bound(addr, thread);
|
||||
}
|
||||
|
||||
static bool in_fatal_stack_bound(uintptr_t addr, const struct k_thread *const thread,
|
||||
const struct arch_esf *esf)
|
||||
{
|
||||
const uintptr_t align =
|
||||
COND_CODE_1(CONFIG_FRAME_POINTER, (ARCH_STACK_PTR_ALIGN), (sizeof(uintptr_t)));
|
||||
|
||||
if (!IS_ALIGNED(addr, align)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if ((thread == NULL) || arch_is_in_isr()) {
|
||||
/* We were servicing an interrupt */
|
||||
uint8_t cpu_id = IS_ENABLED(CONFIG_SMP) ? arch_curr_cpu()->id : 0U;
|
||||
|
||||
return in_irq_stack_bound(addr, cpu_id);
|
||||
}
|
||||
|
||||
return in_stack_bound(addr, thread, esf);
|
||||
}
|
||||
|
||||
static inline bool in_text_region(uintptr_t addr)
|
||||
{
|
||||
extern uintptr_t __text_region_start, __text_region_end;
|
||||
|
@ -242,6 +221,27 @@ void arch_stack_walk(stack_trace_callback_fn callback_fn, void *cookie,
|
|||
walk_stackframe(callback_fn, cookie, thread, esf, in_stack_bound, &thread->callee_saved);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_EXCEPTION_STACK_TRACE
|
||||
static bool in_fatal_stack_bound(uintptr_t addr, const struct k_thread *const thread,
|
||||
const struct arch_esf *esf)
|
||||
{
|
||||
const uintptr_t align =
|
||||
COND_CODE_1(CONFIG_FRAME_POINTER, (ARCH_STACK_PTR_ALIGN), (sizeof(uintptr_t)));
|
||||
|
||||
if (!IS_ALIGNED(addr, align)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if ((thread == NULL) || arch_is_in_isr()) {
|
||||
/* We were servicing an interrupt */
|
||||
uint8_t cpu_id = IS_ENABLED(CONFIG_SMP) ? arch_curr_cpu()->id : 0U;
|
||||
|
||||
return in_irq_stack_bound(addr, cpu_id);
|
||||
}
|
||||
|
||||
return in_stack_bound(addr, thread, esf);
|
||||
}
|
||||
|
||||
#if __riscv_xlen == 32
|
||||
#define PR_REG "%08" PRIxPTR
|
||||
#elif __riscv_xlen == 64
|
||||
|
@ -276,3 +276,4 @@ void z_riscv_unwind_stack(const struct arch_esf *esf, const _callee_saved_t *csf
|
|||
walk_stackframe(print_trace_address, &i, _current, esf, in_fatal_stack_bound, csf);
|
||||
LOG_ERR("");
|
||||
}
|
||||
#endif /* CONFIG_EXCEPTION_STACK_TRACE */
|
||||
|
|
|
@ -382,8 +382,7 @@ config EXCEPTION_STACK_TRACE
|
|||
bool "Attempt to print stack traces upon exceptions"
|
||||
default y
|
||||
depends on (X86_EXCEPTION_STACK_TRACE || \
|
||||
ARM64_EXCEPTION_STACK_TRACE || \
|
||||
RISCV_EXCEPTION_STACK_TRACE)
|
||||
ARM64_EXCEPTION_STACK_TRACE) || ARCH_STACKWALK
|
||||
help
|
||||
If the architecture fatal handling code supports it, attempt to
|
||||
print a stack trace of function memory addresses when an
|
||||
|
@ -391,7 +390,7 @@ config EXCEPTION_STACK_TRACE
|
|||
|
||||
config EXCEPTION_STACK_TRACE_MAX_FRAMES
|
||||
int "Configures the depth of stack trace"
|
||||
default ARCH_STACKWALK_MAX_FRAMES if ARCH_HAS_STACKWALK
|
||||
default ARCH_STACKWALK_MAX_FRAMES if ARCH_STACKWALK
|
||||
default 8
|
||||
depends on EXCEPTION_STACK_TRACE
|
||||
help
|
||||
|
|
|
@ -208,7 +208,7 @@ static int cmd_kernel_threads(const struct shell *sh,
|
|||
return 0;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_ARCH_HAS_STACKWALK)
|
||||
#if defined(CONFIG_ARCH_STACKWALK)
|
||||
|
||||
static bool print_trace_address(void *arg, unsigned long ra)
|
||||
{
|
||||
|
@ -266,7 +266,7 @@ static int cmd_kernel_unwind(const struct shell *sh, size_t argc, char **argv)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_ARCH_HAS_STACKWALK */
|
||||
#endif /* CONFIG_ARCH_STACKWALK */
|
||||
|
||||
static void shell_stack_dump(const struct k_thread *thread, void *user_data)
|
||||
{
|
||||
|
@ -462,9 +462,9 @@ SHELL_STATIC_SUBCMD_SET_CREATE(sub_kernel,
|
|||
defined(CONFIG_THREAD_MONITOR)
|
||||
SHELL_CMD(stacks, NULL, "List threads stack usage.", cmd_kernel_stacks),
|
||||
SHELL_CMD(threads, NULL, "List kernel threads.", cmd_kernel_threads),
|
||||
#if defined(CONFIG_ARCH_HAS_STACKWALK)
|
||||
#if defined(CONFIG_ARCH_STACKWALK)
|
||||
SHELL_CMD_ARG(unwind, NULL, "Unwind a thread.", cmd_kernel_unwind, 1, 1),
|
||||
#endif /* CONFIG_ARCH_HAS_STACKWALK */
|
||||
#endif /* CONFIG_ARCH_STACKWALK */
|
||||
#endif
|
||||
#if defined(CONFIG_SYS_HEAP_RUNTIME_STATS) && (K_HEAP_MEM_POOL_SIZE > 0)
|
||||
SHELL_CMD(heap, NULL, "System heap usage statistics.", cmd_kernel_heap),
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue