subsys/profiling: relocate stack unwind backends

Relocate the stack unwind backends from `arch/` to perf's
`backends/` folder, following the layout used by the logging and shell subsystems.

Signed-off-by: Yong Cong Sin <ycsin@meta.com>
Signed-off-by: Yong Cong Sin <yongcong.sin@gmail.com>
This commit is contained in:
Yong Cong Sin 2024-08-16 15:59:09 +08:00 committed by Henrik Brix Andersen
commit 42362c6fcc
10 changed files with 58 additions and 8 deletions

View file

@ -2,4 +2,10 @@
#
# SPDX-License-Identifier: Apache-2.0
zephyr_sources(perf.c)
add_subdirectory(backends)
zephyr_library()
zephyr_library_sources(
perf.c
)

View file

@ -4,11 +4,9 @@
config PROFILING_PERF
bool "Perf support"
depends on THREAD_STACK_INFO
depends on !SMP
depends on FRAME_POINTER
depends on SHELL
depends on RISCV || X86
depends on PROFILING_PERF_HAS_BACKEND
help
Enable perf shell command.
@ -21,3 +19,5 @@ config PROFILING_PERF_BUFFER_SIZE
Size of buffer used by perf to save stack trace samples.
endif
rsource "backends/Kconfig"

View file

@ -0,0 +1,15 @@
# Copyright (c) 2024 Meta Platforms
#
# SPDX-License-Identifier: Apache-2.0
# Compile exactly one architecture-specific perf stack-unwind backend,
# selected via the corresponding PROFILING_PERF_BACKEND_* Kconfig symbol.
zephyr_sources_ifdef(CONFIG_PROFILING_PERF_BACKEND_RISCV
perf_riscv.c
)
zephyr_sources_ifdef(CONFIG_PROFILING_PERF_BACKEND_X86
perf_x86.c
)
zephyr_sources_ifdef(CONFIG_PROFILING_PERF_BACKEND_X86_64
perf_x86_64.c
)

View file

@ -0,0 +1,33 @@
# Copyright (c) 2024 Meta Platforms
#
# SPDX-License-Identifier: Apache-2.0
# Invisible symbol: selected by any backend below to signal that an
# implementation of arch_perf_current_stack_trace() exists.
config PROFILING_PERF_HAS_BACKEND
bool
help
Selected when there's an implementation for
`arch_perf_current_stack_trace()`
# Frame-pointer based unwinder for RISC-V; enabled by default when the
# architecture and the required stack/frame-pointer options are in effect.
config PROFILING_PERF_BACKEND_RISCV
bool
default y
depends on RISCV
depends on THREAD_STACK_INFO
depends on FRAME_POINTER
select PROFILING_PERF_HAS_BACKEND
# Frame-pointer based unwinder for 32-bit x86 (IA-32 only, not X86_64).
config PROFILING_PERF_BACKEND_X86
bool
default y
depends on X86 && !X86_64
depends on THREAD_STACK_INFO
depends on FRAME_POINTER
select PROFILING_PERF_HAS_BACKEND
# Frame-pointer based unwinder for x86-64.
config PROFILING_PERF_BACKEND_X86_64
bool
default y
depends on X86_64
depends on THREAD_STACK_INFO
depends on FRAME_POINTER
select PROFILING_PERF_HAS_BACKEND

View file

@ -0,0 +1,93 @@
/*
* Copyright (c) 2023 KNS Group LLC (YADRO)
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/kernel.h>
/* Return true iff @a addr lies inside the stack region of thread @a current. */
static bool valid_stack(uintptr_t addr, k_tid_t current)
{
	const uintptr_t stack_start = current->stack_info.start;
	const uintptr_t stack_end = stack_start + current->stack_info.size;

	return (addr >= stack_start) && (addr < stack_end);
}
/*
 * This function uses frame pointers to unwind the stack and collect a trace
 * of return addresses. The return addresses can later be translated into the
 * corresponding function names using the .elf file, yielding a function call
 * trace.
 *
 * @param buf  output array for the collected return addresses
 * @param size capacity of @a buf in entries
 * @return number of entries written to @a buf, or 0 if @a buf is too small
 */
size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size)
{
/* At least two entries ($mepc and $ra) are always recorded. */
if (size < 2U)
return 0;
size_t idx = 0;
/*
 * In riscv (arch/riscv/core/isr.S) ra, ip($mepc) and fp($s0) are saved
 * at the beginning of _isr_wrapper in the order specified by
 * struct arch_esf. Then, before calling the interrupt handler, the core
 * switches $sp to _current_cpu->irq_stack and saves the old $sp at
 * offset -16 on the irq stack.
 *
 * The following lines do the reverse to recover ra, ip and fp
 * from the interrupted thread's stack.
 */
const struct arch_esf * const esf =
*((struct arch_esf **)(((uintptr_t)_current_cpu->irq_stack) - 16));
/*
 * $s0 is used as the frame pointer.
 *
 * stack frame in memory (commonly):
 * (addresses grow up)
 * ....
 * [-] <- $fp($s0) (curr)
 * $ra
 * $fp($s0) (next)
 * ....
 *
 * If a function does not call any other function, the compiler may not
 * save $ra; the stack frame is then:
 * ....
 * [-] <- $fp($s0) (curr)
 * $fp($s0) (next)
 * ....
 *
 */
void **fp = (void **)esf->s0;
void **new_fp = (void **)fp[-1];
/* First entry: the instruction the interrupt preempted. */
buf[idx++] = (uintptr_t)esf->mepc;
/*
 * During the function prologue and epilogue fp equals the fp of the
 * previous function's stack frame, which makes the second function from
 * the top appear to be missing. Saving $ra as well covers the case
 * where the irq occurred in a function prologue or epilogue.
 */
buf[idx++] = (uintptr_t)esf->ra;
if (valid_stack((uintptr_t)new_fp, _current)) {
fp = new_fp;
}
while (valid_stack((uintptr_t)fp, _current)) {
if (idx >= size)
return 0;
/* Per the diagram above: fp[-1] is the saved $ra, fp[-2] the caller's fp. */
buf[idx++] = (uintptr_t)fp[-1];
new_fp = (void **)fp[-2];
/*
 * anti-infinite-loop check:
 * new_fp cannot be smaller than fp, because the stack grows down
 * and the trace moves deeper into the stack
 */
if (new_fp <= fp) {
break;
}
fp = new_fp;
}
return idx;
}

View file

@ -0,0 +1,81 @@
/*
* Copyright (c) 2023 KNS Group LLC (YADRO)
* Copyright (c) 2020 Yonatan Goldschmidt <yon.goldschmidt@gmail.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/kernel.h>
/* Return true iff @a addr falls within the stack bounds of thread @a current. */
static bool valid_stack(uintptr_t addr, k_tid_t current)
{
	const uintptr_t lo = current->stack_info.start;
	const uintptr_t hi = lo + current->stack_info.size;

	return (addr >= lo) && (addr < hi);
}
/*
 * Interrupt stack frame as laid down at interrupt entry.
 * NOTE(review): field order must mirror the register save sequence in
 * _interrupt_enter (arch/x86/core/ia32/intstub.S) — verify against that file.
 */
struct isf {
uint32_t ebp;
uint32_t ecx;
uint32_t edx;
uint32_t eax;
uint32_t eip;
};
/*
 * This function uses frame pointers to unwind the stack and collect a trace
 * of return addresses. The return addresses can later be translated into the
 * corresponding function names using the .elf file, yielding a function call
 * trace.
 *
 * @param buf  output array for the collected return addresses
 * @param size capacity of @a buf in entries
 * @return number of entries written to @a buf, or 0 if @a buf is too small
 */
size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size)
{
if (size < 1U)
return 0;
size_t idx = 0;
/* The old %esp (pointing at the saved struct isf) sits on top of the irq stack. */
const struct isf * const isf =
*((struct isf **)(((void **)_current_cpu->irq_stack)-1));
/*
 * In x86 (arch/x86/core/ia32/intstub.S) %eip and %ebp
 * are saved at the beginning of _interrupt_enter in the order described
 * by struct isf. The core then switches %esp to
 * _current_cpu->irq_stack and pushes the old %esp on the irq stack.
 *
 * The following lines do the reverse to recover %eip and %ebp
 * from the interrupted thread's stack.
 */
void **fp = (void **)isf->ebp;
/*
 * %ebp is the frame pointer.
 *
 * stack frame in memory:
 * (addresses grow up)
 * ....
 * ra
 * %ebp (next) <- %ebp (curr)
 * ....
 */
/* First entry: the instruction the interrupt preempted. */
buf[idx++] = (uintptr_t)isf->eip;
while (valid_stack((uintptr_t)fp, _current)) {
if (idx >= size)
return 0;
/* Per the diagram above: fp[1] is the saved ra, fp[0] the caller's %ebp. */
buf[idx++] = (uintptr_t)fp[1];
void **new_fp = (void **)fp[0];
/*
 * anti-infinite-loop check:
 * new_fp cannot be smaller than fp, because the stack grows down
 * and the trace moves deeper into the stack
 */
if (new_fp <= fp) {
break;
}
fp = new_fp;
}
return idx;
}

View file

@ -0,0 +1,67 @@
/*
* Copyright (c) 2023 KNS Group LLC (YADRO)
* Copyright (c) 2020 Yonatan Goldschmidt <yon.goldschmidt@gmail.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/kernel.h>
/* Return true iff @a addr is within the stack region owned by @a current. */
static bool valid_stack(uintptr_t addr, k_tid_t current)
{
	const uintptr_t base = current->stack_info.start;
	const uintptr_t limit = base + current->stack_info.size;

	return (addr >= base) && (addr < limit);
}
/*
 * This function uses frame pointers to unwind the stack and collect a trace
 * of return addresses. The return addresses can later be translated into the
 * corresponding function names using the .elf file, yielding a function call
 * trace.
 *
 * Fix: single-statement `if` bodies are now braced, per the project's
 * always-brace coding style; behavior is unchanged.
 *
 * @param buf  output array for the collected return addresses
 * @param size capacity of @a buf in entries
 * @return number of entries written to @a buf, or 0 if @a buf is too small
 */
size_t arch_perf_current_stack_trace(uintptr_t *buf, size_t size)
{
	if (size < 1U) {
		return 0;
	}

	size_t idx = 0;

	/*
	 * In x86_64 (arch/x86/core/intel64/locore.S) %rip and %rbp
	 * are always saved in _current->callee_saved before calling the
	 * handler function if the interrupt is not nested.
	 *
	 * %rip points to the location where the interrupt occurred.
	 */
	buf[idx++] = (uintptr_t)_current->callee_saved.rip;
	void **fp = (void **)_current->callee_saved.rbp;

	/*
	 * %rbp is the frame pointer.
	 *
	 * stack frame in memory:
	 * (addresses grow up)
	 * ....
	 * ra
	 * %rbp (next) <- %rbp (curr)
	 * ....
	 */
	while (valid_stack((uintptr_t)fp, _current)) {
		if (idx >= size) {
			return 0;
		}
		/* Per the diagram above: fp[1] is the saved ra, fp[0] the caller's %rbp. */
		buf[idx++] = (uintptr_t)fp[1];
		void **new_fp = (void **)fp[0];

		/*
		 * anti-infinite-loop check:
		 * new_fp cannot be smaller than fp, because the stack grows
		 * down and the trace moves deeper into the stack
		 */
		if (new_fp <= fp) {
			break;
		}
		fp = new_fp;
	}

	return idx;
}