diff --git a/arch/arc/core/cpu_idle.S b/arch/arc/core/cpu_idle.S
index 3c5ff72c9a6..f3660d67a3e 100644
--- a/arch/arc/core/cpu_idle.S
+++ b/arch/arc/core/cpu_idle.S
@@ -37,7 +37,7 @@ SECTION_FUNC(TEXT, k_cpu_idle)
 
 #ifdef CONFIG_TRACING
 	push_s blink
-	jl z_sys_trace_idle
+	jl sys_trace_idle
 	pop_s blink
 #endif
 
@@ -58,7 +58,7 @@ SECTION_FUNC(TEXT, k_cpu_atomic_idle)
 
 #ifdef CONFIG_TRACING
 	push_s blink
-	jl z_sys_trace_idle
+	jl sys_trace_idle
 	pop_s blink
 #endif
 
diff --git a/arch/arc/core/isr_wrapper.S b/arch/arc/core/isr_wrapper.S
index 92b0814c5e9..d4cca4eff85 100644
--- a/arch/arc/core/isr_wrapper.S
+++ b/arch/arc/core/isr_wrapper.S
@@ -245,13 +245,13 @@ rirq_path:
 #endif
 
 #if defined(CONFIG_TRACING)
-GTEXT(z_sys_trace_isr_enter)
+GTEXT(sys_trace_isr_enter)
 
 .macro log_interrupt_k_event
 	clri r0	/* do not interrupt event logger operations */
 	push_s r0
 	push_s blink
-	jl z_sys_trace_isr_enter
+	jl sys_trace_isr_enter
 	pop_s blink
 	pop_s r0
 	seti r0
diff --git a/arch/arm/core/cpu_idle.S b/arch/arm/core/cpu_idle.S
index bcd359210b9..c279bf53291 100644
--- a/arch/arm/core/cpu_idle.S
+++ b/arch/arm/core/cpu_idle.S
@@ -74,7 +74,7 @@ SECTION_FUNC(TEXT, z_CpuIdleInit)
 SECTION_FUNC(TEXT, k_cpu_idle)
 #ifdef CONFIG_TRACING
 	push {r0, lr}
-	bl z_sys_trace_idle
+	bl sys_trace_idle
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 	pop {r0, r1}
 	mov lr, r1
@@ -123,7 +123,7 @@ SECTION_FUNC(TEXT, k_cpu_idle)
 SECTION_FUNC(TEXT, k_cpu_atomic_idle)
 #ifdef CONFIG_TRACING
 	push {r0, lr}
-	bl z_sys_trace_idle
+	bl sys_trace_idle
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 	pop {r0, r1}
 	mov lr, r1
diff --git a/arch/arm/core/irq_manage.c b/arch/arm/core/irq_manage.c
index a4ebcc5faee..1b320f94f02 100644
--- a/arch/arm/core/irq_manage.c
+++ b/arch/arm/core/irq_manage.c
@@ -255,7 +255,7 @@ void _arch_isr_direct_pm(void)
 
 void z_arch_isr_direct_header(void)
 {
-	z_sys_trace_isr_enter();
+	sys_trace_isr_enter();
 }
 
 #if defined(CONFIG_ARM_SECURE_FIRMWARE)
diff --git a/arch/arm/core/isr_wrapper.S b/arch/arm/core/isr_wrapper.S
index 5b05e8ad6c2..42f6a3396b8 100644
--- a/arch/arm/core/isr_wrapper.S
+++ b/arch/arm/core/isr_wrapper.S
@@ -65,7 +65,7 @@ SECTION_FUNC(TEXT, _isr_wrapper)
 #endif
 
 #ifdef CONFIG_TRACING
-	bl z_sys_trace_isr_enter
+	bl sys_trace_isr_enter
 #endif
 
 #ifdef CONFIG_SYS_POWER_MANAGEMENT
@@ -152,7 +152,7 @@ _idle_state_cleared:
 	blx r3	/* call ISR */
 
 #ifdef CONFIG_TRACING
-	bl z_sys_trace_isr_exit
+	bl sys_trace_isr_exit
 #endif
 
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
diff --git a/arch/arm/core/swap_helper.S b/arch/arm/core/swap_helper.S
index 636d95dc8c4..3934788d80f 100644
--- a/arch/arm/core/swap_helper.S
+++ b/arch/arm/core/swap_helper.S
@@ -46,7 +46,7 @@ SECTION_FUNC(TEXT, __pendsv)
 #ifdef CONFIG_TRACING
 	/* Register the context switch */
 	push {r0, lr}
-	bl z_sys_trace_thread_switched_out
+	bl sys_trace_thread_switched_out
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 	pop {r0, r1}
 	mov lr, r1
@@ -319,7 +319,7 @@ _thread_irq_disabled:
 #ifdef CONFIG_TRACING
 	/* Register the context switch */
 	push {r0, lr}
-	bl z_sys_trace_thread_switched_in
+	bl sys_trace_thread_switched_in
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 	pop {r0, r1}
 	mov lr, r1
diff --git a/arch/arm/include/kernel_arch_func.h b/arch/arm/include/kernel_arch_func.h
index 26ed8ac6d43..eaea891fe20 100644
--- a/arch/arm/include/kernel_arch_func.h
+++ b/arch/arm/include/kernel_arch_func.h
@@ -81,7 +81,7 @@ z_arch_switch_to_main_thread(struct k_thread *main_thread,
 	_current = main_thread;
 
 #ifdef CONFIG_TRACING
-	z_sys_trace_thread_switched_in();
+	sys_trace_thread_switched_in();
 #endif
 
 	/* the ready queue cache already contains the main thread */
diff --git a/arch/nios2/core/irq_manage.c b/arch/nios2/core/irq_manage.c
index d15cdd60ea5..c1ee42f0ba6 100644
--- a/arch/nios2/core/irq_manage.c
+++ b/arch/nios2/core/irq_manage.c
@@ -87,7 +87,7 @@ void _enter_irq(u32_t ipending)
 	while (ipending) {
 		struct _isr_table_entry *ite;
 
-		z_sys_trace_isr_enter();
+		sys_trace_isr_enter();
 
 		index = find_lsb_set(ipending) - 1;
 		ipending &= ~BIT(index);
diff --git a/arch/nios2/core/swap.S b/arch/nios2/core/swap.S
index c467a54098b..7285c8890b3 100644
--- a/arch/nios2/core/swap.S
+++ b/arch/nios2/core/swap.S
@@ -12,7 +12,7 @@ GTEXT(__swap)
 GTEXT(z_thread_entry_wrapper)
 
 /* imports */
-GTEXT(z_sys_trace_thread_switched_in)
+GTEXT(sys_trace_thread_switched_in)
 GTEXT(_k_neg_eagain)
 
 /* unsigned int __swap(unsigned int key)
@@ -84,7 +84,7 @@ SECTION_FUNC(exception.other, __swap)
 	stw r4, _thread_offset_to_retval(r11)
 
 #if CONFIG_TRACING
-	call z_sys_trace_thread_switched_in
+	call sys_trace_thread_switched_in
 	/* restore caller-saved r10 */
 	movhi r10, %hi(_kernel)
 	ori r10, r10, %lo(_kernel)
diff --git a/arch/posix/core/cpuhalt.c b/arch/posix/core/cpuhalt.c
index 81ade08e2fc..4d27a8143ba 100644
--- a/arch/posix/core/cpuhalt.c
+++ b/arch/posix/core/cpuhalt.c
@@ -38,7 +38,7 @@
  */
 void k_cpu_idle(void)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 	posix_irq_full_unlock();
 	posix_halt_cpu();
 }
@@ -64,7 +64,7 @@ void k_cpu_idle(void)
  */
 void k_cpu_atomic_idle(unsigned int key)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 	posix_atomic_halt_cpu(key);
 }
 
diff --git a/arch/posix/core/swap.c b/arch/posix/core/swap.c
index 5ea448b67ce..a77ae16d9f4 100644
--- a/arch/posix/core/swap.c
+++ b/arch/posix/core/swap.c
@@ -94,11 +94,11 @@ void z_arch_switch_to_main_thread(struct k_thread *main_thread,
 		(posix_thread_status_t *)
 		_kernel.ready_q.cache->callee_saved.thread_status;
 
-	z_sys_trace_thread_switched_out();
+	sys_trace_thread_switched_out();
 
 	_kernel.current = _kernel.ready_q.cache;
 
-	z_sys_trace_thread_switched_in();
+	sys_trace_thread_switched_in();
 
 	posix_main_thread_start(ready_thread_ptr->thread_idx);
 } /* LCOV_EXCL_LINE */
diff --git a/arch/riscv/core/isr.S b/arch/riscv/core/isr.S
index 97fd70e2586..a9b449a4c1f 100644
--- a/arch/riscv/core/isr.S
+++ b/arch/riscv/core/isr.S
@@ -25,8 +25,8 @@ GTEXT(_is_next_thread_current)
 GTEXT(z_get_next_ready_thread)
 
 #ifdef CONFIG_TRACING
-GTEXT(z_sys_trace_thread_switched_in)
-GTEXT(z_sys_trace_isr_enter)
+GTEXT(sys_trace_thread_switched_in)
+GTEXT(sys_trace_isr_enter)
 #endif
 
 #ifdef CONFIG_IRQ_OFFLOAD
@@ -227,7 +227,7 @@ on_irq_stack:
 
 call_irq:
 #ifdef CONFIG_TRACING
-	call z_sys_trace_isr_enter
+	call sys_trace_isr_enter
 #endif
 
 	/* Get IRQ causing interrupt */
@@ -305,7 +305,7 @@ on_thread_stack:
 
 reschedule:
 #if CONFIG_TRACING
-	call z_sys_trace_thread_switched_in
+	call sys_trace_thread_switched_in
 #endif
 	/* Get reference to _kernel */
 	la t0, _kernel
diff --git a/arch/x86/core/cpuhalt.c b/arch/x86/core/cpuhalt.c
index 08638e5bfae..317608175e2 100644
--- a/arch/x86/core/cpuhalt.c
+++ b/arch/x86/core/cpuhalt.c
@@ -20,7 +20,7 @@
  */
 void k_cpu_idle(void)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 #if defined(CONFIG_BOOT_TIME_MEASUREMENT)
 	__idle_time_stamp = k_cycle_get_32();
 #endif
@@ -50,7 +50,7 @@ void k_cpu_idle(void)
 
 void k_cpu_atomic_idle(unsigned int key)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 
 	__asm__ volatile (
 	    "sti\n\t"
diff --git a/arch/x86/core/ia32/intstub.S b/arch/x86/core/ia32/intstub.S
index f9557ae5ec3..a04fde7c246 100644
--- a/arch/x86/core/ia32/intstub.S
+++ b/arch/x86/core/ia32/intstub.S
@@ -137,7 +137,7 @@ SECTION_FUNC(TEXT, _interrupt_enter)
 
 	pushl %eax
 	pushl %edx
-	call z_sys_trace_isr_enter
+	call sys_trace_isr_enter
 	popl %edx
 	popl %eax
 
diff --git a/arch/x86/core/ia32/irq_manage.c b/arch/x86/core/ia32/irq_manage.c
index 5838b0749c7..84b506410c4 100644
--- a/arch/x86/core/ia32/irq_manage.c
+++ b/arch/x86/core/ia32/irq_manage.c
@@ -61,7 +61,7 @@ void z_arch_irq_direct_pm(void)
 
 void z_arch_isr_direct_header(void)
 {
-	z_sys_trace_isr_enter();
+	sys_trace_isr_enter();
 
 	/* We're not going to unlock IRQs, but we still need to increment this
 	 * so that z_is_in_isr() works
diff --git a/arch/x86/core/ia32/swap.S b/arch/x86/core/ia32/swap.S
index 91ee5176183..ab78541941c 100644
--- a/arch/x86/core/ia32/swap.S
+++ b/arch/x86/core/ia32/swap.S
@@ -131,7 +131,7 @@ SECTION_FUNC(TEXT, __swap)
 #ifdef CONFIG_TRACING
 	/* Register the context switch */
 	push %edx
-	call z_sys_trace_thread_switched_in
+	call sys_trace_thread_switched_in
 	pop %edx
 #endif
 	movl _kernel_offset_to_ready_q_cache(%edi), %eax
diff --git a/arch/x86_64/core/x86_64.c b/arch/x86_64/core/x86_64.c
index c44f606961e..05829374326 100644
--- a/arch/x86_64/core/x86_64.c
+++ b/arch/x86_64/core/x86_64.c
@@ -45,7 +45,7 @@ void z_new_thread(struct k_thread *t, k_thread_stack_t *stack,
 
 void k_cpu_idle(void)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 	__asm__ volatile("sti; hlt");
 }
 
diff --git a/arch/xtensa/core/cpu_idle.c b/arch/xtensa/core/cpu_idle.c
index 3a67a3daa4f..b0fce09e3fe 100644
--- a/arch/xtensa/core/cpu_idle.c
+++ b/arch/xtensa/core/cpu_idle.c
@@ -14,7 +14,7 @@
  */
 void k_cpu_idle(void)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 	__asm__ volatile ("waiti 0");
 }
 /*
@@ -26,7 +26,7 @@ void k_cpu_idle(void)
  */
 void k_cpu_atomic_idle(unsigned int key)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 	__asm__ volatile ("waiti 0\n\t"
 			  "wsr.ps %0\n\t"
 			  "rsync" :: "a"(key));
diff --git a/include/arch/arm/irq.h b/include/arch/arm/irq.h
index 6eded9e5795..0804a048e17 100644
--- a/include/arch/arm/irq.h
+++ b/include/arch/arm/irq.h
@@ -125,14 +125,14 @@ extern void z_arch_isr_direct_header(void);
 extern void _IntExit(void);
 
 #ifdef CONFIG_TRACING
-extern void z_sys_trace_isr_exit(void);
+extern void sys_trace_isr_exit(void);
 #endif
 
 static inline void z_arch_isr_direct_footer(int maybe_swap)
 {
 #ifdef CONFIG_TRACING
-	z_sys_trace_isr_exit();
+	sys_trace_isr_exit();
 #endif
 	if (maybe_swap) {
 		_IntExit();
 	}
diff --git a/include/debug/tracing.h b/include/debug/tracing.h
index c1bdc46de9a..127b4f700a9 100644
--- a/include/debug/tracing.h
+++ b/include/debug/tracing.h
@@ -19,12 +19,12 @@
 #define SYS_TRACE_ID_SEMA_TAKE (6u + SYS_TRACE_ID_OFFSET)
 
 #ifdef CONFIG_TRACING
-void z_sys_trace_idle(void);
-void z_sys_trace_isr_enter(void);
-void z_sys_trace_isr_exit(void);
-void z_sys_trace_isr_exit_to_scheduler(void);
-void z_sys_trace_thread_switched_in(void);
-void z_sys_trace_thread_switched_out(void);
+void sys_trace_idle(void);
+void sys_trace_isr_enter(void);
+void sys_trace_isr_exit(void);
+void sys_trace_isr_exit_to_scheduler(void);
+void sys_trace_thread_switched_in(void);
+void sys_trace_thread_switched_out(void);
 #endif
 
 #ifdef CONFIG_SEGGER_SYSTEMVIEW
@@ -132,18 +132,11 @@ void z_sys_trace_thread_switched_out(void);
  */
 #define sys_trace_end_call(id)
 
+/**
+ * @brief Called when the cpu enters the idle state
+ */
+#define sys_trace_idle()
 
-#define z_sys_trace_idle()
-
-#define z_sys_trace_isr_enter()
-
-#define z_sys_trace_isr_exit()
-
-#define z_sys_trace_isr_exit_to_scheduler()
-
-#define z_sys_trace_thread_switched_in()
-
-#define z_sys_trace_thread_switched_out()
 /**
  * @}
  */
diff --git a/soc/riscv/riscv-privilege/common/idle.c b/soc/riscv/riscv-privilege/common/idle.c
index c89cb1e7ef5..1571fb81567 100644
--- a/soc/riscv/riscv-privilege/common/idle.c
+++ b/soc/riscv/riscv-privilege/common/idle.c
@@ -13,7 +13,7 @@
 
 static ALWAYS_INLINE void riscv_idle(unsigned int key)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 	/* unlock interrupts */
 	irq_unlock(key);
 
diff --git a/subsys/debug/tracing/cpu_stats.c b/subsys/debug/tracing/cpu_stats.c
index 1bdd7c9cd59..e66d0fde56e 100644
--- a/subsys/debug/tracing/cpu_stats.c
+++ b/subsys/debug/tracing/cpu_stats.c
@@ -159,31 +159,6 @@ void sys_trace_idle(void)
 {
 }
 
-void z_sys_trace_idle(void)
-{
-	sys_trace_idle();
-}
-
-void z_sys_trace_isr_enter(void)
-{
-	sys_trace_isr_enter();
-}
-
-void z_sys_trace_isr_exit(void)
-{
-	sys_trace_isr_exit();
-}
-
-void z_sys_trace_thread_switched_in(void)
-{
-	sys_trace_thread_switched_in();
-}
-
-void z_sys_trace_thread_switched_out(void)
-{
-	sys_trace_thread_switched_out();
-}
-
 #ifdef CONFIG_TRACING_CPU_STATS_LOG
 static struct k_delayed_work cpu_stats_log;
 
diff --git a/subsys/debug/tracing/ctf/ctf_top.c b/subsys/debug/tracing/ctf/ctf_top.c
index f56a001834e..9bd46ec1e19 100644
--- a/subsys/debug/tracing/ctf/ctf_top.c
+++ b/subsys/debug/tracing/ctf/ctf_top.c
@@ -153,33 +153,6 @@ void sys_trace_end_call(unsigned int id)
 	ctf_top_end_call(id);
 }
 
-
-void z_sys_trace_thread_switched_out(void)
-{
-	sys_trace_thread_switched_out();
-}
-void z_sys_trace_thread_switched_in(void)
-{
-	sys_trace_thread_switched_in();
-}
-void z_sys_trace_isr_enter(void)
-{
-	sys_trace_isr_enter();
-}
-void z_sys_trace_isr_exit(void)
-{
-	sys_trace_isr_exit();
-}
-void z_sys_trace_isr_exit_to_scheduler(void)
-{
-	sys_trace_isr_exit_to_scheduler();
-}
-void z_sys_trace_idle(void)
-{
-	sys_trace_idle();
-}
-
-
 static int ctf_top_init(struct device *arg)
 {
 	ARG_UNUSED(arg);
diff --git a/subsys/debug/tracing/include/tracing_sysview.h b/subsys/debug/tracing/include/tracing_sysview.h
index 3f9c461b724..acd890305e1 100644
--- a/subsys/debug/tracing/include/tracing_sysview.h
+++ b/subsys/debug/tracing/include/tracing_sysview.h
@@ -25,30 +25,12 @@ static inline int is_idle_thread(struct k_thread *thread)
 #endif
 }
 
-
-static inline void z__sys_trace_thread_switched_in(void)
-{
-	struct k_thread *thread;
-
-	thread = k_current_get();
-
-	if (is_idle_thread(thread)) {
-		SEGGER_SYSVIEW_OnIdle();
-	} else {
-		SEGGER_SYSVIEW_OnTaskStartExec((u32_t)(uintptr_t)thread);
-	}
-}
-
-#define sys_trace_thread_switched_in() z__sys_trace_thread_switched_in()
-
-#define sys_trace_thread_switched_out() SEGGER_SYSVIEW_OnTaskStopExec()
-
-#define sys_trace_isr_enter() SEGGER_SYSVIEW_RecordEnterISR()
-
-#define sys_trace_isr_exit() SEGGER_SYSVIEW_RecordExitISR()
-
-#define sys_trace_isr_exit_to_scheduler() \
-	SEGGER_SYSVIEW_RecordExitISRToScheduler()
+void sys_trace_thread_switched_in(void);
+void sys_trace_thread_switched_out(void);
+void sys_trace_isr_enter(void);
+void sys_trace_isr_exit(void);
+void sys_trace_isr_exit_to_scheduler(void);
+void sys_trace_idle(void);
 
 #define sys_trace_thread_priority_set(thread)
 
@@ -90,8 +72,6 @@ static inline void sys_trace_thread_info(struct k_thread *thread)
 
 #define sys_trace_void(id) SEGGER_SYSVIEW_RecordVoid(id)
 
-#define sys_trace_idle() SEGGER_SYSVIEW_OnIdle()
-
 #define sys_trace_end_call(id) SEGGER_SYSVIEW_RecordEndCall(id)
 
 #endif /* _TRACE_SYSVIEW_H */
diff --git a/subsys/debug/tracing/sysview.c b/subsys/debug/tracing/sysview.c
index 34ca756f4aa..898ae772925 100644
--- a/subsys/debug/tracing/sysview.c
+++ b/subsys/debug/tracing/sysview.c
@@ -23,34 +23,42 @@ u32_t sysview_get_interrupt(void)
 	return interrupt;
 }
 
-void z_sys_trace_idle(void)
+void sys_trace_thread_switched_in(void)
 {
-	sys_trace_idle();
+	struct k_thread *thread;
+
+	thread = k_current_get();
+
+	if (is_idle_thread(thread)) {
+		SEGGER_SYSVIEW_OnIdle();
+	} else {
+		SEGGER_SYSVIEW_OnTaskStartExec((u32_t)(uintptr_t)thread);
+	}
 }
 
-void z_sys_trace_isr_enter(void)
+void sys_trace_thread_switched_out(void)
 {
-	sys_trace_isr_enter();
+	SEGGER_SYSVIEW_OnTaskStopExec();
 }
 
-void z_sys_trace_isr_exit(void)
+void sys_trace_isr_enter(void)
 {
-	sys_trace_isr_exit();
+	SEGGER_SYSVIEW_RecordEnterISR();
 }
 
-void z_sys_trace_isr_exit_to_scheduler(void)
+void sys_trace_isr_exit(void)
 {
-	sys_trace_isr_exit_to_scheduler();
+	SEGGER_SYSVIEW_RecordExitISR();
 }
 
-void z_sys_trace_thread_switched_in(void)
+void sys_trace_isr_exit_to_scheduler(void)
 {
-	sys_trace_thread_switched_in();
+	SEGGER_SYSVIEW_RecordExitISRToScheduler();
 }
 
-void z_sys_trace_thread_switched_out(void)
+void sys_trace_idle(void)
 {
-	sys_trace_thread_switched_out();
+	SEGGER_SYSVIEW_OnIdle();
 }
 
 static void send_task_list_cb(void)
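
For context only (not part of the patch): with the z_ wrapper layer removed, a tracing backend now provides the sys_trace_* hooks declared in include/debug/tracing.h directly, exactly as sysview.c does above. The sketch below shows what a minimal alternative backend might look like under that assumption; the file, the counter names, and the counting behaviour are hypothetical, only the hook signatures and the <debug/tracing.h> declarations come from this patch.

/*
 * Hypothetical minimal tracing backend (illustration, not part of this
 * patch). When CONFIG_TRACING is enabled it supplies the renamed hooks
 * declared in include/debug/tracing.h, counting events instead of
 * emitting SystemView or CTF records.
 */
#include <kernel.h>
#include <debug/tracing.h>

static atomic_t trace_isr_count;	/* illustrative counter names */
static atomic_t trace_switch_count;

void sys_trace_isr_enter(void)
{
	/* called from the arch ISR wrappers patched above */
	atomic_inc(&trace_isr_count);
}

void sys_trace_isr_exit(void)
{
	/* nothing recorded in this sketch */
}

void sys_trace_isr_exit_to_scheduler(void)
{
	/* nothing recorded in this sketch */
}

void sys_trace_thread_switched_in(void)
{
	/* called from the __swap/__pendsv context-switch paths */
	atomic_inc(&trace_switch_count);
}

void sys_trace_thread_switched_out(void)
{
	/* nothing recorded in this sketch */
}

void sys_trace_idle(void)
{
	/* k_cpu_idle() calls this just before halting the CPU */
}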