debug: tracing: Remove unneeded abstraction

Various C and assembly modules call z_sys_trace_* functions, which
merely forward to the corresponding sys_trace_* functions. Simplify
this by calling the sys_trace_* functions directly from those modules,
and remove the now-redundant z_sys_trace_* wrappers.

Signed-off-by: Mrinal Sen <msen@oticon.com>
Author:    Mrinal Sen <msen@oticon.com>
Date:      2019-09-19 09:25:19 +02:00
Committer: Anas Nashif
Commit:    1246cb8cef
25 changed files with 70 additions and 141 deletions
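
For context, the indirection being dropped amounts to the pattern sketched
below. This is a standalone illustration rather than code from the tree:
cpu_idle_example(), main(), and the printf() stub are hypothetical, while
sys_trace_idle() and z_sys_trace_idle() mirror the real hook names.

#include <stdio.h>

/* Backend tracing hook (stubbed with printf here; in Zephyr the body is
 * provided by the selected tracing backend, e.g. SystemView or CTF). */
void sys_trace_idle(void)
{
	printf("trace: idle\n");
}

/* Wrapper of the kind removed by this commit: it only forwards the call. */
void z_sys_trace_idle(void)
{
	sys_trace_idle();
}

/* Hypothetical caller. Before the change it would call z_sys_trace_idle();
 * after the change it calls sys_trace_idle() directly. */
void cpu_idle_example(void)
{
	sys_trace_idle();	/* was: z_sys_trace_idle(); */
	/* ... enter the low-power state ... */
}

int main(void)
{
	cpu_idle_example();
	return 0;
}

Dropping the wrapper removes a layer of pure forwarding and leaves one public
name per trace hook: backends such as SystemView now define sys_trace_*
directly (see the last hunks below), and the empty z_sys_trace_* placeholder
macros in the tracing header are deleted along with the wrapper functions.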

@@ -37,7 +37,7 @@ SECTION_FUNC(TEXT, k_cpu_idle)
#ifdef CONFIG_TRACING
push_s blink
jl z_sys_trace_idle
jl sys_trace_idle
pop_s blink
#endif
@@ -58,7 +58,7 @@ SECTION_FUNC(TEXT, k_cpu_atomic_idle)
#ifdef CONFIG_TRACING
push_s blink
jl z_sys_trace_idle
jl sys_trace_idle
pop_s blink
#endif

@@ -245,13 +245,13 @@ rirq_path:
#endif
#if defined(CONFIG_TRACING)
GTEXT(z_sys_trace_isr_enter)
GTEXT(sys_trace_isr_enter)
.macro log_interrupt_k_event
clri r0 /* do not interrupt event logger operations */
push_s r0
push_s blink
jl z_sys_trace_isr_enter
jl sys_trace_isr_enter
pop_s blink
pop_s r0
seti r0

@@ -74,7 +74,7 @@ SECTION_FUNC(TEXT, z_CpuIdleInit)
SECTION_FUNC(TEXT, k_cpu_idle)
#ifdef CONFIG_TRACING
push {r0, lr}
bl z_sys_trace_idle
bl sys_trace_idle
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
pop {r0, r1}
mov lr, r1
@@ -123,7 +123,7 @@ SECTION_FUNC(TEXT, k_cpu_idle)
SECTION_FUNC(TEXT, k_cpu_atomic_idle)
#ifdef CONFIG_TRACING
push {r0, lr}
bl z_sys_trace_idle
bl sys_trace_idle
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
pop {r0, r1}
mov lr, r1

@@ -255,7 +255,7 @@ void _arch_isr_direct_pm(void)
void z_arch_isr_direct_header(void)
{
z_sys_trace_isr_enter();
sys_trace_isr_enter();
}
#if defined(CONFIG_ARM_SECURE_FIRMWARE)

@@ -65,7 +65,7 @@ SECTION_FUNC(TEXT, _isr_wrapper)
#endif
#ifdef CONFIG_TRACING
bl z_sys_trace_isr_enter
bl sys_trace_isr_enter
#endif
#ifdef CONFIG_SYS_POWER_MANAGEMENT
@@ -152,7 +152,7 @@ _idle_state_cleared:
blx r3 /* call ISR */
#ifdef CONFIG_TRACING
bl z_sys_trace_isr_exit
bl sys_trace_isr_exit
#endif
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)

@@ -46,7 +46,7 @@ SECTION_FUNC(TEXT, __pendsv)
#ifdef CONFIG_TRACING
/* Register the context switch */
push {r0, lr}
bl z_sys_trace_thread_switched_out
bl sys_trace_thread_switched_out
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
pop {r0, r1}
mov lr, r1
@@ -319,7 +319,7 @@ _thread_irq_disabled:
#ifdef CONFIG_TRACING
/* Register the context switch */
push {r0, lr}
bl z_sys_trace_thread_switched_in
bl sys_trace_thread_switched_in
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
pop {r0, r1}
mov lr, r1

@@ -81,7 +81,7 @@ z_arch_switch_to_main_thread(struct k_thread *main_thread,
_current = main_thread;
#ifdef CONFIG_TRACING
z_sys_trace_thread_switched_in();
sys_trace_thread_switched_in();
#endif
/* the ready queue cache already contains the main thread */

@@ -87,7 +87,7 @@ void _enter_irq(u32_t ipending)
while (ipending) {
struct _isr_table_entry *ite;
z_sys_trace_isr_enter();
sys_trace_isr_enter();
index = find_lsb_set(ipending) - 1;
ipending &= ~BIT(index);

@@ -12,7 +12,7 @@ GTEXT(__swap)
GTEXT(z_thread_entry_wrapper)
/* imports */
GTEXT(z_sys_trace_thread_switched_in)
GTEXT(sys_trace_thread_switched_in)
GTEXT(_k_neg_eagain)
/* unsigned int __swap(unsigned int key)
@@ -84,7 +84,7 @@ SECTION_FUNC(exception.other, __swap)
stw r4, _thread_offset_to_retval(r11)
#if CONFIG_TRACING
call z_sys_trace_thread_switched_in
call sys_trace_thread_switched_in
/* restore caller-saved r10 */
movhi r10, %hi(_kernel)
ori r10, r10, %lo(_kernel)

@@ -38,7 +38,7 @@
*/
void k_cpu_idle(void)
{
z_sys_trace_idle();
sys_trace_idle();
posix_irq_full_unlock();
posix_halt_cpu();
}
@@ -64,7 +64,7 @@ void k_cpu_idle(void)
*/
void k_cpu_atomic_idle(unsigned int key)
{
z_sys_trace_idle();
sys_trace_idle();
posix_atomic_halt_cpu(key);
}

@@ -94,11 +94,11 @@ void z_arch_switch_to_main_thread(struct k_thread *main_thread,
(posix_thread_status_t *)
_kernel.ready_q.cache->callee_saved.thread_status;
z_sys_trace_thread_switched_out();
sys_trace_thread_switched_out();
_kernel.current = _kernel.ready_q.cache;
z_sys_trace_thread_switched_in();
sys_trace_thread_switched_in();
posix_main_thread_start(ready_thread_ptr->thread_idx);
} /* LCOV_EXCL_LINE */

@@ -25,8 +25,8 @@ GTEXT(_is_next_thread_current)
GTEXT(z_get_next_ready_thread)
#ifdef CONFIG_TRACING
GTEXT(z_sys_trace_thread_switched_in)
GTEXT(z_sys_trace_isr_enter)
GTEXT(sys_trace_thread_switched_in)
GTEXT(sys_trace_isr_enter)
#endif
#ifdef CONFIG_IRQ_OFFLOAD
@@ -227,7 +227,7 @@ on_irq_stack:
call_irq:
#ifdef CONFIG_TRACING
call z_sys_trace_isr_enter
call sys_trace_isr_enter
#endif
/* Get IRQ causing interrupt */
@@ -305,7 +305,7 @@ on_thread_stack:
reschedule:
#if CONFIG_TRACING
call z_sys_trace_thread_switched_in
call sys_trace_thread_switched_in
#endif
/* Get reference to _kernel */
la t0, _kernel

@@ -20,7 +20,7 @@
*/
void k_cpu_idle(void)
{
z_sys_trace_idle();
sys_trace_idle();
#if defined(CONFIG_BOOT_TIME_MEASUREMENT)
__idle_time_stamp = k_cycle_get_32();
#endif
@@ -50,7 +50,7 @@ void k_cpu_idle(void)
void k_cpu_atomic_idle(unsigned int key)
{
z_sys_trace_idle();
sys_trace_idle();
__asm__ volatile (
"sti\n\t"

@@ -137,7 +137,7 @@ SECTION_FUNC(TEXT, _interrupt_enter)
pushl %eax
pushl %edx
call z_sys_trace_isr_enter
call sys_trace_isr_enter
popl %edx
popl %eax

@@ -61,7 +61,7 @@ void z_arch_irq_direct_pm(void)
void z_arch_isr_direct_header(void)
{
z_sys_trace_isr_enter();
sys_trace_isr_enter();
/* We're not going to unlock IRQs, but we still need to increment this
* so that z_is_in_isr() works

@@ -131,7 +131,7 @@ SECTION_FUNC(TEXT, __swap)
#ifdef CONFIG_TRACING
/* Register the context switch */
push %edx
call z_sys_trace_thread_switched_in
call sys_trace_thread_switched_in
pop %edx
#endif
movl _kernel_offset_to_ready_q_cache(%edi), %eax

@@ -45,7 +45,7 @@ void z_new_thread(struct k_thread *t, k_thread_stack_t *stack,
void k_cpu_idle(void)
{
z_sys_trace_idle();
sys_trace_idle();
__asm__ volatile("sti; hlt");
}

@@ -14,7 +14,7 @@
*/
void k_cpu_idle(void)
{
z_sys_trace_idle();
sys_trace_idle();
__asm__ volatile ("waiti 0");
}
/*
@@ -26,7 +26,7 @@ void k_cpu_idle(void)
*/
void k_cpu_atomic_idle(unsigned int key)
{
z_sys_trace_idle();
sys_trace_idle();
__asm__ volatile ("waiti 0\n\t"
"wsr.ps %0\n\t"
"rsync" :: "a"(key));

@@ -125,14 +125,14 @@ extern void z_arch_isr_direct_header(void);
extern void _IntExit(void);
#ifdef CONFIG_TRACING
extern void z_sys_trace_isr_exit(void);
extern void sys_trace_isr_exit(void);
#endif
static inline void z_arch_isr_direct_footer(int maybe_swap)
{
#ifdef CONFIG_TRACING
z_sys_trace_isr_exit();
sys_trace_isr_exit();
#endif
if (maybe_swap) {
_IntExit();

@@ -19,12 +19,12 @@
#define SYS_TRACE_ID_SEMA_TAKE (6u + SYS_TRACE_ID_OFFSET)
#ifdef CONFIG_TRACING
void z_sys_trace_idle(void);
void z_sys_trace_isr_enter(void);
void z_sys_trace_isr_exit(void);
void z_sys_trace_isr_exit_to_scheduler(void);
void z_sys_trace_thread_switched_in(void);
void z_sys_trace_thread_switched_out(void);
void sys_trace_idle(void);
void sys_trace_isr_enter(void);
void sys_trace_isr_exit(void);
void sys_trace_isr_exit_to_scheduler(void);
void sys_trace_thread_switched_in(void);
void sys_trace_thread_switched_out(void);
#endif
#ifdef CONFIG_SEGGER_SYSTEMVIEW
@@ -132,18 +132,11 @@ void z_sys_trace_thread_switched_out(void);
*/
#define sys_trace_end_call(id)
/**
* @brief Called when the cpu enters the idle state
*/
#define sys_trace_idle()
#define z_sys_trace_idle()
#define z_sys_trace_isr_enter()
#define z_sys_trace_isr_exit()
#define z_sys_trace_isr_exit_to_scheduler()
#define z_sys_trace_thread_switched_in()
#define z_sys_trace_thread_switched_out()
/**
* @}
*/

@@ -13,7 +13,7 @@
static ALWAYS_INLINE void riscv_idle(unsigned int key)
{
z_sys_trace_idle();
sys_trace_idle();
/* unlock interrupts */
irq_unlock(key);

@@ -159,31 +159,6 @@ void sys_trace_idle(void)
{
}
void z_sys_trace_idle(void)
{
sys_trace_idle();
}
void z_sys_trace_isr_enter(void)
{
sys_trace_isr_enter();
}
void z_sys_trace_isr_exit(void)
{
sys_trace_isr_exit();
}
void z_sys_trace_thread_switched_in(void)
{
sys_trace_thread_switched_in();
}
void z_sys_trace_thread_switched_out(void)
{
sys_trace_thread_switched_out();
}
#ifdef CONFIG_TRACING_CPU_STATS_LOG
static struct k_delayed_work cpu_stats_log;

@@ -153,33 +153,6 @@ void sys_trace_end_call(unsigned int id)
ctf_top_end_call(id);
}
void z_sys_trace_thread_switched_out(void)
{
sys_trace_thread_switched_out();
}
void z_sys_trace_thread_switched_in(void)
{
sys_trace_thread_switched_in();
}
void z_sys_trace_isr_enter(void)
{
sys_trace_isr_enter();
}
void z_sys_trace_isr_exit(void)
{
sys_trace_isr_exit();
}
void z_sys_trace_isr_exit_to_scheduler(void)
{
sys_trace_isr_exit_to_scheduler();
}
void z_sys_trace_idle(void)
{
sys_trace_idle();
}
static int ctf_top_init(struct device *arg)
{
ARG_UNUSED(arg);

@@ -25,30 +25,12 @@ static inline int is_idle_thread(struct k_thread *thread)
#endif
}
static inline void z__sys_trace_thread_switched_in(void)
{
struct k_thread *thread;
thread = k_current_get();
if (is_idle_thread(thread)) {
SEGGER_SYSVIEW_OnIdle();
} else {
SEGGER_SYSVIEW_OnTaskStartExec((u32_t)(uintptr_t)thread);
}
}
#define sys_trace_thread_switched_in() z__sys_trace_thread_switched_in()
#define sys_trace_thread_switched_out() SEGGER_SYSVIEW_OnTaskStopExec()
#define sys_trace_isr_enter() SEGGER_SYSVIEW_RecordEnterISR()
#define sys_trace_isr_exit() SEGGER_SYSVIEW_RecordExitISR()
#define sys_trace_isr_exit_to_scheduler() \
SEGGER_SYSVIEW_RecordExitISRToScheduler()
void sys_trace_thread_switched_in(void);
void sys_trace_thread_switched_out(void);
void sys_trace_isr_enter(void);
void sys_trace_isr_exit(void);
void sys_trace_isr_exit_to_scheduler(void);
void sys_trace_idle(void);
#define sys_trace_thread_priority_set(thread)
@@ -90,8 +72,6 @@ static inline void sys_trace_thread_info(struct k_thread *thread)
#define sys_trace_void(id) SEGGER_SYSVIEW_RecordVoid(id)
#define sys_trace_idle() SEGGER_SYSVIEW_OnIdle()
#define sys_trace_end_call(id) SEGGER_SYSVIEW_RecordEndCall(id)
#endif /* _TRACE_SYSVIEW_H */

@@ -23,34 +23,42 @@ u32_t sysview_get_interrupt(void)
return interrupt;
}
void z_sys_trace_idle(void)
void sys_trace_thread_switched_in(void)
{
sys_trace_idle();
struct k_thread *thread;
thread = k_current_get();
if (is_idle_thread(thread)) {
SEGGER_SYSVIEW_OnIdle();
} else {
SEGGER_SYSVIEW_OnTaskStartExec((u32_t)(uintptr_t)thread);
}
}
void z_sys_trace_isr_enter(void)
void sys_trace_thread_switched_out(void)
{
sys_trace_isr_enter();
SEGGER_SYSVIEW_OnTaskStopExec();
}
void z_sys_trace_isr_exit(void)
void sys_trace_isr_enter(void)
{
sys_trace_isr_exit();
SEGGER_SYSVIEW_RecordEnterISR();
}
void z_sys_trace_isr_exit_to_scheduler(void)
void sys_trace_isr_exit(void)
{
sys_trace_isr_exit_to_scheduler();
SEGGER_SYSVIEW_RecordExitISR();
}
void z_sys_trace_thread_switched_in(void)
void sys_trace_isr_exit_to_scheduler(void)
{
sys_trace_thread_switched_in();
SEGGER_SYSVIEW_RecordExitISRToScheduler();
}
void z_sys_trace_thread_switched_out(void)
void sys_trace_idle(void)
{
sys_trace_thread_switched_out();
SEGGER_SYSVIEW_OnIdle();
}
static void send_task_list_cb(void)