debug: tracing: Remove unneeded abstraction
Various C and assembly modules call z_sys_trace_* functions, which merely forward to the corresponding sys_trace_* functions. Simplify this by calling the sys_trace_* functions directly from those modules, and remove the now-unused z_sys_trace_* wrappers.

Signed-off-by: Mrinal Sen <msen@oticon.com>
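For context, the abstraction being dropped is a layer of one-line forwarding functions. A minimal before/after sketch, condensed from the hunks below (illustrative only, not the complete files):

    /* Before: C and assembly callers jumped to a z_-prefixed wrapper... */
    void z_sys_trace_idle(void)
    {
    	sys_trace_idle();	/* ...which only forwarded to the real tracing hook */
    }

    /* After: callers invoke the tracing hook directly, e.g. from k_cpu_idle() */
    void k_cpu_idle(void)
    {
    	sys_trace_idle();
    	/* architecture-specific idle sequence follows */
    }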
parent 15724c6cdc
commit 1246cb8cef
25 changed files with 70 additions and 141 deletions
@@ -37,7 +37,7 @@ SECTION_FUNC(TEXT, k_cpu_idle)

 #ifdef CONFIG_TRACING
 	push_s blink
-	jl z_sys_trace_idle
+	jl sys_trace_idle
 	pop_s blink
 #endif

@@ -58,7 +58,7 @@ SECTION_FUNC(TEXT, k_cpu_atomic_idle)

 #ifdef CONFIG_TRACING
 	push_s blink
-	jl z_sys_trace_idle
+	jl sys_trace_idle
 	pop_s blink
 #endif

@@ -245,13 +245,13 @@ rirq_path:
 #endif

 #if defined(CONFIG_TRACING)
-GTEXT(z_sys_trace_isr_enter)
+GTEXT(sys_trace_isr_enter)

 .macro log_interrupt_k_event
 	clri r0 /* do not interrupt event logger operations */
 	push_s r0
 	push_s blink
-	jl z_sys_trace_isr_enter
+	jl sys_trace_isr_enter
 	pop_s blink
 	pop_s r0
 	seti r0
@@ -74,7 +74,7 @@ SECTION_FUNC(TEXT, z_CpuIdleInit)
 SECTION_FUNC(TEXT, k_cpu_idle)
 #ifdef CONFIG_TRACING
 	push {r0, lr}
-	bl z_sys_trace_idle
+	bl sys_trace_idle
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 	pop {r0, r1}
 	mov lr, r1
@@ -123,7 +123,7 @@ SECTION_FUNC(TEXT, k_cpu_idle)
 SECTION_FUNC(TEXT, k_cpu_atomic_idle)
 #ifdef CONFIG_TRACING
 	push {r0, lr}
-	bl z_sys_trace_idle
+	bl sys_trace_idle
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 	pop {r0, r1}
 	mov lr, r1
@@ -255,7 +255,7 @@ void _arch_isr_direct_pm(void)

 void z_arch_isr_direct_header(void)
 {
-	z_sys_trace_isr_enter();
+	sys_trace_isr_enter();
 }

 #if defined(CONFIG_ARM_SECURE_FIRMWARE)
@@ -65,7 +65,7 @@ SECTION_FUNC(TEXT, _isr_wrapper)
 #endif

 #ifdef CONFIG_TRACING
-	bl z_sys_trace_isr_enter
+	bl sys_trace_isr_enter
 #endif

 #ifdef CONFIG_SYS_POWER_MANAGEMENT
@@ -152,7 +152,7 @@ _idle_state_cleared:
 	blx r3 /* call ISR */

 #ifdef CONFIG_TRACING
-	bl z_sys_trace_isr_exit
+	bl sys_trace_isr_exit
 #endif

 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
@@ -46,7 +46,7 @@ SECTION_FUNC(TEXT, __pendsv)
 #ifdef CONFIG_TRACING
 	/* Register the context switch */
 	push {r0, lr}
-	bl z_sys_trace_thread_switched_out
+	bl sys_trace_thread_switched_out
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 	pop {r0, r1}
 	mov lr, r1
@@ -319,7 +319,7 @@ _thread_irq_disabled:
 #ifdef CONFIG_TRACING
 	/* Register the context switch */
 	push {r0, lr}
-	bl z_sys_trace_thread_switched_in
+	bl sys_trace_thread_switched_in
 #if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
 	pop {r0, r1}
 	mov lr, r1
@@ -81,7 +81,7 @@ z_arch_switch_to_main_thread(struct k_thread *main_thread,

 	_current = main_thread;
 #ifdef CONFIG_TRACING
-	z_sys_trace_thread_switched_in();
+	sys_trace_thread_switched_in();
 #endif

 	/* the ready queue cache already contains the main thread */
@@ -87,7 +87,7 @@ void _enter_irq(u32_t ipending)
 	while (ipending) {
 		struct _isr_table_entry *ite;

-		z_sys_trace_isr_enter();
+		sys_trace_isr_enter();

 		index = find_lsb_set(ipending) - 1;
 		ipending &= ~BIT(index);
@@ -12,7 +12,7 @@ GTEXT(__swap)
 GTEXT(z_thread_entry_wrapper)

 /* imports */
-GTEXT(z_sys_trace_thread_switched_in)
+GTEXT(sys_trace_thread_switched_in)
 GTEXT(_k_neg_eagain)

 /* unsigned int __swap(unsigned int key)
@@ -84,7 +84,7 @@ SECTION_FUNC(exception.other, __swap)
 	stw r4, _thread_offset_to_retval(r11)

 #if CONFIG_TRACING
-	call z_sys_trace_thread_switched_in
+	call sys_trace_thread_switched_in
 	/* restore caller-saved r10 */
 	movhi r10, %hi(_kernel)
 	ori r10, r10, %lo(_kernel)
@@ -38,7 +38,7 @@
  */
 void k_cpu_idle(void)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 	posix_irq_full_unlock();
 	posix_halt_cpu();
 }
@@ -64,7 +64,7 @@ void k_cpu_idle(void)
  */
 void k_cpu_atomic_idle(unsigned int key)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 	posix_atomic_halt_cpu(key);
 }

@@ -94,11 +94,11 @@ void z_arch_switch_to_main_thread(struct k_thread *main_thread,
 		(posix_thread_status_t *)
 		_kernel.ready_q.cache->callee_saved.thread_status;

-	z_sys_trace_thread_switched_out();
+	sys_trace_thread_switched_out();

 	_kernel.current = _kernel.ready_q.cache;

-	z_sys_trace_thread_switched_in();
+	sys_trace_thread_switched_in();

 	posix_main_thread_start(ready_thread_ptr->thread_idx);
 } /* LCOV_EXCL_LINE */
@@ -25,8 +25,8 @@ GTEXT(_is_next_thread_current)
 GTEXT(z_get_next_ready_thread)

 #ifdef CONFIG_TRACING
-GTEXT(z_sys_trace_thread_switched_in)
-GTEXT(z_sys_trace_isr_enter)
+GTEXT(sys_trace_thread_switched_in)
+GTEXT(sys_trace_isr_enter)
 #endif

 #ifdef CONFIG_IRQ_OFFLOAD
@@ -227,7 +227,7 @@ on_irq_stack:

 call_irq:
 #ifdef CONFIG_TRACING
-	call z_sys_trace_isr_enter
+	call sys_trace_isr_enter
 #endif

 	/* Get IRQ causing interrupt */
@@ -305,7 +305,7 @@ on_thread_stack:

 reschedule:
 #if CONFIG_TRACING
-	call z_sys_trace_thread_switched_in
+	call sys_trace_thread_switched_in
 #endif
 	/* Get reference to _kernel */
 	la t0, _kernel
@@ -20,7 +20,7 @@
  */
 void k_cpu_idle(void)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 #if defined(CONFIG_BOOT_TIME_MEASUREMENT)
 	__idle_time_stamp = k_cycle_get_32();
 #endif
@@ -50,7 +50,7 @@ void k_cpu_idle(void)

 void k_cpu_atomic_idle(unsigned int key)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();

 	__asm__ volatile (
 	    "sti\n\t"
@@ -137,7 +137,7 @@ SECTION_FUNC(TEXT, _interrupt_enter)
 	pushl %eax
 	pushl %edx

-	call z_sys_trace_isr_enter
+	call sys_trace_isr_enter

 	popl %edx
 	popl %eax
@@ -61,7 +61,7 @@ void z_arch_irq_direct_pm(void)

 void z_arch_isr_direct_header(void)
 {
-	z_sys_trace_isr_enter();
+	sys_trace_isr_enter();

 	/* We're not going to unlock IRQs, but we still need to increment this
 	 * so that z_is_in_isr() works
@@ -131,7 +131,7 @@ SECTION_FUNC(TEXT, __swap)
 #ifdef CONFIG_TRACING
 	/* Register the context switch */
 	push %edx
-	call z_sys_trace_thread_switched_in
+	call sys_trace_thread_switched_in
 	pop %edx
 #endif
 	movl _kernel_offset_to_ready_q_cache(%edi), %eax
@@ -45,7 +45,7 @@ void z_new_thread(struct k_thread *t, k_thread_stack_t *stack,

 void k_cpu_idle(void)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 	__asm__ volatile("sti; hlt");
 }

@@ -14,7 +14,7 @@
  */
 void k_cpu_idle(void)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 	__asm__ volatile ("waiti 0");
 }
 /*
@@ -26,7 +26,7 @@ void k_cpu_idle(void)
  */
 void k_cpu_atomic_idle(unsigned int key)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 	__asm__ volatile ("waiti 0\n\t"
 			  "wsr.ps %0\n\t"
 			  "rsync" :: "a"(key));
@@ -125,14 +125,14 @@ extern void z_arch_isr_direct_header(void);
 extern void _IntExit(void);

 #ifdef CONFIG_TRACING
-extern void z_sys_trace_isr_exit(void);
+extern void sys_trace_isr_exit(void);
 #endif

 static inline void z_arch_isr_direct_footer(int maybe_swap)
 {

 #ifdef CONFIG_TRACING
-	z_sys_trace_isr_exit();
+	sys_trace_isr_exit();
 #endif
 	if (maybe_swap) {
 		_IntExit();
@@ -19,12 +19,12 @@
 #define SYS_TRACE_ID_SEMA_TAKE (6u + SYS_TRACE_ID_OFFSET)

 #ifdef CONFIG_TRACING
-void z_sys_trace_idle(void);
-void z_sys_trace_isr_enter(void);
-void z_sys_trace_isr_exit(void);
-void z_sys_trace_isr_exit_to_scheduler(void);
-void z_sys_trace_thread_switched_in(void);
-void z_sys_trace_thread_switched_out(void);
+void sys_trace_idle(void);
+void sys_trace_isr_enter(void);
+void sys_trace_isr_exit(void);
+void sys_trace_isr_exit_to_scheduler(void);
+void sys_trace_thread_switched_in(void);
+void sys_trace_thread_switched_out(void);
 #endif

 #ifdef CONFIG_SEGGER_SYSTEMVIEW
@@ -132,18 +132,11 @@ void z_sys_trace_thread_switched_out(void);
  */
 #define sys_trace_end_call(id)

-#define z_sys_trace_idle()
-
-#define z_sys_trace_isr_enter()
-
-#define z_sys_trace_isr_exit()
-
-#define z_sys_trace_isr_exit_to_scheduler()
-
-#define z_sys_trace_thread_switched_in()
-
-#define z_sys_trace_thread_switched_out()
-
+/**
+ * @brief Called when the cpu enters the idle state
+ */
+#define sys_trace_idle()
+
 /**
  * @}
  */
@@ -13,7 +13,7 @@

 static ALWAYS_INLINE void riscv_idle(unsigned int key)
 {
-	z_sys_trace_idle();
+	sys_trace_idle();
 	/* unlock interrupts */
 	irq_unlock(key);

@@ -159,31 +159,6 @@ void sys_trace_idle(void)
 {
 }

-void z_sys_trace_idle(void)
-{
-	sys_trace_idle();
-}
-
-void z_sys_trace_isr_enter(void)
-{
-	sys_trace_isr_enter();
-}
-
-void z_sys_trace_isr_exit(void)
-{
-	sys_trace_isr_exit();
-}
-
-void z_sys_trace_thread_switched_in(void)
-{
-	sys_trace_thread_switched_in();
-}
-
-void z_sys_trace_thread_switched_out(void)
-{
-	sys_trace_thread_switched_out();
-}
-
 #ifdef CONFIG_TRACING_CPU_STATS_LOG
 static struct k_delayed_work cpu_stats_log;

@@ -153,33 +153,6 @@ void sys_trace_end_call(unsigned int id)
 	ctf_top_end_call(id);
 }

-
-void z_sys_trace_thread_switched_out(void)
-{
-	sys_trace_thread_switched_out();
-}
-void z_sys_trace_thread_switched_in(void)
-{
-	sys_trace_thread_switched_in();
-}
-void z_sys_trace_isr_enter(void)
-{
-	sys_trace_isr_enter();
-}
-void z_sys_trace_isr_exit(void)
-{
-	sys_trace_isr_exit();
-}
-void z_sys_trace_isr_exit_to_scheduler(void)
-{
-	sys_trace_isr_exit_to_scheduler();
-}
-void z_sys_trace_idle(void)
-{
-	sys_trace_idle();
-}
-
-
 static int ctf_top_init(struct device *arg)
 {
 	ARG_UNUSED(arg);
@@ -25,30 +25,12 @@ static inline int is_idle_thread(struct k_thread *thread)
 #endif
 }

-static inline void z__sys_trace_thread_switched_in(void)
-{
-	struct k_thread *thread;
-
-	thread = k_current_get();
-
-	if (is_idle_thread(thread)) {
-		SEGGER_SYSVIEW_OnIdle();
-	} else {
-		SEGGER_SYSVIEW_OnTaskStartExec((u32_t)(uintptr_t)thread);
-	}
-}
-
-#define sys_trace_thread_switched_in() z__sys_trace_thread_switched_in()
-
-#define sys_trace_thread_switched_out() SEGGER_SYSVIEW_OnTaskStopExec()
-
-#define sys_trace_isr_enter() SEGGER_SYSVIEW_RecordEnterISR()
-
-#define sys_trace_isr_exit() SEGGER_SYSVIEW_RecordExitISR()
-
-#define sys_trace_isr_exit_to_scheduler() \
-	SEGGER_SYSVIEW_RecordExitISRToScheduler()
+void sys_trace_thread_switched_in(void);
+void sys_trace_thread_switched_out(void);
+void sys_trace_isr_enter(void);
+void sys_trace_isr_exit(void);
+void sys_trace_isr_exit_to_scheduler(void);
+void sys_trace_idle(void);

 #define sys_trace_thread_priority_set(thread)

@@ -90,8 +72,6 @@ static inline void sys_trace_thread_info(struct k_thread *thread)

 #define sys_trace_void(id) SEGGER_SYSVIEW_RecordVoid(id)

-#define sys_trace_idle() SEGGER_SYSVIEW_OnIdle()
-
 #define sys_trace_end_call(id) SEGGER_SYSVIEW_RecordEndCall(id)

 #endif /* _TRACE_SYSVIEW_H */
@@ -23,34 +23,42 @@ u32_t sysview_get_interrupt(void)
 	return interrupt;
 }

-void z_sys_trace_idle(void)
+void sys_trace_thread_switched_in(void)
 {
-	sys_trace_idle();
+	struct k_thread *thread;
+
+	thread = k_current_get();
+
+	if (is_idle_thread(thread)) {
+		SEGGER_SYSVIEW_OnIdle();
+	} else {
+		SEGGER_SYSVIEW_OnTaskStartExec((u32_t)(uintptr_t)thread);
+	}
 }

-void z_sys_trace_isr_enter(void)
+void sys_trace_thread_switched_out(void)
 {
-	sys_trace_isr_enter();
+	SEGGER_SYSVIEW_OnTaskStopExec();
 }

-void z_sys_trace_isr_exit(void)
+void sys_trace_isr_enter(void)
 {
-	sys_trace_isr_exit();
+	SEGGER_SYSVIEW_RecordEnterISR();
 }

-void z_sys_trace_isr_exit_to_scheduler(void)
+void sys_trace_isr_exit(void)
 {
-	sys_trace_isr_exit_to_scheduler();
+	SEGGER_SYSVIEW_RecordExitISR();
 }

-void z_sys_trace_thread_switched_in(void)
+void sys_trace_isr_exit_to_scheduler(void)
 {
-	sys_trace_thread_switched_in();
+	SEGGER_SYSVIEW_RecordExitISRToScheduler();
 }

-void z_sys_trace_thread_switched_out(void)
+void sys_trace_idle(void)
 {
-	sys_trace_thread_switched_out();
+	SEGGER_SYSVIEW_OnIdle();
 }

 static void send_task_list_cb(void)