diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h
index 5eea3207a7c..4b2c5446051 100644
--- a/kernel/include/ksched.h
+++ b/kernel/include/ksched.h
@@ -140,11 +140,13 @@ static inline bool z_is_thread_queued(struct k_thread *thread)
 static inline void z_mark_thread_as_suspended(struct k_thread *thread)
 {
 	thread->base.thread_state |= _THREAD_SUSPENDED;
+	sys_trace_thread_suspend(thread);
 }
 
 static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
 {
 	thread->base.thread_state &= ~_THREAD_SUSPENDED;
+	sys_trace_thread_resume(thread);
 }
 
 static inline void z_mark_thread_as_started(struct k_thread *thread)
@@ -248,9 +250,8 @@ static ALWAYS_INLINE void z_ready_thread(struct k_thread *thread)
 {
 	if (z_is_thread_ready(thread)) {
 		z_add_thread_to_ready_q(thread);
+		sys_trace_thread_ready(thread);
 	}
-
-	sys_trace_thread_ready(thread);
 }
 
 static inline void _ready_one_thread(_wait_q_t *wq)
diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h
index 53ad0cfe340..3537a24d21d 100644
--- a/kernel/include/kswap.h
+++ b/kernel/include/kswap.h
@@ -53,8 +53,6 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 
 	z_check_stack_sentinel();
 
-	sys_trace_thread_switched_out();
-
 	if (is_spinlock) {
 		k_spin_release(lock);
 	}
@@ -62,6 +60,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 	new_thread = z_get_next_ready_thread();
 
 	if (new_thread != old_thread) {
+		sys_trace_thread_switched_out();
 #ifdef CONFIG_TIMESLICING
 		z_reset_time_slice();
 #endif
@@ -80,9 +79,9 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 		_current = new_thread;
 		arch_switch(new_thread->switch_handle,
 			    &old_thread->switch_handle);
-	}
 
-	sys_trace_thread_switched_in();
+		sys_trace_thread_switched_in();
+	}
 
 	if (is_spinlock) {
 		arch_irq_unlock(key);
@@ -119,7 +118,6 @@ static inline int z_swap_irqlock(unsigned int key)
 {
 	int ret;
 	z_check_stack_sentinel();
-
 #ifndef CONFIG_ARM
 	sys_trace_thread_switched_out();
 #endif
@@ -127,7 +125,6 @@ static inline int z_swap_irqlock(unsigned int key)
 #ifndef CONFIG_ARM
 	sys_trace_thread_switched_in();
 #endif
-
 	return ret;
 }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 0878092b607..e1ecbbbf934 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -468,6 +468,7 @@ static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
 {
 	z_remove_thread_from_ready_q(thread);
 	z_mark_thread_as_pending(thread);
+	sys_trace_thread_pend(thread);
 
 	if (wait_q != NULL) {
 		thread->base.pended_on = wait_q;
@@ -488,8 +489,6 @@ static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
 
 		z_add_thread_timeout(thread, ticks);
 	}
-
-	sys_trace_thread_pend(thread);
 }
 
 void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
diff --git a/kernel/thread.c b/kernel/thread.c
index 2d5d99cbebb..527980d34c1 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -679,8 +679,6 @@ void z_impl_k_thread_suspend(struct k_thread *thread)
 
 	z_thread_single_suspend(thread);
 
-	sys_trace_thread_suspend(thread);
-
 	if (thread == _current) {
 		z_reschedule(&lock, key);
 	} else {
@@ -709,7 +707,6 @@ void z_impl_k_thread_resume(struct k_thread *thread)
 
 	z_thread_single_resume(thread);
 
-	sys_trace_thread_resume(thread);
 	z_reschedule(&lock, key);
 }