tracing: better positioning of tracing points
Improve positioning of tracing calls. Avoid duplicate calls and missed events caused by complex logic. Trace events at the point where they actually happen. Signed-off-by: Anas Nashif <anas.nashif@intel.com>
This commit is contained in:
parent
1530819e12
commit
0ad67650f2
4 changed files with 7 additions and 13 deletions
|
@ -140,11 +140,13 @@ static inline bool z_is_thread_queued(struct k_thread *thread)
|
||||||
static inline void z_mark_thread_as_suspended(struct k_thread *thread)
|
static inline void z_mark_thread_as_suspended(struct k_thread *thread)
|
||||||
{
|
{
|
||||||
thread->base.thread_state |= _THREAD_SUSPENDED;
|
thread->base.thread_state |= _THREAD_SUSPENDED;
|
||||||
|
sys_trace_thread_suspend(thread);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
|
static inline void z_mark_thread_as_not_suspended(struct k_thread *thread)
|
||||||
{
|
{
|
||||||
thread->base.thread_state &= ~_THREAD_SUSPENDED;
|
thread->base.thread_state &= ~_THREAD_SUSPENDED;
|
||||||
|
sys_trace_thread_resume(thread);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void z_mark_thread_as_started(struct k_thread *thread)
|
static inline void z_mark_thread_as_started(struct k_thread *thread)
|
||||||
|
@ -248,9 +250,8 @@ static ALWAYS_INLINE void z_ready_thread(struct k_thread *thread)
|
||||||
{
|
{
|
||||||
if (z_is_thread_ready(thread)) {
|
if (z_is_thread_ready(thread)) {
|
||||||
z_add_thread_to_ready_q(thread);
|
z_add_thread_to_ready_q(thread);
|
||||||
|
sys_trace_thread_ready(thread);
|
||||||
}
|
}
|
||||||
|
|
||||||
sys_trace_thread_ready(thread);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void _ready_one_thread(_wait_q_t *wq)
|
static inline void _ready_one_thread(_wait_q_t *wq)
|
||||||
|
|
|
@ -53,8 +53,6 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
|
||||||
|
|
||||||
z_check_stack_sentinel();
|
z_check_stack_sentinel();
|
||||||
|
|
||||||
sys_trace_thread_switched_out();
|
|
||||||
|
|
||||||
if (is_spinlock) {
|
if (is_spinlock) {
|
||||||
k_spin_release(lock);
|
k_spin_release(lock);
|
||||||
}
|
}
|
||||||
|
@ -62,6 +60,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
|
||||||
new_thread = z_get_next_ready_thread();
|
new_thread = z_get_next_ready_thread();
|
||||||
|
|
||||||
if (new_thread != old_thread) {
|
if (new_thread != old_thread) {
|
||||||
|
sys_trace_thread_switched_out();
|
||||||
#ifdef CONFIG_TIMESLICING
|
#ifdef CONFIG_TIMESLICING
|
||||||
z_reset_time_slice();
|
z_reset_time_slice();
|
||||||
#endif
|
#endif
|
||||||
|
@ -80,9 +79,9 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
|
||||||
_current = new_thread;
|
_current = new_thread;
|
||||||
arch_switch(new_thread->switch_handle,
|
arch_switch(new_thread->switch_handle,
|
||||||
&old_thread->switch_handle);
|
&old_thread->switch_handle);
|
||||||
}
|
|
||||||
|
|
||||||
sys_trace_thread_switched_in();
|
sys_trace_thread_switched_in();
|
||||||
|
}
|
||||||
|
|
||||||
if (is_spinlock) {
|
if (is_spinlock) {
|
||||||
arch_irq_unlock(key);
|
arch_irq_unlock(key);
|
||||||
|
@ -119,7 +118,6 @@ static inline int z_swap_irqlock(unsigned int key)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
z_check_stack_sentinel();
|
z_check_stack_sentinel();
|
||||||
|
|
||||||
#ifndef CONFIG_ARM
|
#ifndef CONFIG_ARM
|
||||||
sys_trace_thread_switched_out();
|
sys_trace_thread_switched_out();
|
||||||
#endif
|
#endif
|
||||||
|
@ -127,7 +125,6 @@ static inline int z_swap_irqlock(unsigned int key)
|
||||||
#ifndef CONFIG_ARM
|
#ifndef CONFIG_ARM
|
||||||
sys_trace_thread_switched_in();
|
sys_trace_thread_switched_in();
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -468,6 +468,7 @@ static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
|
||||||
{
|
{
|
||||||
z_remove_thread_from_ready_q(thread);
|
z_remove_thread_from_ready_q(thread);
|
||||||
z_mark_thread_as_pending(thread);
|
z_mark_thread_as_pending(thread);
|
||||||
|
sys_trace_thread_pend(thread);
|
||||||
|
|
||||||
if (wait_q != NULL) {
|
if (wait_q != NULL) {
|
||||||
thread->base.pended_on = wait_q;
|
thread->base.pended_on = wait_q;
|
||||||
|
@ -488,8 +489,6 @@ static void pend(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
|
||||||
|
|
||||||
z_add_thread_timeout(thread, ticks);
|
z_add_thread_timeout(thread, ticks);
|
||||||
}
|
}
|
||||||
|
|
||||||
sys_trace_thread_pend(thread);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
|
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q, s32_t timeout)
|
||||||
|
|
|
@ -679,8 +679,6 @@ void z_impl_k_thread_suspend(struct k_thread *thread)
|
||||||
|
|
||||||
z_thread_single_suspend(thread);
|
z_thread_single_suspend(thread);
|
||||||
|
|
||||||
sys_trace_thread_suspend(thread);
|
|
||||||
|
|
||||||
if (thread == _current) {
|
if (thread == _current) {
|
||||||
z_reschedule(&lock, key);
|
z_reschedule(&lock, key);
|
||||||
} else {
|
} else {
|
||||||
|
@ -709,7 +707,6 @@ void z_impl_k_thread_resume(struct k_thread *thread)
|
||||||
|
|
||||||
z_thread_single_resume(thread);
|
z_thread_single_resume(thread);
|
||||||
|
|
||||||
sys_trace_thread_resume(thread);
|
|
||||||
z_reschedule(&lock, key);
|
z_reschedule(&lock, key);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue