tracing: simplify idle thread detection

We now define z_is_idle_thread_object() in ksched.h;
the repeated definitions of functions that do the
same thing have been changed to use this common
definition.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie 2019-09-21 18:36:23 -07:00 committed by Anas Nashif
commit 8f0bb6afe6
6 changed files with 25 additions and 47 deletions
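
The consolidated helper hides the SMP/uniprocessor split behind one name:
SMP builds have one idle thread per CPU, so a per-thread flag is checked,
while uniprocessor builds simply compare against the single z_idle_thread
object. Below is a minimal, stand-alone sketch of that pattern; the struct
layout, the CONFIG_SMP switch, and main() are simplified stand-ins for
illustration, not the actual Zephyr definitions.

/* Illustrative model only -- the real types come from kernel_structs.h
 * and the real z_idle_thread from kernel_internal.h.
 */
#include <stdbool.h>
#include <stdio.h>

#define CONFIG_SMP 0            /* stand-in for the Kconfig option */

struct _thread_base {
	bool is_idle;           /* set on each per-CPU idle thread in SMP builds */
};

struct k_thread {
	struct _thread_base base;
};

static struct k_thread z_idle_thread;   /* the single idle thread in non-SMP builds */

static inline bool z_is_idle_thread_object(struct k_thread *thread)
{
#if CONFIG_SMP
	/* Several idle threads exist (one per CPU), so identity against a
	 * single global object cannot work; use the per-thread flag.
	 */
	return thread->base.is_idle;
#else
	return thread == &z_idle_thread;
#endif
}

int main(void)
{
	struct k_thread worker = { .base = { .is_idle = false } };

	printf("idle thread:   %d\n", z_is_idle_thread_object(&z_idle_thread)); /* 1 */
	printf("worker thread: %d\n", z_is_idle_thread_object(&worker));        /* 0 */
	return 0;
}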

View file

@@ -8,6 +8,7 @@
#define ZEPHYR_KERNEL_INCLUDE_KSCHED_H_
#include <kernel_structs.h>
#include <kernel_internal.h>
#include <timeout_q.h>
#include <debug/tracing.h>
#include <stdbool.h>
@@ -87,6 +88,15 @@ static inline bool z_is_idle_thread_entry(void *entry_point)
return entry_point == idle;
}
static inline bool z_is_idle_thread_object(struct k_thread *thread)
{
#ifdef CONFIG_SMP
return thread->base.is_idle;
#else
return thread == &z_idle_thread;
#endif
}
static inline bool z_is_thread_pending(struct k_thread *thread)
{
return (thread->base.thread_state & _THREAD_PENDING) != 0U;

View file

@@ -80,15 +80,6 @@ static inline bool is_thread_dummy(struct k_thread *thread)
}
#endif
static inline bool is_idle(struct k_thread *thread)
{
#ifdef CONFIG_SMP
return thread->base.is_idle;
#else
return thread == &z_idle_thread;
#endif
}
bool z_is_t1_higher_prio_than_t2(struct k_thread *t1, struct k_thread *t2)
{
if (t1->base.prio < t2->base.prio) {
@@ -151,7 +142,8 @@ static ALWAYS_INLINE bool should_preempt(struct k_thread *th, int preempt_ok)
* preemptible priorities (this is sort of an API glitch).
* They must always be preemptible.
*/
if (!IS_ENABLED(CONFIG_PREEMPT_ENABLED) && is_idle(_current)) {
if (!IS_ENABLED(CONFIG_PREEMPT_ENABLED) &&
z_is_idle_thread_object(_current)) {
return true;
}
@@ -219,7 +211,8 @@ static ALWAYS_INLINE struct k_thread *next_up(void)
}
/* Put _current back into the queue */
if (th != _current && active && !is_idle(_current) && !queued) {
if (th != _current && active && !z_is_idle_thread_object(_current) &&
!queued) {
_priq_run_add(&_kernel.ready_q.runq, _current);
z_mark_thread_as_queued(_current);
}
@@ -274,7 +267,7 @@ static inline int sliceable(struct k_thread *t)
{
return is_preempt(t)
&& !z_is_prio_higher(t->base.prio, slice_max_prio)
&& !is_idle(t)
&& !z_is_idle_thread_object(t)
&& !z_is_thread_timeout_active(t);
}
@@ -642,7 +635,7 @@ ALWAYS_INLINE void z_priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread)
{
struct k_thread *t;
__ASSERT_NO_MSG(!is_idle(thread));
__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) {
if (z_is_t1_higher_prio_than_t2(thread, t)) {
@@ -664,7 +657,7 @@ void z_priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread)
}
#endif
__ASSERT_NO_MSG(!is_idle(thread));
__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
sys_dlist_remove(&thread->base.qnode_dlist);
}
@@ -700,7 +693,7 @@ void z_priq_rb_add(struct _priq_rb *pq, struct k_thread *thread)
{
struct k_thread *t;
__ASSERT_NO_MSG(!is_idle(thread));
__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
thread->base.order_key = pq->next_order_key++;
@@ -727,7 +720,7 @@ void z_priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread)
return;
}
#endif
__ASSERT_NO_MSG(!is_idle(thread));
__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
rb_remove(&pq->tree, &thread->base.qnode_rb);
@@ -910,7 +903,7 @@ void z_impl_k_yield(void)
{
__ASSERT(!z_arch_is_in_isr(), "");
if (!is_idle(_current)) {
if (!z_is_idle_thread_object(_current)) {
LOCKED(&sched_spinlock) {
if (!IS_ENABLED(CONFIG_SMP) ||
z_is_thread_queued(_current)) {

View file

@@ -7,6 +7,7 @@
#include <tracing_cpu_stats.h>
#include <sys/printk.h>
#include <kernel_internal.h>
#include <ksched.h>
enum cpu_state {
CPU_STATE_IDLE,
@@ -22,15 +23,6 @@ static struct cpu_stats stats_hw_tick;
static int nested_interrupts;
static struct k_thread *current_thread;
static int is_idle_thread(struct k_thread *thread)
{
#ifdef CONFIG_SMP
return thread->base.is_idle;
#else
return thread == &z_idle_thread;
#endif
}
void update_counter(volatile u64_t *cnt)
{
u32_t time = k_cycle_get_32();
@@ -107,7 +99,7 @@ void sys_trace_thread_switched_in(void)
cpu_stats_update_counters();
current_thread = k_current_get();
if (is_idle_thread(current_thread)) {
if (z_is_idle_thread_object(current_thread)) {
last_cpu_state = CPU_STATE_IDLE;
} else {
last_cpu_state = CPU_STATE_NON_IDLE;

View file

@@ -10,15 +10,6 @@
#include <kernel_internal.h>
#include "ctf_top.h"
static inline int is_idle_thread(struct k_thread *thread)
{
#ifdef CONFIG_SMP
return thread->base.is_idle;
#else
return thread == &z_idle_thread;
#endif
}
void sys_trace_thread_switched_out(void)
{
struct k_thread *thread = k_current_get();

View file

@@ -13,15 +13,6 @@
#include <Global.h>
#include "SEGGER_SYSVIEW_Zephyr.h"
static inline int is_idle_thread(struct k_thread *thread)
{
#ifdef CONFIG_SMP
return thread->base.is_idle;
#else
return thread == &z_idle_thread;
#endif
}
void sys_trace_thread_switched_in(void);
void sys_trace_thread_switched_out(void);
void sys_trace_isr_enter(void);

View file

@@ -6,6 +6,7 @@
#include <zephyr.h>
#include <kernel_structs.h>
#include <init.h>
#include <ksched.h>
#include <SEGGER_SYSVIEW.h>
#include <Global.h>
@@ -29,7 +30,7 @@ void sys_trace_thread_switched_in(void)
thread = k_current_get();
if (is_idle_thread(thread)) {
if (z_is_idle_thread_object(thread)) {
SEGGER_SYSVIEW_OnIdle();
} else {
SEGGER_SYSVIEW_OnTaskStartExec((u32_t)(uintptr_t)thread);
@@ -69,7 +70,7 @@ static void send_task_list_cb(void)
char name[20];
const char *tname = k_thread_name_get(thread);
if (is_idle_thread(thread)) {
if (z_is_idle_thread_object(thread)) {
continue;
}