kernel: update z_sched_thread_usage()

This commit does two things to z_sched_thread_usage(). First,
it updates the API so that it accepts a pointer to the runtime
stats instead of simply returning the usage cycles. This gives it
the flexibility to retrieve additional statistics in the future.

Second, the runtime stats are only updated if the specified thread
is the current thread running on the current core.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
This commit is contained in:
Peter Mitsis 2021-12-14 10:56:14 -05:00 committed by Anas Nashif
commit 5deaffb2ee
3 changed files with 31 additions and 14 deletions

View file

@@ -385,7 +385,11 @@ void z_sched_usage_stop(void);
 
 void z_sched_usage_start(struct k_thread *thread);
 
-uint64_t z_sched_thread_usage(struct k_thread *thread);
+/**
+ * @brief Retrieves thread cycle usage data for specified thread
+ */
+void z_sched_thread_usage(struct k_thread *thread,
+			  struct k_thread_runtime_stats *stats);
 
 static inline void z_sched_usage_switch(struct k_thread *thread)
 {

View file

@@ -1026,10 +1026,10 @@ int k_thread_runtime_stats_get(k_tid_t thread,
 		return -EINVAL;
 	}
 
+	*stats = (k_thread_runtime_stats_t) {};
+
 #ifdef CONFIG_SCHED_THREAD_USAGE
-	stats->execution_cycles = z_sched_thread_usage(thread);
-#else
-	*stats = (k_thread_runtime_stats_t) {};
+	z_sched_thread_usage(thread, stats);
 #endif
 
 	return 0;

View file

@@ -63,15 +63,28 @@ void z_sched_usage_stop(void)
 	k_spin_unlock(&usage_lock, k);
 }
 
-uint64_t z_sched_thread_usage(struct k_thread *thread)
+void z_sched_thread_usage(struct k_thread *thread,
+			  struct k_thread_runtime_stats *stats)
 {
-	k_spinlock_key_t k = k_spin_lock(&usage_lock);
-	uint32_t u0 = _current_cpu->usage0, now = usage_now();
-	uint64_t ret = thread->base.usage;
+	uint32_t u0;
+	uint32_t now;
+	struct _cpu *cpu;
+	k_spinlock_key_t key;
 
-	if (u0 != 0) {
+	cpu = _current_cpu;
+	key = k_spin_lock(&usage_lock);
+
+	u0 = cpu->usage0;
+	now = usage_now();
+
+	/*
+	 * Update the thread's usage stats if it is the current thread
+	 * running on the current core.
+	 */
+
+	if ((u0 != 0) && (thread == cpu->current)) {
 		uint32_t dt = now - u0;
 
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
 		if (z_is_idle_thread_object(thread)) {
 			_kernel.idle_thread_usage += dt;
@@ -80,11 +93,11 @@ uint64_t z_sched_thread_usage(struct k_thread *thread)
 		}
 #endif
 
-		ret += dt;
-		thread->base.usage = ret;
-		_current_cpu->usage0 = now;
+		thread->base.usage += dt;
+		cpu->usage0 = now;
 	}
 
-	k_spin_unlock(&usage_lock, k);
-
-	return ret;
+	stats->execution_cycles = thread->base.usage;
+
+	k_spin_unlock(&usage_lock, key);
 }