kernel: extend CPU runtime stats
Extends the CPU usage runtime stats to track current, total, peak and average usage (as bounded by the scheduling of the idle thread). This permits a developer to obtain more system information if desired to tune the system.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
parent 572f1db56a
commit 4eb1dd02cc
5 changed files with 106 additions and 16 deletions
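A minimal usage sketch (not part of this commit): with CONFIG_SCHED_THREAD_USAGE_ALL enabled, the aggregated figures this change wires up can be read through the existing public API roughly as follows. The printk format support is an assumption about the build configuration, and the helper name is illustrative only.

#include <kernel.h>

/* Hypothetical helper: report system-wide CPU load from the runtime stats.
 * For CPU statistics, execution_cycles covers idle + non-idle time while
 * total_cycles covers only the non-idle portion, so their ratio gives an
 * approximate load figure.
 */
static void report_cpu_load(void)
{
        k_thread_runtime_stats_t stats;

        if (k_thread_runtime_stats_all_get(&stats) != 0) {
                return;
        }

        if (stats.execution_cycles != 0U) {
                printk("busy=%llu idle=%llu (~%llu%% load)\n",
                       (unsigned long long)stats.total_cycles,
                       (unsigned long long)stats.idle_cycles,
                       (unsigned long long)((stats.total_cycles * 100U) /
                                            stats.execution_cycles));
        }
}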
@@ -174,6 +174,7 @@ struct _thread_userspace_local_data {
typedef struct k_thread_runtime_stats {
#ifdef CONFIG_SCHED_THREAD_USAGE
        uint64_t execution_cycles;
        uint64_t total_cycles;          /* total # of non-idle cycles */
        /*
         * In the context of thread statistics, [execution_cycles] is the same
         * as the total # of non-idle cycles. In the context of CPU statistics,
@@ -182,11 +183,19 @@ typedef struct k_thread_runtime_stats {
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
        /*
         * For threads, the following fields refer to the time spent executing
         * as bounded by when the thread was scheduled in and scheduled out.
         * For CPUs, the same fields refer to the time spent executing
         * non-idle threads as bounded by the idle thread(s).
         */

        uint64_t current_cycles;        /* current # of non-idle cycles */
        uint64_t peak_cycles;           /* peak # of non-idle cycles */
        uint64_t total_cycles;          /* total # of non-idle cycles */
        uint64_t average_cycles;        /* average # of non-idle cycles */
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        /*
         * This field is always zero for individual threads. It only comes
         * into play when gathering statistics for the CPU. In that case it
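Another sketch (not part of this commit), assuming CONFIG_SCHED_THREAD_USAGE_ANALYSIS=y: the per-thread view of the fields added above can be obtained through the existing k_thread_runtime_stats_get() API. The printk usage is again an assumption.

#include <kernel.h>

/* Hypothetical helper: print the analysis fields for a single thread. */
static void show_thread_usage(k_tid_t tid)
{
        k_thread_runtime_stats_t stats;

        if (k_thread_runtime_stats_get(tid, &stats) == 0) {
                /* For a thread, total_cycles mirrors execution_cycles;
                 * the analysis fields describe its scheduling windows.
                 */
                printk("total=%llu peak=%llu avg=%llu\n",
                       (unsigned long long)stats.total_cycles,
                       (unsigned long long)stats.peak_cycles,
                       (unsigned long long)stats.average_cycles);
        }
}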
@@ -28,6 +28,7 @@
#include <sys/util.h>
#include <sys/sys_heap.h>
#include <arch/structs.h>
#include <kernel/stats.h>
#endif

#ifdef __cplusplus
@@ -131,7 +132,17 @@ struct _cpu {
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE
        /*
         * [usage0] is used as a timestamp to mark the beginning of an
         * execution window. [0] is a special value indicating that it
         * has been stopped (but not disabled).
         */

        uint32_t usage0;

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        struct k_cycle_stats usage;
#endif
#endif

        /* Per CPU architecture specifics */
@@ -172,11 +183,6 @@ struct z_kernel {
#if defined(CONFIG_THREAD_MONITOR)
        struct k_thread *threads; /* singly linked list of ALL threads */
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        uint64_t all_thread_usage;
        uint64_t idle_thread_usage;
#endif
};

typedef struct z_kernel _kernel_t;
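The per-CPU accounting replaces the global all_thread_usage/idle_thread_usage counters in z_kernel with a struct k_cycle_stats embedded in each _cpu (made visible through the newly included <kernel/stats.h>). Its definition is not part of this diff; a sketch inferred from the fields the scheduler hunks later in this commit touch (usage.total, usage.current, usage.longest, usage.num_windows) might look like the following, with the exact layout and types being assumptions.

/* Assumed shape of the cycle accounting structure; the field names come
 * from the usage-tracking hunks later in this commit, everything else is
 * a guess.
 */
struct k_cycle_stats {
        uint64_t total;        /* total non-idle cycles accumulated */
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
        uint64_t current;      /* cycles in the current execution window */
        uint64_t longest;      /* longest (peak) execution window so far */
        uint32_t num_windows;  /* number of completed execution windows */
#endif
};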
@@ -385,6 +385,11 @@ void z_sched_usage_stop(void);

void z_sched_usage_start(struct k_thread *thread);

/**
 * @brief Retrieves CPU cycle usage data for specified core
 */
void z_sched_cpu_usage(uint8_t core_id, struct k_thread_runtime_stats *stats);

/**
 * @brief Retrieves thread cycle usage data for specified thread
 */
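A hedged sketch of how kernel-internal code might use the new z_sched_cpu_usage() helper declared above to dump per-core figures, assuming CONFIG_SCHED_THREAD_USAGE_ALL and CONFIG_SCHED_THREAD_USAGE_ANALYSIS are enabled; the helper name and the printk usage are illustrative only.

/* Hypothetical kernel-side helper: report each core's busy/idle cycles. */
static void dump_per_cpu_usage(void)
{
        struct k_thread_runtime_stats stats;

        for (uint8_t i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
                z_sched_cpu_usage(i, &stats);
                printk("cpu%u: busy=%llu idle=%llu peak=%llu\n",
                       (unsigned int)i,
                       (unsigned long long)stats.total_cycles,
                       (unsigned long long)stats.idle_cycles,
                       (unsigned long long)stats.peak_cycles);
        }
}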
@@ -1037,6 +1037,10 @@ int k_thread_runtime_stats_get(k_tid_t thread,

int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        k_thread_runtime_stats_t tmp_stats;
#endif

        if (stats == NULL) {
                return -EINVAL;
        }
@@ -1044,8 +1048,19 @@ int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
        *stats = (k_thread_runtime_stats_t) {};

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        stats->execution_cycles = (_kernel.all_thread_usage
                                   + _kernel.idle_thread_usage);
        /* Retrieve the usage stats for each core and amalgamate them. */

        for (uint8_t i = 0; i < CONFIG_MP_NUM_CPUS; i++) {
                z_sched_cpu_usage(i, &tmp_stats);

                stats->execution_cycles += tmp_stats.execution_cycles;
                stats->total_cycles += tmp_stats.total_cycles;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
                stats->peak_cycles += tmp_stats.peak_cycles;
                stats->average_cycles += tmp_stats.average_cycles;
#endif
                stats->idle_cycles += tmp_stats.idle_cycles;
        }
#endif

        return 0;
@@ -34,13 +34,20 @@ static uint32_t usage_now(void)
/**
 * Update the usage statistics for the specified CPU and thread
 */
static void sched_update_usage(struct k_thread *thread, uint32_t cycles)
static void sched_update_usage(struct _cpu *cpu, struct k_thread *thread,
                               uint32_t cycles)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        if (z_is_idle_thread_object(thread)) {
                _kernel.idle_thread_usage += cycles;
        } else {
                _kernel.all_thread_usage += cycles;
        if (!z_is_idle_thread_object(thread)) {
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
                cpu->usage.current += cycles;

                if (cpu->usage.longest < cpu->usage.current) {
                        cpu->usage.longest = cpu->usage.current;
                }
#endif

                cpu->usage.total += cycles;
        }
#endif

@@ -87,13 +94,59 @@ void z_sched_usage_stop(void)
        if (u0 != 0) {
                uint32_t dt = usage_now() - u0;

                sched_update_usage(cpu->current, dt);
                sched_update_usage(cpu, cpu->current, dt);
        }

        cpu->usage0 = 0;
        k_spin_unlock(&usage_lock, k);
}

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
void z_sched_cpu_usage(uint8_t core_id, struct k_thread_runtime_stats *stats)
{
        k_spinlock_key_t key;
        struct _cpu *cpu;
        uint32_t now;
        uint32_t u0;

        cpu = _current_cpu;
        key = k_spin_lock(&usage_lock);

        u0 = cpu->usage0;
        now = usage_now();

        if ((u0 != 0) && (&_kernel.cpus[core_id] == cpu)) {
                uint32_t dt = now - u0;

                /* It is safe to update the CPU's usage stats */

                sched_update_usage(cpu, cpu->current, dt);

                cpu->usage0 = now;
        }

        stats->total_cycles = cpu->usage.total;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
        stats->current_cycles = cpu->usage.current;
        stats->peak_cycles = cpu->usage.longest;

        if (cpu->usage.num_windows == 0) {
                stats->average_cycles = 0;
        } else {
                stats->average_cycles = stats->total_cycles /
                                        cpu->usage.num_windows;
        }
#endif

        stats->idle_cycles =
                _kernel.cpus[core_id].idle_thread->base.usage.total;

        stats->execution_cycles = stats->total_cycles + stats->idle_cycles;

        k_spin_unlock(&usage_lock, key);
}
#endif

void z_sched_thread_usage(struct k_thread *thread,
                          struct k_thread_runtime_stats *stats)
{
@@ -116,19 +169,19 @@ void z_sched_thread_usage(struct k_thread *thread,
                 * running on the current core.
                 */

                sched_update_usage(thread, dt);
                sched_update_usage(cpu, thread, dt);

                cpu->usage0 = now;
        }

        stats->execution_cycles = thread->base.usage.total;
        stats->total_cycles = thread->base.usage.total;

        /* Copy-out the thread's usage stats */

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
        stats->current_cycles = thread->base.usage.current;
        stats->peak_cycles = thread->base.usage.longest;
        stats->total_cycles = thread->base.usage.total;

        if (thread->base.usage.num_windows == 0) {
                stats->average_cycles = 0;
@@ -136,7 +189,9 @@ void z_sched_thread_usage(struct k_thread *thread,
                stats->average_cycles = stats->total_cycles /
                                        thread->base.usage.num_windows;
        }
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
        stats->idle_cycles = 0;
#endif
        stats->execution_cycles = thread->base.usage.total;
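For the analysis fields, a "window" is one scheduled-in to scheduled-out span for a thread, or one stretch of non-idle execution bounded by the idle thread(s) for a CPU. An illustrative worked example (numbers invented; the reset of usage.current and the num_windows increment presumably happen in code outside the hunks shown here):

/* If a thread ran three scheduling windows of 100, 300 and 200 cycles:
 *   total_cycles   = 600            (sum of all windows)
 *   peak_cycles    = 300            (longest single window, usage.longest)
 *   average_cycles = 600 / 3 = 200  (total divided by num_windows)
 *   current_cycles = whatever has accumulated in a still-open window
 */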