kernel: Refactor CPU usage

Refactors CPU usage (thread runtime stats) to make it easier to
integrate with the object core statistics framework.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
Author:    Peter Mitsis <peter.mitsis@intel.com>
Date:      2023-05-23 18:36:04 -04:00
Committer: Johan Hedberg
Commit:    9bedfd82a2
3 changed files, 24 insertions(+), 20 deletions(-)
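
The change is mechanical but has a clear shape: struct _cpu stops embedding its own struct k_cycle_stats and instead holds a pointer, the storage for all CPUs moves into a single usage[CONFIG_MP_MAX_NUM_CPUS] array in struct z_kernel, and z_init_cpu() wires each per-CPU pointer to its slot. Centralizing the storage is what lets a statistics framework find every CPU's stats in one place. Judging by the symbols in the hunks, the three touched files are the kernel structures header (struct _cpu, struct z_kernel), the CPU init code (z_init_cpu()), and the usage-tracking code (sched_cpu_update_usage() and friends). A standalone sketch of the pattern, using mock types rather than the real kernel structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_CPUS 2 /* stands in for CONFIG_MP_MAX_NUM_CPUS */

/* Mock of the stats block; fields as seen in the hunks below */
struct cycle_stats {
        uint64_t total;
        bool track_usage;
};

struct cpu_mock {
        struct cycle_stats *usage; /* after: pointer into the central array */
};

struct kernel_mock {
        struct cpu_mock cpus[NUM_CPUS];
        struct cycle_stats usage[NUM_CPUS]; /* after: one enumerable block */
};

int main(void)
{
        struct kernel_mock k = { 0 };

        /* Wire each CPU to its slot, as z_init_cpu() does in the diff */
        for (int i = 0; i < NUM_CPUS; i++) {
                k.cpus[i].usage = &k.usage[i];
                k.cpus[i].usage->track_usage = true;
        }

        /* A stats framework can now walk one contiguous array */
        for (int i = 0; i < NUM_CPUS; i++) {
                printf("cpu%d total=%llu\n", i,
                       (unsigned long long)k.usage[i].total);
        }

        return 0;
}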


@@ -141,7 +141,7 @@ struct _cpu {
         uint32_t usage0;
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
-        struct k_cycle_stats usage;
+        struct k_cycle_stats *usage;
 #endif
 #endif

@@ -183,6 +183,9 @@ struct z_kernel {
 #if defined(CONFIG_THREAD_MONITOR)
         struct k_thread *threads; /* singly linked list of ALL threads */
 #endif
+#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
+        struct k_cycle_stats usage[CONFIG_MP_MAX_NUM_CPUS];
+#endif
 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
         /* Need to signal an IPI at the next scheduling point */


@@ -399,7 +399,8 @@ void z_init_cpu(int id)
                 (Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[id]) +
                  K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[id]));
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
-        _kernel.cpus[id].usage.track_usage =
+        _kernel.cpus[id].usage = &_kernel.usage[id];
+        _kernel.cpus[id].usage->track_usage =
                 CONFIG_SCHED_THREAD_USAGE_AUTO_ENABLE;
 #endif
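
Note the ordering inside the hunk: the new pointer assignment lands above the existing track_usage write, which after this commit dereferences that pointer, so usage must be wired to its slot in _kernel.usage[] before anything touches the stats through it.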


@@ -35,22 +35,22 @@ static uint32_t usage_now(void)
 #ifdef CONFIG_SCHED_THREAD_USAGE_ALL
 static void sched_cpu_update_usage(struct _cpu *cpu, uint32_t cycles)
 {
-        if (!cpu->usage.track_usage) {
+        if (!cpu->usage->track_usage) {
                 return;
         }

         if (cpu->current != cpu->idle_thread) {
-                cpu->usage.total += cycles;
+                cpu->usage->total += cycles;

 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
-                cpu->usage.current += cycles;
-                if (cpu->usage.longest < cpu->usage.current) {
-                        cpu->usage.longest = cpu->usage.current;
+                cpu->usage->current += cycles;

+                if (cpu->usage->longest < cpu->usage->current) {
+                        cpu->usage->longest = cpu->usage->current;
                 }
         } else {
-                cpu->usage.current = 0;
-                cpu->usage.num_windows++;
+                cpu->usage->current = 0;
+                cpu->usage->num_windows++;
 #endif
         }
 }
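
The accounting logic itself is untouched; only the accessor changes from `.` to `->`. total accumulates cycles whenever a non-idle thread was running. Under CONFIG_SCHED_THREAD_USAGE_ANALYSIS, current accumulates within the live measurement window and longest records the peak window, while a switch to the idle thread closes the window: current resets and num_windows increments. z_sched_cpu_usage() below then reports total / num_windows as the average; for example, two closed windows of 300 and 500 busy cycles give total_cycles = 800 and average_cycles = 400.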
@@ -148,16 +148,16 @@ void z_sched_cpu_usage(uint8_t cpu_id, struct k_thread_runtime_stats *stats)
                 cpu->usage0 = now;
         }

-        stats->total_cycles = cpu->usage.total;
+        stats->total_cycles = cpu->usage->total;
 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
-        stats->current_cycles = cpu->usage.current;
-        stats->peak_cycles = cpu->usage.longest;
+        stats->current_cycles = cpu->usage->current;
+        stats->peak_cycles = cpu->usage->longest;

-        if (cpu->usage.num_windows == 0) {
+        if (cpu->usage->num_windows == 0) {
                 stats->average_cycles = 0;
         } else {
                 stats->average_cycles = stats->total_cycles /
-                                        cpu->usage.num_windows;
+                                        cpu->usage->num_windows;
         }
 #endif
@@ -282,7 +282,7 @@ void k_sys_runtime_stats_enable(void)
         key = k_spin_lock(&usage_lock);

-        if (_current_cpu->usage.track_usage) {
+        if (_current_cpu->usage->track_usage) {
                 /*
                  * Usage tracking is already enabled on the current CPU
@@ -299,10 +299,10 @@ void k_sys_runtime_stats_enable(void)
         unsigned int num_cpus = arch_num_cpus();

         for (uint8_t i = 0; i < num_cpus; i++) {
-                _kernel.cpus[i].usage.track_usage = true;
+                _kernel.cpus[i].usage->track_usage = true;
 #ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
-                _kernel.cpus[i].usage.num_windows++;
-                _kernel.cpus[i].usage.current = 0;
+                _kernel.cpus[i].usage->num_windows++;
+                _kernel.cpus[i].usage->current = 0;
 #endif
         }
@@ -316,7 +316,7 @@ void k_sys_runtime_stats_disable(void)
         key = k_spin_lock(&usage_lock);

-        if (!_current_cpu->usage.track_usage) {
+        if (!_current_cpu->usage->track_usage) {
                 /*
                  * Usage tracking is already disabled on the current CPU
@@ -337,7 +337,7 @@ void k_sys_runtime_stats_disable(void)
                 if (cpu->usage0 != 0) {
                         sched_cpu_update_usage(cpu, now - cpu->usage0);
                 }

-                cpu->usage.track_usage = false;
+                cpu->usage->track_usage = false;
         }

         k_spin_unlock(&usage_lock, key);
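
For callers the refactor is invisible behind the public runtime-stats API. A usage sketch (not part of this commit; k_sys_runtime_stats_enable(), k_sys_runtime_stats_disable(), and k_thread_runtime_stats_all_get() are existing Zephyr APIs, and the build is assumed to enable CONFIG_SCHED_THREAD_USAGE_ALL):

#include <zephyr/kernel.h>

/* Roughly gauge how busy the system is over a 100 ms window */
void report_cpu_load(void)
{
        k_thread_runtime_stats_t stats;

        k_sys_runtime_stats_enable();  /* sets track_usage on every CPU */
        k_sleep(K_MSEC(100));          /* let some cycles accumulate */

        if (k_thread_runtime_stats_all_get(&stats) == 0) {
                /* total_cycles aggregates the per-CPU non-idle counts
                 * that z_sched_cpu_usage() reads out above
                 */
                printk("non-idle cycles: %llu\n",
                       (unsigned long long)stats.total_cycles);
        }

        k_sys_runtime_stats_disable();
}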