kernel: move thread usage routines to own file

Moves the CONFIG_SCHED_THREAD_USAGE block of code out of sched.c
and into its own file. Not only do these routines employ their own
private spin lock, but additional usage routines are expected to be
added in the future.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
Author: Peter Mitsis, 2021-12-13 15:38:25 -05:00 (committed by Anas Nashif)
commit 82c3d531a6
3 changed files with 91 additions and 82 deletions
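For context, the two hooks moved by this commit are meant to bracket each thread's time on the CPU: accounting is stopped for the outgoing thread and started for the incoming one on every context switch. Below is a minimal sketch of that expected call pattern; the wrapper name sched_switch_usage_hook() is hypothetical, and the real call sites live in the scheduler's switch path rather than in this commit.

/* Sketch only, not part of this commit: the wrapper name is hypothetical.
 * It illustrates how the usage hooks are expected to bracket each
 * thread's time on the CPU around a context switch.
 */
static inline void sched_switch_usage_hook(struct k_thread *new_thread)
{
	/* Close the accounting window of the thread leaving the CPU. */
	z_sched_usage_stop();

	/* Open a new window for the thread about to run. */
	z_sched_usage_start(new_thread);
}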

kernel/CMakeLists.txt

@@ -81,6 +81,7 @@ target_sources_ifdef(CONFIG_ATOMIC_OPERATIONS_C kernel PRIVATE atomic_c.c)
target_sources_ifdef(CONFIG_MMU kernel PRIVATE mmu.c)
target_sources_ifdef(CONFIG_POLL kernel PRIVATE poll.c)
target_sources_ifdef(CONFIG_EVENTS kernel PRIVATE events.c)
target_sources_ifdef(CONFIG_SCHED_THREAD_USAGE kernel PRIVATE usage.c)
if(${CONFIG_KERNEL_MEM_POOL})
target_sources(kernel PRIVATE mempool.c)

kernel/sched.c

@@ -1744,85 +1744,3 @@ int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
	}
	return ret;
}
#ifdef CONFIG_SCHED_THREAD_USAGE

/* Need one of these for this to work */
#if !defined(CONFIG_USE_SWITCH) && !defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
#error "No data backend configured for CONFIG_SCHED_THREAD_USAGE"
#endif

static struct k_spinlock usage_lock;

static uint32_t usage_now(void)
{
	uint32_t now;

#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	now = (uint32_t)timing_counter_get();
#else
	now = k_cycle_get_32();
#endif

	/* Edge case: we use a zero as a null ("stop() already called") */
	return (now == 0) ? 1 : now;
}

void z_sched_usage_start(struct k_thread *thread)
{
	/* One write through a volatile pointer doesn't require
	 * synchronization as long as _usage() treats it as volatile
	 * (we can't race with _stop() by design).
	 */
	_current_cpu->usage0 = usage_now();
}

void z_sched_usage_stop(void)
{
	k_spinlock_key_t k = k_spin_lock(&usage_lock);
	uint32_t u0 = _current_cpu->usage0;

	if (u0 != 0) {
		uint32_t dt = usage_now() - u0;

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
		if (z_is_idle_thread_object(_current)) {
			_kernel.idle_thread_usage += dt;
		} else {
			_kernel.all_thread_usage += dt;
		}
#endif

		_current->base.usage += dt;
	}

	_current_cpu->usage0 = 0;
	k_spin_unlock(&usage_lock, k);
}

uint64_t z_sched_thread_usage(struct k_thread *thread)
{
	k_spinlock_key_t k = k_spin_lock(&usage_lock);
	uint32_t u0 = _current_cpu->usage0, now = usage_now();
	uint64_t ret = thread->base.usage;

	if (u0 != 0) {
		uint32_t dt = now - u0;

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
		if (z_is_idle_thread_object(thread)) {
			_kernel.idle_thread_usage += dt;
		} else {
			_kernel.all_thread_usage += dt;
		}
#endif

		ret += dt;
		thread->base.usage = ret;
		_current_cpu->usage0 = now;
	}

	k_spin_unlock(&usage_lock, k);
	return ret;
}

#endif /* CONFIG_SCHED_THREAD_USAGE */
#endif /* CONFIG_SCHED_THREAD_USAGE */

kernel/usage.c Normal file

@@ -0,0 +1,90 @@
/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <kernel.h>
#include <timing/timing.h>
#include <ksched.h>
#include <spinlock.h>

/* Need one of these for this to work */
#if !defined(CONFIG_USE_SWITCH) && !defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
#error "No data backend configured for CONFIG_SCHED_THREAD_USAGE"
#endif

static struct k_spinlock usage_lock;

static uint32_t usage_now(void)
{
	uint32_t now;

#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	now = (uint32_t)timing_counter_get();
#else
	now = k_cycle_get_32();
#endif

	/* Edge case: we use a zero as a null ("stop() already called") */
	return (now == 0) ? 1 : now;
}

void z_sched_usage_start(struct k_thread *thread)
{
	/* One write through a volatile pointer doesn't require
	 * synchronization as long as _usage() treats it as volatile
	 * (we can't race with _stop() by design).
	 */
	_current_cpu->usage0 = usage_now();
}

void z_sched_usage_stop(void)
{
	k_spinlock_key_t k = k_spin_lock(&usage_lock);
	uint32_t u0 = _current_cpu->usage0;

	if (u0 != 0) {
		uint32_t dt = usage_now() - u0;

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
		if (z_is_idle_thread_object(_current)) {
			_kernel.idle_thread_usage += dt;
		} else {
			_kernel.all_thread_usage += dt;
		}
#endif

		_current->base.usage += dt;
	}

	_current_cpu->usage0 = 0;
	k_spin_unlock(&usage_lock, k);
}

uint64_t z_sched_thread_usage(struct k_thread *thread)
{
	k_spinlock_key_t k = k_spin_lock(&usage_lock);
	uint32_t u0 = _current_cpu->usage0, now = usage_now();
	uint64_t ret = thread->base.usage;

	if (u0 != 0) {
		uint32_t dt = now - u0;

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
		if (z_is_idle_thread_object(thread)) {
			_kernel.idle_thread_usage += dt;
		} else {
			_kernel.all_thread_usage += dt;
		}
#endif

		ret += dt;
		thread->base.usage = ret;
		_current_cpu->usage0 = now;
	}

	k_spin_unlock(&usage_lock, k);
	return ret;
}
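As a rough illustration of how the accumulated counter could be read back, here is a minimal sketch (not part of this commit). It assumes the default cycle-counter backend (CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS disabled), so z_sched_thread_usage() returns hardware cycles that can be converted with k_cyc_to_ns_floor64(); the reporting helper itself is hypothetical.

#include <kernel.h>
#include <ksched.h>
#include <sys/printk.h>

/* Hypothetical helper, sketch only: report how long a thread has run.
 * Assumes usage is accumulated in hardware cycles (the k_cycle_get_32()
 * backend in usage_now() above).
 */
void report_thread_usage(struct k_thread *thread)
{
	uint64_t cycles = z_sched_thread_usage(thread);

	printk("thread %p ran for %llu ns\n", thread,
	       k_cyc_to_ns_floor64(cycles));
}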