From 82c3d531a69473781d5c49ac747c9b0e043891bd Mon Sep 17 00:00:00 2001
From: Peter Mitsis
Date: Mon, 13 Dec 2021 15:38:25 -0500
Subject: [PATCH] kernel: move thread usage routines to own file

Moves the CONFIG_SCHED_THREAD_USAGE block of code out of sched.c
into its own file. Not only do these routines employ their own private
spin lock, but it is expected that additional usage routines will be
added in the future.

Signed-off-by: Peter Mitsis
---
 kernel/CMakeLists.txt |  1 +
 kernel/sched.c        | 82 ---------------------------------------
 kernel/usage.c        | 90 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 91 insertions(+), 82 deletions(-)
 create mode 100644 kernel/usage.c

diff --git a/kernel/CMakeLists.txt b/kernel/CMakeLists.txt
index 2b04386287b..a9432b62fff 100644
--- a/kernel/CMakeLists.txt
+++ b/kernel/CMakeLists.txt
@@ -81,6 +81,7 @@ target_sources_ifdef(CONFIG_ATOMIC_OPERATIONS_C kernel PRIVATE atomic_c.c)
 target_sources_ifdef(CONFIG_MMU kernel PRIVATE mmu.c)
 target_sources_ifdef(CONFIG_POLL kernel PRIVATE poll.c)
 target_sources_ifdef(CONFIG_EVENTS kernel PRIVATE events.c)
+target_sources_ifdef(CONFIG_SCHED_THREAD_USAGE kernel PRIVATE usage.c)
 
 if(${CONFIG_KERNEL_MEM_POOL})
   target_sources(kernel PRIVATE mempool.c)
diff --git a/kernel/sched.c b/kernel/sched.c
index 08e8674157a..adeffd7ef5c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1744,85 +1744,3 @@ int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
 	}
 	return ret;
 }
-
-#ifdef CONFIG_SCHED_THREAD_USAGE
-
-/* Need one of these for this to work */
-#if !defined(CONFIG_USE_SWITCH) && !defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
-#error "No data backend configured for CONFIG_SCHED_THREAD_USAGE"
-#endif
-
-static struct k_spinlock usage_lock;
-
-static uint32_t usage_now(void)
-{
-	uint32_t now;
-
-#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
-	now = (uint32_t)timing_counter_get();
-#else
-	now = k_cycle_get_32();
-#endif
-
-	/* Edge case: we use a zero as a null ("stop() already called") */
-	return (now == 0) ? 1 : now;
-}
-
-void z_sched_usage_start(struct k_thread *thread)
-{
-	/* One write through a volatile pointer doesn't require
-	 * synchronization as long as _usage() treats it as volatile
-	 * (we can't race with _stop() by design).
-	 */
-	_current_cpu->usage0 = usage_now();
-}
-
-void z_sched_usage_stop(void)
-{
-	k_spinlock_key_t k = k_spin_lock(&usage_lock);
-	uint32_t u0 = _current_cpu->usage0;
-
-	if (u0 != 0) {
-		uint32_t dt = usage_now() - u0;
-
-#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
-		if (z_is_idle_thread_object(_current)) {
-			_kernel.idle_thread_usage += dt;
-		} else {
-			_kernel.all_thread_usage += dt;
-		}
-#endif
-		_current->base.usage += dt;
-	}
-
-	_current_cpu->usage0 = 0;
-	k_spin_unlock(&usage_lock, k);
-}
-
-uint64_t z_sched_thread_usage(struct k_thread *thread)
-{
-	k_spinlock_key_t k = k_spin_lock(&usage_lock);
-	uint32_t u0 = _current_cpu->usage0, now = usage_now();
-	uint64_t ret = thread->base.usage;
-
-	if (u0 != 0) {
-		uint32_t dt = now - u0;
-
-#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
-		if (z_is_idle_thread_object(thread)) {
-			_kernel.idle_thread_usage += dt;
-		} else {
-			_kernel.all_thread_usage += dt;
-		}
-#endif
-
-		ret += dt;
-		thread->base.usage = ret;
-		_current_cpu->usage0 = now;
-	}
-
-	k_spin_unlock(&usage_lock, k);
-	return ret;
-}
diff --git a/kernel/usage.c b/kernel/usage.c
new file mode 100644
index 00000000000..d695bf51ec8
--- /dev/null
+++ b/kernel/usage.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include
+
+#include
+#include
+#include
+
+/* Need one of these for this to work */
+#if !defined(CONFIG_USE_SWITCH) && !defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
+#error "No data backend configured for CONFIG_SCHED_THREAD_USAGE"
+#endif
+
+static struct k_spinlock usage_lock;
+
+static uint32_t usage_now(void)
+{
+	uint32_t now;
+
+#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
+	now = (uint32_t)timing_counter_get();
+#else
+	now = k_cycle_get_32();
+#endif
+
+	/* Edge case: we use a zero as a null ("stop() already called") */
+	return (now == 0) ? 1 : now;
+}
+
+void z_sched_usage_start(struct k_thread *thread)
+{
+	/* One write through a volatile pointer doesn't require
+	 * synchronization as long as _usage() treats it as volatile
+	 * (we can't race with _stop() by design).
+	 */
+
+	_current_cpu->usage0 = usage_now();
+}
+
+void z_sched_usage_stop(void)
+{
+	k_spinlock_key_t k = k_spin_lock(&usage_lock);
+	uint32_t u0 = _current_cpu->usage0;
+
+	if (u0 != 0) {
+		uint32_t dt = usage_now() - u0;
+
+#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
+		if (z_is_idle_thread_object(_current)) {
+			_kernel.idle_thread_usage += dt;
+		} else {
+			_kernel.all_thread_usage += dt;
+		}
+#endif
+		_current->base.usage += dt;
+	}
+
+	_current_cpu->usage0 = 0;
+	k_spin_unlock(&usage_lock, k);
+}
+
+uint64_t z_sched_thread_usage(struct k_thread *thread)
+{
+	k_spinlock_key_t k = k_spin_lock(&usage_lock);
+	uint32_t u0 = _current_cpu->usage0, now = usage_now();
+	uint64_t ret = thread->base.usage;
+
+	if (u0 != 0) {
+		uint32_t dt = now - u0;
+
+#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
+		if (z_is_idle_thread_object(thread)) {
+			_kernel.idle_thread_usage += dt;
+		} else {
+			_kernel.all_thread_usage += dt;
+		}
+#endif
+
+		ret += dt;
+		thread->base.usage = ret;
+		_current_cpu->usage0 = now;
+	}
+
+	k_spin_unlock(&usage_lock, k);
+	return ret;
+}
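
For readers who want to see the bookkeeping outside the kernel tree, below is a small, self-contained C sketch of the pattern the moved routines implement: record a timestamp when a thread is switched in, add the elapsed delta to the thread's running total when it is switched out, reserve 0 as the "nothing being timed" sentinel, and fold any in-progress slice into a query. The names fake_cycles, usage_slot and struct fake_thread are invented for the sketch and are not Zephyr symbols; the per-CPU usage0 slot and the usage_lock spinlock are collapsed into plain globals because the sketch is single-threaded, and a plain counter stands in for k_cycle_get_32()/timing_counter_get().

/* Illustrative sketch only; not Zephyr code or APIs. */
#include <stdint.h>
#include <stdio.h>

struct fake_thread {
	uint64_t usage;              /* accumulated "cycles" of execution */
};

static uint32_t fake_cycles = 1000;  /* stand-in for the cycle counter */
static uint32_t usage_slot;          /* stand-in for _current_cpu->usage0 */

static uint32_t usage_now(void)
{
	uint32_t now = fake_cycles;

	/* 0 is reserved as the "nothing being timed" sentinel */
	return (now == 0) ? 1 : now;
}

/* Switch-in: remember the start timestamp */
static void usage_start(void)
{
	usage_slot = usage_now();
}

/* Switch-out: bank the elapsed delta and clear the slot */
static void usage_stop(struct fake_thread *t)
{
	if (usage_slot != 0) {
		t->usage += usage_now() - usage_slot;
	}
	usage_slot = 0;
}

/* Query: fold any in-progress slice into the total and restart it,
 * mirroring what z_sched_thread_usage() does under usage_lock.
 */
static uint64_t thread_usage(struct fake_thread *t)
{
	if (usage_slot != 0) {
		uint32_t now = usage_now();

		t->usage += now - usage_slot;
		usage_slot = now;
	}
	return t->usage;
}

int main(void)
{
	struct fake_thread t = { 0 };

	usage_start();
	fake_cycles += 100;                  /* thread "runs" 100 cycles */
	usage_stop(&t);

	usage_start();
	fake_cycles += 40;                   /* 40 more, still running */
	printf("usage so far: %llu\n",
	       (unsigned long long)thread_usage(&t));   /* prints 140 */

	fake_cycles += 10;
	usage_stop(&t);
	printf("final usage:  %llu\n",
	       (unsigned long long)thread_usage(&t));   /* prints 150 */

	return 0;
}

In the kernel itself the query can run while a switch-out is updating the same data, which is why z_sched_usage_stop() and z_sched_thread_usage() both take usage_lock: serializing the two paths keeps the in-progress slice from being counted twice or lost when usage0 and thread->base.usage are rewritten.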