From fc577c4bd1e87ecfa6954236d7511d81509f8270 Mon Sep 17 00:00:00 2001
From: Daniel Leung
Date: Thu, 27 Aug 2020 13:54:14 -0700
Subject: [PATCH] kernel: gather basic thread runtime statistics

This adds the bits to gather the first thread runtime statistic:
thread execution time. It provides a rough idea of how much time a
thread spends actively executing. It is not used yet, pending
follow-up commits that combine it with the context-switch trace
points, since they instrument the same locations.

Signed-off-by: Daniel Leung
---
 include/kernel.h                 | 43 ++++++++++++++++++++
 kernel/Kconfig                   | 13 ++++++
 kernel/include/kernel_internal.h | 12 ++++++
 kernel/include/kswap.h           |  1 -
 kernel/thread.c                  | 69 ++++++++++++++++++++++++++++++++
 5 files changed, 137 insertions(+), 1 deletion(-)

diff --git a/include/kernel.h b/include/kernel.h
index a7ae21d936a..40c6830b7a4 100644
--- a/include/kernel.h
+++ b/include/kernel.h
@@ -280,6 +280,22 @@ struct z_poller {
 	uint8_t mode;
 };
 
+#ifdef CONFIG_THREAD_RUNTIME_STATS
+struct k_thread_runtime_stats {
+	/* Thread execution cycles */
+	uint64_t execution_cycles;
+};
+
+typedef struct k_thread_runtime_stats k_thread_runtime_stats_t;
+
+struct _thread_runtime_stats {
+	/* Timestamp when last switched in */
+	uint32_t last_switched_in;
+
+	k_thread_runtime_stats_t stats;
+};
+#endif
+
 /**
  * @ingroup thread_apis
  * Thread Structure
@@ -384,6 +400,11 @@ struct k_thread {
 	uintptr_t tls;
 #endif /* CONFIG_THREAD_LOCAL_STORAGE */
 
+#ifdef CONFIG_THREAD_RUNTIME_STATS
+	/** Runtime statistics */
+	struct _thread_runtime_stats rt_stats;
+#endif
+
 	/** arch-specifics: must always be at the end */
 	struct _thread_arch arch;
 };
@@ -5062,6 +5083,28 @@ __syscall void k_str_out(char *c, size_t n);
  */
 __syscall int k_float_disable(struct k_thread *thread);
 
+#ifdef CONFIG_THREAD_RUNTIME_STATS
+
+/**
+ * @brief Get the runtime statistics of a thread
+ *
+ * @param thread ID of thread.
+ * @param stats Pointer to struct to copy statistics into.
+ * @return -EINVAL if null pointers, otherwise 0
+ */
+int k_thread_runtime_stats_get(k_tid_t thread,
+			       k_thread_runtime_stats_t *stats);
+
+/**
+ * @brief Get the runtime statistics of all threads
+ *
+ * @param stats Pointer to struct to copy statistics into.
+ * @return -EINVAL if null pointers, otherwise 0
+ */
+int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats);
+
+#endif
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/kernel/Kconfig b/kernel/Kconfig
index 282ad62a5b4..251cddab4d0 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -351,6 +351,19 @@ config THREAD_MAX_NAME_LEN
 	  Thread names get stored in the k_thread struct. Indicate the max
 	  name length, including the terminating NULL byte. Reduce this value
 	  to conserve memory.
+
+config INSTRUMENT_THREAD_SWITCHING
+	bool
+
+config THREAD_RUNTIME_STATS
+	bool "Thread runtime statistics"
+	select INSTRUMENT_THREAD_SWITCHING
+	help
+	  Gather thread runtime statistics.
+
+	  For example:
+	    - Thread total execution cycles
+
 endmenu
 
 menu "Work Queue Options"
diff --git a/kernel/include/kernel_internal.h b/kernel/include/kernel_internal.h
index 277cea79f00..a77935d2124 100644
--- a/kernel/include/kernel_internal.h
+++ b/kernel/include/kernel_internal.h
@@ -155,6 +155,18 @@ struct gdb_ctx;
 extern int z_gdb_main_loop(struct gdb_ctx *ctx, bool start);
 #endif
 
+#ifdef CONFIG_THREAD_RUNTIME_STATS
+void z_thread_mark_switched_in(void);
+void z_thread_mark_switched_out(void);
+#else
+static inline void z_thread_mark_switched_in(void)
+{
+}
+
+static inline void z_thread_mark_switched_out(void)
+{
+}
+#endif
 
 #ifdef __cplusplus
 }
diff --git a/kernel/include/kswap.h b/kernel/include/kswap.h
index 76c634b25f2..a9c7c3e4a2b 100644
--- a/kernel/include/kswap.h
+++ b/kernel/include/kswap.h
@@ -113,7 +113,6 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
 		arch_switch(new_thread->switch_handle,
 			    &old_thread->switch_handle);
 
-
 	}
 
 	if (is_spinlock) {
diff --git a/kernel/thread.c b/kernel/thread.c
index 077a91c6b64..ca1b641d0e1 100644
--- a/kernel/thread.c
+++ b/kernel/thread.c
@@ -33,6 +33,10 @@
 #include <logging/log.h>
 LOG_MODULE_DECLARE(os);
 
+#ifdef CONFIG_THREAD_RUNTIME_STATS
+k_thread_runtime_stats_t threads_runtime_stats;
+#endif
+
 #ifdef CONFIG_THREAD_MONITOR
 /* This lock protects the linked list of active threads; i.e. the
  * initial _kernel.threads pointer and the linked list made up of
@@ -640,6 +644,10 @@ char *z_setup_new_thread(struct k_thread *new_thread,
 	new_thread->resource_pool = _current->resource_pool;
 	sys_trace_thread_create(new_thread);
 
+#ifdef CONFIG_THREAD_RUNTIME_STATS
+	memset(&new_thread->rt_stats, 0, sizeof(new_thread->rt_stats));
+#endif
+
 	return stack_ptr;
 }
 
@@ -1016,3 +1024,64 @@ static inline k_ticks_t z_vrfy_k_thread_timeout_expires_ticks(
 }
 #include <syscalls/k_thread_timeout_expires_ticks_mrsh.c>
 #endif
+
+#ifdef CONFIG_THREAD_RUNTIME_STATS
+void z_thread_mark_switched_in(void)
+{
+	struct k_thread *thread;
+
+	thread = k_current_get();
+	thread->rt_stats.last_switched_in = k_cycle_get_32();
+}
+
+void z_thread_mark_switched_out(void)
+{
+	uint32_t now;
+	uint64_t diff;
+	struct k_thread *thread;
+
+	thread = k_current_get();
+
+	if (unlikely(thread->rt_stats.last_switched_in == 0)) {
+		/* Has not run before */
+		return;
+	}
+
+	if (unlikely(thread->base.thread_state == _THREAD_DUMMY)) {
+		/* dummy thread has no stat struct */
+		return;
+	}
+
+	now = k_cycle_get_32();
+	diff = (uint64_t)now - thread->rt_stats.last_switched_in;
+	thread->rt_stats.stats.execution_cycles += diff;
+	thread->rt_stats.last_switched_in = 0;
+
+	threads_runtime_stats.execution_cycles += diff;
+}
+
+int k_thread_runtime_stats_get(k_tid_t thread,
+			       k_thread_runtime_stats_t *stats)
+{
+	if ((thread == NULL) || (stats == NULL)) {
+		return -EINVAL;
+	}
+
+	(void)memcpy(stats, &thread->rt_stats.stats,
+		     sizeof(thread->rt_stats.stats));
+
+	return 0;
+}
+
+int k_thread_runtime_stats_all_get(k_thread_runtime_stats_t *stats)
+{
+	if (stats == NULL) {
+		return -EINVAL;
+	}
+
+	(void)memcpy(stats, &threads_runtime_stats,
+		     sizeof(threads_runtime_stats));
+
+	return 0;
+}
+#endif /* CONFIG_THREAD_RUNTIME_STATS */
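
A minimal sketch of how the new counters could be consumed once
CONFIG_THREAD_RUNTIME_STATS is enabled. This is not part of the diff
above; the function name, printout format, and load-ratio arithmetic
are illustrative only.

#include <zephyr.h>
#include <sys/printk.h>

/* Print how many cycles a thread has spent running, plus its share of
 * all tracked execution time. Pass e.g. k_current_get() as tid.
 */
static void report_thread_load(k_tid_t tid)
{
	k_thread_runtime_stats_t thread_stats;
	k_thread_runtime_stats_t all_stats;

	if ((k_thread_runtime_stats_get(tid, &thread_stats) != 0) ||
	    (k_thread_runtime_stats_all_get(&all_stats) != 0)) {
		printk("runtime stats unavailable\n");
		return;
	}

	/* execution_cycles accumulates raw k_cycle_get_32() deltas */
	printk("thread: %llu cycles, all threads: %llu cycles\n",
	       (unsigned long long)thread_stats.execution_cycles,
	       (unsigned long long)all_stats.execution_cycles);

	if (all_stats.execution_cycles > 0U) {
		printk("approx. share of tracked time: %llu%%\n",
		       (unsigned long long)((thread_stats.execution_cycles * 100U) /
					    all_stats.execution_cycles));
	}
}

Note that a thread's counter only advances while that thread is
switched in; time spent blocked or preempted is not accumulated.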