kernel: thread: Add k_thread_foreach_filter_by_cpu() and unlocked version

Add functions k_thread_foreach_unlocked_filter_by_cpu() and
k_thread_foreach_filter_by_cpu() to loop through the threads on the
specified cpu only.

Signed-off-by: Jyri Sarha <jyri.sarha@linux.intel.com>
This commit is contained in:
Jyri Sarha 2024-02-15 17:35:00 +02:00 committed by Fabio Baltieri
commit b8a1a62048
2 changed files with 134 additions and 0 deletions

View file

@ -122,6 +122,38 @@ typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
*/
void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
/**
* @brief Iterate over all the threads running on the specified cpu.
*
* This function does the same thing as k_thread_foreach(), except that
* it only loops through the threads running on the specified cpu.
* If CONFIG_SMP is not defined, this is the same as k_thread_foreach(),
* with an assert that cpu == 0.
*
* @param cpu The filtered cpu number
* @param user_cb Pointer to the user callback function.
* @param user_data Pointer to user data.
*
* @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
* to be effective.
* @note This API uses @ref k_spin_lock to protect the _kernel.threads
* list which means creation of new threads and terminations of existing
* threads are blocked until this API returns.
*/
#ifdef CONFIG_SMP
void k_thread_foreach_filter_by_cpu(unsigned int cpu,
				    k_thread_user_cb_t user_cb, void *user_data);
#else
static inline
void k_thread_foreach_filter_by_cpu(unsigned int cpu,
				    k_thread_user_cb_t user_cb, void *user_data)
{
	/* Single-CPU build: cpu 0 is the only valid filter, so this
	 * degenerates to a plain k_thread_foreach() over every thread.
	 */
	ARG_UNUSED(cpu);
	__ASSERT(cpu == 0, "cpu filter out of bounds");

	k_thread_foreach(user_cb, user_data);
}
#endif
/**
* @brief Iterate over all the threads in the system without locking.
*
@ -152,6 +184,51 @@ void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
void k_thread_foreach_unlocked(
k_thread_user_cb_t user_cb, void *user_data);
/**
* @brief Iterate over the threads running on the specified cpu without locking.
*
* This function does the same thing as k_thread_foreach_unlocked(),
* except that it only loops through the threads running on the
* specified cpu. If CONFIG_SMP is not defined, this is the same as
* k_thread_foreach_unlocked(), with an assert requiring cpu == 0.
*
* @param cpu The filtered cpu number
* @param user_cb Pointer to the user callback function.
* @param user_data Pointer to user data.
*
* @note @kconfig{CONFIG_THREAD_MONITOR} must be set for this function
* to be effective.
* @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
* queue elements. It unlocks it during user callback function processing.
* If a new task is created when this @c foreach function is in progress,
* the added new task would not be included in the enumeration.
* If a task is aborted during this enumeration, there would be a race here
* and there is a possibility that this aborted task would be included in the
* enumeration.
* @note If the task is aborted and the memory occupied by its @c k_thread
* structure is reused when this @c k_thread_foreach_unlocked is in progress
* it might even lead to unstable system behavior.
* This function may never return, as it would follow some @c next task
* pointers treating given pointer as a pointer to the k_thread structure
* while it is something different right now.
* Do not reuse the memory that was occupied by k_thread structure of aborted
* task if it was aborted after this function was called in any context.
*/
#ifdef CONFIG_SMP
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
					     k_thread_user_cb_t user_cb, void *user_data);
#else
static inline
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu,
					     k_thread_user_cb_t user_cb, void *user_data)
{
	/* Single-CPU build: cpu 0 is the only valid filter, so this
	 * degenerates to a plain k_thread_foreach_unlocked() over every
	 * thread.
	 */
	ARG_UNUSED(cpu);
	__ASSERT(cpu == 0, "cpu filter out of bounds");

	k_thread_foreach_unlocked(user_cb, user_data);
}
#endif
/** @} */
/**

View file

@ -83,3 +83,60 @@ void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
k_spin_unlock(&z_thread_monitor_lock, key);
}
#ifdef CONFIG_SMP
/*
 * Locked walk of _kernel.threads, invoking user_cb only for threads
 * whose last-known cpu matches the filter. The monitor lock is held
 * across the whole walk (including the callbacks), so user_cb must not
 * create or abort threads, directly or indirectly, or it will deadlock.
 */
void k_thread_foreach_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb,
				    void *user_data)
{
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
	__ASSERT(cpu < CONFIG_MP_MAX_NUM_CPUS, "cpu filter out of bounds");

	/*
	 * Lock is needed to make sure that the _kernel.threads is not being
	 * modified by the user_cb either directly or indirectly.
	 * The indirect ways are through calling k_thread_create and
	 * k_thread_abort from user_cb.
	 */
	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		/* Zephyr coding style: always brace control bodies */
		if (thread->base.cpu == cpu) {
			user_cb(thread, user_data);
		}
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);

	k_spin_unlock(&z_thread_monitor_lock, key);
}
/*
 * Unlocked variant: walks _kernel.threads under the monitor lock but
 * releases it around each matching callback, so user_cb may itself
 * create or abort threads. See the header documentation for the races
 * this permits.
 */
void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb,
					     void *user_data)
{
	struct k_thread *t;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
	__ASSERT(cpu < CONFIG_MP_MAX_NUM_CPUS, "cpu filter out of bounds");

	key = k_spin_lock(&z_thread_monitor_lock);

	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);

	for (t = _kernel.threads; t != NULL; t = t->next_thread) {
		if (t->base.cpu != cpu) {
			continue;
		}
		/* Drop the lock for the duration of the callback, then
		 * reacquire it before following t->next_thread.
		 */
		k_spin_unlock(&z_thread_monitor_lock, key);
		user_cb(t, user_data);
		key = k_spin_lock(&z_thread_monitor_lock);
	}

	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);

	k_spin_unlock(&z_thread_monitor_lock, key);
}
#endif /* CONFIG_SMP */