kernel: thread_monitor: refactor and remove duplicate code
Refactor the thread iteration code and remove duplication; nearly identical code was repeated four times. Signed-off-by: Anas Nashif <anas.nashif@intel.com>
commit a5413499f1
parent 7c68855053

1 changed file with 43 additions and 76 deletions
@@ -34,110 +34,77 @@ void z_thread_monitor_exit(struct k_thread *thread)
 	k_spin_unlock(&z_thread_monitor_lock, key);
 }
 
-void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
+/*
+ * Helper function to iterate over threads with optional filtering and locking behavior.
+ */
+static void thread_foreach_helper(k_thread_user_cb_t user_cb, void *user_data,
+				  bool unlocked, bool filter_by_cpu, unsigned int cpu)
 {
 	struct k_thread *thread;
 	k_spinlock_key_t key;
 
 	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
 
-	/*
-	 * Lock is needed to make sure that the _kernel.threads is not being
-	 * modified by the user_cb either directly or indirectly.
-	 * The indirect ways are through calling k_thread_create and
-	 * k_thread_abort from user_cb.
-	 */
-	key = k_spin_lock(&z_thread_monitor_lock);
-
-	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);
-
-	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
-		user_cb(thread, user_data);
-	}
-
-	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);
+	if (filter_by_cpu) {
+		__ASSERT(cpu < CONFIG_MP_MAX_NUM_CPUS, "cpu filter out of bounds");
+	}
+
+	key = k_spin_lock(&z_thread_monitor_lock);
+
+	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
+		/* cpu is only defined when SMP=y*/
+#ifdef CONFIG_SMP
+		bool on_cpu = (thread->base.cpu == cpu);
+#else
+		bool on_cpu = false;
+#endif
+		if (filter_by_cpu && !on_cpu) {
+			continue;
+		}
+
+		if (unlocked) {
+			k_spin_unlock(&z_thread_monitor_lock, key);
+			user_cb(thread, user_data);
+			key = k_spin_lock(&z_thread_monitor_lock);
+		} else {
+			user_cb(thread, user_data);
+		}
+	}
 
 	k_spin_unlock(&z_thread_monitor_lock, key);
 }
 
+/*
+ * Public API functions using the helper.
+ */
+void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
+{
+	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);
+	thread_foreach_helper(user_cb, user_data, false, false, 0);
+	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);
+}
+
 void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
 {
-	struct k_thread *thread;
-	k_spinlock_key_t key;
-
-	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
-
-	key = k_spin_lock(&z_thread_monitor_lock);
-
 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);
-
-	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
-		k_spin_unlock(&z_thread_monitor_lock, key);
-		user_cb(thread, user_data);
-		key = k_spin_lock(&z_thread_monitor_lock);
-	}
-
+	thread_foreach_helper(user_cb, user_data, true, false, 0);
 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);
-
-	k_spin_unlock(&z_thread_monitor_lock, key);
 }
 
 #ifdef CONFIG_SMP
 void k_thread_foreach_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb,
 				    void *user_data)
 {
-	struct k_thread *thread;
-	k_spinlock_key_t key;
-
-	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
-	__ASSERT(cpu < CONFIG_MP_MAX_NUM_CPUS, "cpu filter out of bounds");
-
-	/*
-	 * Lock is needed to make sure that the _kernel.threads is not being
-	 * modified by the user_cb either directly or indirectly.
-	 * The indirect ways are through calling k_thread_create and
-	 * k_thread_abort from user_cb.
-	 */
-	key = k_spin_lock(&z_thread_monitor_lock);
-
 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);
-
-	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
-		if (thread->base.cpu == cpu) {
-			user_cb(thread, user_data);
-		}
-	}
-
+	thread_foreach_helper(user_cb, user_data, false, true, cpu);
 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);
-
-	k_spin_unlock(&z_thread_monitor_lock, key);
 }
 
 void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb,
 					     void *user_data)
 {
-	struct k_thread *thread;
-	k_spinlock_key_t key;
-
-	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
-	__ASSERT(cpu < CONFIG_MP_MAX_NUM_CPUS, "cpu filter out of bounds");
-
-	key = k_spin_lock(&z_thread_monitor_lock);
-
 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);
-
-	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
-		if (thread->base.cpu == cpu) {
-			k_spin_unlock(&z_thread_monitor_lock, key);
-			user_cb(thread, user_data);
-			key = k_spin_lock(&z_thread_monitor_lock);
-		}
-	}
-
+	thread_foreach_helper(user_cb, user_data, true, true, cpu);
 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);
-
-	k_spin_unlock(&z_thread_monitor_lock, key);
 }
 #endif /* CONFIG_SMP */
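
For context, a minimal caller-side sketch of the API touched above; this is an illustration, not part of the commit. The callback count_cb, the wrapper print_thread_count(), and the assumption that CONFIG_THREAD_MONITOR=y are hypothetical; k_thread_foreach() and printk() are the existing Zephyr calls.

/* Illustration only: counting threads via the public API whose behavior
 * this refactor preserves. count_cb and print_thread_count are hypothetical.
 */
#include <zephyr/kernel.h>
#include <zephyr/sys/printk.h>

static void count_cb(const struct k_thread *thread, void *user_data)
{
	unsigned int *count = user_data;

	ARG_UNUSED(thread);
	(*count)++;
}

void print_thread_count(void)
{
	unsigned int count = 0;

	/* Walks _kernel.threads with z_thread_monitor_lock held. */
	k_thread_foreach(count_cb, &count);
	printk("%u threads\n", count);
}

The _unlocked and _filter_by_cpu variants are called the same way; only the locking around each callback invocation and the per-CPU filtering differ, which is what the unlocked and filter_by_cpu arguments of the new helper encode.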