kernel: use sched lock for k_thread_suspend/resume

This logic should be using the sched_lock and not its own
separate lock for these two functions.

Some simplifications were made: z_thread_single_resume and
z_thread_single_suspend were each used in only one place, and there was
some redundant logic for deciding whether to reschedule in the suspend
case.

Signed-off-by: Andrew Boie <andrew.p.boie@intel.com>
Andrew Boie 2020-02-14 10:52:49 -08:00 committed by Anas Nashif
commit 6cf496f324
2 changed files with 31 additions and 49 deletions
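
For context, a minimal caller-side sketch of the API this commit touches: one thread creates a worker, suspends it, and later resumes it, with both calls now funneling through the scheduler's lock. This sketch is not part of the commit; the stack size, priority, sleep intervals, and the worker_fn/worker_stack names are illustrative assumptions.

/*
 * Hypothetical usage sketch (not from this commit): suspend and resume
 * a worker thread from main().
 */
#include <zephyr.h>

K_THREAD_STACK_DEFINE(worker_stack, 1024);
static struct k_thread worker_thread;

static void worker_fn(void *p1, void *p2, void *p3)
{
	while (1) {
		/* periodic work */
		k_sleep(K_MSEC(100));
	}
}

void main(void)
{
	k_tid_t worker = k_thread_create(&worker_thread, worker_stack,
					 K_THREAD_STACK_SIZEOF(worker_stack),
					 worker_fn, NULL, NULL, NULL,
					 K_PRIO_PREEMPT(1), 0, K_NO_WAIT);

	k_sleep(K_MSEC(500));

	/* With this commit, suspend and resume both serialize on the
	 * scheduler's sched_spinlock instead of a private lock in thread.c.
	 */
	k_thread_suspend(worker);
	k_sleep(K_MSEC(500));
	k_thread_resume(worker);
}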

@@ -422,7 +422,7 @@ void z_sched_start(struct k_thread *thread)
 	z_reschedule(&sched_spinlock, key);
 }
 
-void z_thread_single_suspend(struct k_thread *thread)
+void z_impl_k_thread_suspend(struct k_thread *thread)
 {
 	(void)z_abort_thread_timeout(thread);
@@ -440,6 +440,34 @@ void z_thread_single_suspend(struct k_thread *thread)
 	}
 }
 
+#ifdef CONFIG_USERSPACE
+static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
+{
+	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
+	z_impl_k_thread_suspend(thread);
+}
+#include <syscalls/k_thread_suspend_mrsh.c>
+#endif
+
+void z_impl_k_thread_resume(struct k_thread *thread)
+{
+	k_spinlock_key_t key = k_spin_lock(&sched_spinlock);
+
+	z_mark_thread_as_not_suspended(thread);
+	ready_thread(thread);
+
+	z_reschedule(&sched_spinlock, key);
+}
+
+#ifdef CONFIG_USERSPACE
+static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
+{
+	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
+	z_impl_k_thread_resume(thread);
+}
+#include <syscalls/k_thread_resume_mrsh.c>
+#endif
+
 static _wait_q_t *pended_on(struct k_thread *thread)
 {
 	__ASSERT_NO_MSG(thread->base.pended_on);

@@ -29,7 +29,9 @@
 #include <irq_offload.h>
 #include <sys/check.h>
 
+#ifdef CONFIG_THREAD_MONITOR
 static struct k_spinlock lock;
+#endif
 
 #define _FOREACH_STATIC_THREAD(thread_data) \
 	Z_STRUCT_SECTION_FOREACH(_static_thread_data, thread_data)
@@ -672,54 +674,6 @@ k_tid_t z_vrfy_k_thread_create(struct k_thread *new_thread,
 #endif /* CONFIG_USERSPACE */
 #endif /* CONFIG_MULTITHREADING */
 
-extern void z_thread_single_suspend(struct k_thread *thread);
-
-void z_impl_k_thread_suspend(struct k_thread *thread)
-{
-	k_spinlock_key_t key = k_spin_lock(&lock);
-
-	z_thread_single_suspend(thread);
-
-	if (thread == _current) {
-		z_reschedule(&lock, key);
-	} else {
-		k_spin_unlock(&lock, key);
-	}
-}
-
-#ifdef CONFIG_USERSPACE
-static inline void z_vrfy_k_thread_suspend(struct k_thread *thread)
-{
-	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
-	z_impl_k_thread_suspend(thread);
-}
-#include <syscalls/k_thread_suspend_mrsh.c>
-#endif
-
-void z_thread_single_resume(struct k_thread *thread)
-{
-	z_mark_thread_as_not_suspended(thread);
-	z_ready_thread(thread);
-}
-
-void z_impl_k_thread_resume(struct k_thread *thread)
-{
-	k_spinlock_key_t key = k_spin_lock(&lock);
-
-	z_thread_single_resume(thread);
-
-	z_reschedule(&lock, key);
-}
-
-#ifdef CONFIG_USERSPACE
-static inline void z_vrfy_k_thread_resume(struct k_thread *thread)
-{
-	Z_OOPS(Z_SYSCALL_OBJ(thread, K_OBJ_THREAD));
-	z_impl_k_thread_resume(thread);
-}
-#include <syscalls/k_thread_resume_mrsh.c>
-#endif
-
 #ifdef CONFIG_MULTITHREADING
 #ifdef CONFIG_USERSPACE