kernel: Update IPI usage in k_thread_priority_set()
1. The flagging of IPIs is moved out of k_thread_priority_set() into
   z_thread_prio_set(). This allows for an IPI to be done for a thread
   that had its priority bumped due to the handling of priority
   inheritance from a mutex.

2. k_thread_priority_set()'s check for sched_locked only applies to
   non-SMP builds that are using the old arch_swap() framework to
   switch between threads.

Incidentally, nearly all calls to flag_ipi() are now performed with
sched_spinlock being locked. The only exception is in slice_timeout().

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
This commit is contained in:
parent afb2791ccf
commit 9ff5221d23

1 changed file with 35 additions and 13 deletions
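Note on point 1: the mutex priority-inheritance code reaches z_thread_prio_set() directly rather than going through k_thread_priority_set(), so flagging the IPI only in the outer function missed that path. Below is a minimal host-side model of the IPI decision this commit centralizes (plain C, illustrative only; ipi_needed() is a hypothetical name, not a Zephyr API; recall that in Zephyr a numerically lower priority value is a higher priority, so "old_prio > prio" is a raise):

#include <stdbool.h>
#include <stdio.h>

/*
 * Host-side model of the IPI decision that now lives inside
 * z_thread_prio_set().  ipi_needed() is a hypothetical name used
 * only for this sketch.
 */
static bool ipi_needed(bool queued, bool running_elsewhere,
		       int old_prio, int prio)
{
	if (queued && (old_prio > prio)) {
		/* A queued thread was raised: it may now deserve to
		 * preempt whatever some other CPU is running.
		 */
		return true;
	}
	if (running_elsewhere && (old_prio < prio)) {
		/* A thread running on another CPU was lowered: that
		 * CPU must re-evaluate its scheduling decision.
		 */
		return true;
	}
	return false;
}

int main(void)
{
	/* Mutex inheritance boosts a queued owner from 7 to 3: IPI. */
	printf("boost queued owner:   %d\n", ipi_needed(true, false, 7, 3));

	/* The boost is undone while the owner runs on another CPU: IPI. */
	printf("restore remote owner: %d\n", ipi_needed(false, true, 3, 7));

	/* Raising a thread already running elsewhere: no IPI needed
	 * from this path.
	 */
	printf("raise remote thread:  %d\n", ipi_needed(false, true, 7, 3));

	return 0;
}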
@@ -348,11 +348,11 @@ static void update_cache(int preempt_ok)
 #endif /* CONFIG_SMP */
 }
 
-static bool thread_active_elsewhere(struct k_thread *thread)
+static struct _cpu *thread_active_elsewhere(struct k_thread *thread)
 {
-	/* True if the thread is currently running on another CPU.
-	 * There are more scalable designs to answer this question in
-	 * constant time, but this is fine for now.
+	/* Returns pointer to _cpu if the thread is currently running on
+	 * another CPU. There are more scalable designs to answer this
+	 * question in constant time, but this is fine for now.
 	 */
 #ifdef CONFIG_SMP
 	int currcpu = _current_cpu->id;
@@ -362,12 +362,12 @@ static bool thread_active_elsewhere(struct k_thread *thread)
 	for (int i = 0; i < num_cpus; i++) {
 		if ((i != currcpu) &&
 		    (_kernel.cpus[i].current == thread)) {
-			return true;
+			return &_kernel.cpus[i];
 		}
 	}
 #endif /* CONFIG_SMP */
 	ARG_UNUSED(thread);
-	return false;
+	return NULL;
 }
 
 static void ready_thread(struct k_thread *thread)
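The return-type change turns thread_active_elsewhere() from a yes/no answer into a lookup that also says which CPU has the thread; callers that only need the boolean compare against NULL, as the hunks below show. A small host-side model of the new contract (stand-in types for illustration, not the kernel's definitions; CPU 0 plays the role of the calling CPU):

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel types. */
struct k_thread { int dummy; };

struct _cpu {
	int id;
	struct k_thread *current;
};

#define NUM_CPUS 2

static struct _cpu cpus[NUM_CPUS];

/* Model: return the CPU running the thread elsewhere, or NULL.
 * (The real function skips the caller's own CPU; here that is CPU 0.)
 */
static struct _cpu *thread_active_elsewhere(struct k_thread *thread)
{
	for (int i = 1; i < NUM_CPUS; i++) {
		if (cpus[i].current == thread) {
			return &cpus[i];
		}
	}
	return NULL;
}

int main(void)
{
	struct k_thread t;

	cpus[1].id = 1;
	cpus[1].current = &t;

	struct _cpu *cpu = thread_active_elsewhere(&t);

	if (cpu == NULL) {
		printf("not running elsewhere\n");
	} else {
		/* The pointer identifies which CPU would need an IPI. */
		printf("running on CPU %d\n", cpu->id);
	}
	return 0;
}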
@@ -390,7 +390,7 @@ static void ready_thread(struct k_thread *thread)
 
 void z_ready_thread_locked(struct k_thread *thread)
 {
-	if (!thread_active_elsewhere(thread)) {
+	if (thread_active_elsewhere(thread) == NULL) {
 		ready_thread(thread);
 	}
 }
@@ -398,7 +398,7 @@ void z_ready_thread_locked(struct k_thread *thread)
 void z_ready_thread(struct k_thread *thread)
 {
 	K_SPINLOCK(&_sched_spinlock) {
-		if (!thread_active_elsewhere(thread)) {
+		if (thread_active_elsewhere(thread) == NULL) {
 			ready_thread(thread);
 		}
 	}
@@ -466,7 +466,10 @@ static void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
 	 * halt itself in the IPI. Otherwise it's unscheduled, so we
	 * can clean it up directly.
	 */
-	if (thread_active_elsewhere(thread)) {
+	struct _cpu *cpu = thread_active_elsewhere(thread);
+
+	if (cpu != NULL) {
 		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
 					      : _THREAD_SUSPENDING);
 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
@@ -731,19 +734,38 @@ void z_unpend_thread(struct k_thread *thread)
 bool z_thread_prio_set(struct k_thread *thread, int prio)
 {
 	bool need_sched = 0;
+	int old_prio = thread->base.prio;
 
 	K_SPINLOCK(&_sched_spinlock) {
 		need_sched = z_is_thread_ready(thread);
 
 		if (need_sched) {
-			/* Don't requeue on SMP if it's the running thread */
 			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
 				dequeue_thread(thread);
 				thread->base.prio = prio;
 				queue_thread(thread);
+
+				if (old_prio > prio) {
+					flag_ipi();
+				}
 			} else {
+				/*
+				 * This is a running thread on SMP. Update its
+				 * priority, but do not requeue it. An IPI is
+				 * needed if the priority is both being lowered
+				 * and it is running on another CPU.
+				 */
+
 				thread->base.prio = prio;
+
+				struct _cpu *cpu;
+
+				cpu = thread_active_elsewhere(thread);
+				if ((cpu != NULL) && (old_prio < prio)) {
+					flag_ipi();
+				}
 			}
 
 			update_cache(1);
 		} else {
 			thread->base.prio = prio;
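Both flag_ipi() calls in this hunk sit under _sched_spinlock, matching the commit message. The visible effect is that lowering a thread busy on another CPU now takes hold promptly. A hedged usage sketch (assumes an SMP build with CONFIG_SCHED_IPI_SUPPORTED; worker is a hypothetical thread assumed to be spinning at priority 5 on another CPU):

#include <zephyr/kernel.h>

extern struct k_thread worker;	/* hypothetical: spinning on another CPU */

void demote_worker(void)
{
	/* old_prio (5) < new prio (10): the thread is being lowered
	 * while running elsewhere, so z_thread_prio_set() flags an IPI
	 * and the other CPU reschedules promptly instead of waiting
	 * for its next interrupt or kernel entry.
	 */
	k_thread_priority_set(&worker, 10);
}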
@@ -1006,8 +1028,8 @@ void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
 
 	bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);
 
-	flag_ipi();
-	if (need_sched && (_current->base.sched_locked == 0U)) {
+	if ((need_sched) && (IS_ENABLED(CONFIG_SMP) ||
+	    (_current->base.sched_locked == 0U))) {
 		z_reschedule_unlocked();
 	}
 }
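This hunk is point 2 of the commit message: sched_locked only gates the legacy non-SMP arch_swap() path, so on SMP builds the reschedule is attempted unconditionally (presumably because the SMP scheduling path accounts for sched_locked on its own). On a uniprocessor build the existing k_sched_lock() behavior is preserved. A hedged sketch using the real k_sched_lock()/k_sched_unlock() API with a hypothetical thread name:

#include <zephyr/kernel.h>

extern struct k_thread peer;	/* hypothetical thread at a low priority */

void raise_under_sched_lock(void)
{
	k_sched_lock();

	/* Non-SMP: sched_locked != 0U suppresses the immediate
	 * z_reschedule_unlocked(), so the raised thread cannot run
	 * until the matching k_sched_unlock().  On SMP this gate no
	 * longer applies here.
	 */
	k_thread_priority_set(&peer, -1);	/* raise to a coop priority */

	k_sched_unlock();	/* non-SMP reschedule point */
}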
@@ -1219,7 +1241,7 @@ void z_impl_k_wakeup(k_tid_t thread)
 
 	z_mark_thread_as_not_suspended(thread);
 
-	if (!thread_active_elsewhere(thread)) {
+	if (thread_active_elsewhere(thread) == NULL) {
 		ready_thread(thread);
 	}
 