posix: pthread: replace irq_lock with spinlock
We shouldn't use swapping with an interrupt lock held, as it works
incorrectly on SMP platforms: irq_lock() only masks interrupts on the
local CPU, so threads on other CPUs can still race into the same
critical section. Fix that by replacing irq_lock with a spinlock in
the pthread subsystem.

NOTE: we fix this in a simple way, with a single spinlock shared by
mutex / cond_var / barrier. This could be improved later (e.g. split
into several spinlocks).

Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
Signed-off-by: Evgeniy Paltsev <PaltsevEvgeniy@gmail.com>
parent 535fc38fe7
commit 4e0f7ea540
3 changed files with 27 additions and 19 deletions
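The change is mechanical across all three files: each irq_lock()/irq_unlock()
critical section becomes a k_spin_lock()/k_spin_unlock() section on the shared
z_pthread_spinlock, and the z_*_irqlock() scheduler entry points become their
spinlock-taking counterparts, which release the lock atomically with the
context switch. A minimal sketch of the before/after pattern, using only
identifiers that appear in the diff (Zephyr kernel internals, not
standalone-compilable):

    /* Before: irq_lock() only masks interrupts on the local CPU, so on
     * SMP another core can still enter the same critical section.
     */
    unsigned int key = irq_lock();
    /* ... mutate shared pthread state ... */
    ret = z_pend_curr_irqlock(key, &wait_q, timeout);

    /* After: k_spin_lock() masks local interrupts and also spins on an
     * atomic flag, giving exclusion across all CPUs.
     */
    k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);
    /* ... mutate shared pthread state ... */
    ret = z_pend_curr(&z_pthread_spinlock, key, &wait_q, timeout);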
@@ -9,9 +9,11 @@
 #include <ksched.h>
 #include <wait_q.h>
 
+extern struct k_spinlock z_pthread_spinlock;
+
 int pthread_barrier_wait(pthread_barrier_t *b)
 {
-        unsigned int key = irq_lock();
+        k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);
         int ret = 0;
 
         b->count++;
@@ -22,10 +24,10 @@ int pthread_barrier_wait(pthread_barrier_t *b)
                 while (z_waitq_head(&b->wait_q)) {
                         _ready_one_thread(&b->wait_q);
                 }
-                z_reschedule_irqlock(key);
+                z_reschedule(&z_pthread_spinlock, key);
                 ret = PTHREAD_BARRIER_SERIAL_THREAD;
         } else {
-                (void) z_pend_curr_irqlock(key, &b->wait_q, K_FOREVER);
+                (void) z_pend_curr(&z_pthread_spinlock, key, &b->wait_q, K_FOREVER);
         }
 
         return ret;
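For context, a minimal hosted-POSIX usage sketch (not part of this patch)
showing the semantics the function above preserves: exactly one waiter per
cycle, here the last thread to arrive, is released with
PTHREAD_BARRIER_SERIAL_THREAD, while the rest get 0:

    #include <pthread.h>
    #include <stdio.h>

    #define NTHREADS 4

    static pthread_barrier_t barrier;

    static void *worker(void *arg)
    {
            (void)arg;

            /* Block until all NTHREADS workers have arrived. */
            int rc = pthread_barrier_wait(&barrier);

            /* One waiter per cycle gets the special value; useful for
             * one-time post-barrier cleanup.
             */
            if (rc == PTHREAD_BARRIER_SERIAL_THREAD) {
                    printf("serial thread released\n");
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t[NTHREADS];

            pthread_barrier_init(&barrier, NULL, NTHREADS);
            for (int i = 0; i < NTHREADS; i++) {
                    pthread_create(&t[i], NULL, worker, NULL);
            }
            for (int i = 0; i < NTHREADS; i++) {
                    pthread_join(t[i], NULL);
            }
            pthread_barrier_destroy(&barrier);
            return 0;
    }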
@@ -9,6 +9,8 @@
 #include <wait_q.h>
 #include <posix/pthread.h>
 
+extern struct k_spinlock z_pthread_spinlock;
+
 int64_t timespec_to_timeoutms(const struct timespec *abstime);
 
 static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut,
@@ -16,12 +18,13 @@ static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut,
 {
         __ASSERT(mut->lock_count == 1U, "");
 
-        int ret, key = irq_lock();
+        int ret;
+        k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);
 
         mut->lock_count = 0U;
         mut->owner = NULL;
         _ready_one_thread(&mut->wait_q);
-        ret = z_pend_curr_irqlock(key, &cv->wait_q, timeout);
+        ret = z_pend_curr(&z_pthread_spinlock, key, &cv->wait_q, timeout);
 
         /* FIXME: this extra lock (and the potential context switch it
          * can cause) could be optimized out.  At the point of the
@@ -49,23 +52,23 @@ static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut,
 
 int pthread_cond_signal(pthread_cond_t *cv)
 {
-        int key = irq_lock();
+        k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);
 
         _ready_one_thread(&cv->wait_q);
-        z_reschedule_irqlock(key);
+        z_reschedule(&z_pthread_spinlock, key);
 
         return 0;
 }
 
 int pthread_cond_broadcast(pthread_cond_t *cv)
 {
-        int key = irq_lock();
+        k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);
 
         while (z_waitq_head(&cv->wait_q)) {
                 _ready_one_thread(&cv->wait_q);
         }
 
-        z_reschedule_irqlock(key);
+        z_reschedule(&z_pthread_spinlock, key);
 
         return 0;
 }
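The __ASSERT(mut->lock_count == 1U, "") guard above encodes the POSIX
precondition that the caller holds the mutex exactly once while waiting;
cond_wait() then releases the mutex and pends on the condition variable under
the same spinlock, which is what makes the release-and-sleep atomic. A minimal
hosted-POSIX sketch (not part of this patch) of the calling pattern these
internals serve, with the predicate re-checked in a loop to tolerate spurious
wakeups:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool ready;

    /* Consumer: pthread_cond_wait() atomically drops `lock` and sleeps
     * on `cond`, then re-acquires `lock` before returning.
     */
    void wait_for_ready(void)
    {
            pthread_mutex_lock(&lock);
            while (!ready) {
                    pthread_cond_wait(&cond, &lock);
            }
            pthread_mutex_unlock(&lock);
    }

    /* Producer: flip the predicate under the lock, then wake one waiter. */
    void set_ready(void)
    {
            pthread_mutex_lock(&lock);
            ready = true;
            pthread_cond_signal(&cond);
            pthread_mutex_unlock(&lock);
    }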
@@ -9,6 +9,8 @@
 #include <wait_q.h>
 #include <posix/pthread.h>
 
+struct k_spinlock z_pthread_spinlock;
+
 int64_t timespec_to_timeoutms(const struct timespec *abstime);
 
 #define MUTEX_MAX_REC_LOCK 32767
@@ -22,13 +24,14 @@ static const pthread_mutexattr_t def_attr = {
 
 static int acquire_mutex(pthread_mutex_t *m, k_timeout_t timeout)
 {
-        int rc = 0, key = irq_lock();
+        int rc = 0;
+        k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);
 
         if (m->lock_count == 0U && m->owner == NULL) {
                 m->lock_count++;
                 m->owner = pthread_self();
 
-                irq_unlock(key);
+                k_spin_unlock(&z_pthread_spinlock, key);
                 return 0;
         } else if (m->owner == pthread_self()) {
                 if (m->type == PTHREAD_MUTEX_RECURSIVE &&
@@ -41,16 +44,16 @@ static int acquire_mutex(pthread_mutex_t *m, k_timeout_t timeout)
                         rc = EINVAL;
                 }
 
-                irq_unlock(key);
+                k_spin_unlock(&z_pthread_spinlock, key);
                 return rc;
         }
 
         if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
-                irq_unlock(key);
+                k_spin_unlock(&z_pthread_spinlock, key);
                 return EINVAL;
         }
 
-        rc = z_pend_curr_irqlock(key, &m->wait_q, timeout);
+        rc = z_pend_curr(&z_pthread_spinlock, key, &m->wait_q, timeout);
         if (rc != 0) {
                 rc = ETIMEDOUT;
         }
@@ -121,17 +124,17 @@ int pthread_mutex_lock(pthread_mutex_t *m)
  */
 int pthread_mutex_unlock(pthread_mutex_t *m)
 {
-        unsigned int key = irq_lock();
+        k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);
 
         k_tid_t thread;
 
         if (m->owner != pthread_self()) {
-                irq_unlock(key);
+                k_spin_unlock(&z_pthread_spinlock, key);
                 return EPERM;
         }
 
         if (m->lock_count == 0U) {
-                irq_unlock(key);
+                k_spin_unlock(&z_pthread_spinlock, key);
                 return EINVAL;
         }
 
@@ -144,13 +147,13 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
                         m->lock_count++;
                         arch_thread_return_value_set(thread, 0);
                         z_ready_thread(thread);
-                        z_reschedule_irqlock(key);
+                        z_reschedule(&z_pthread_spinlock, key);
                         return 0;
                 }
                 m->owner = NULL;
 
         }
-        irq_unlock(key);
+        k_spin_unlock(&z_pthread_spinlock, key);
         return 0;
 }
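The unlock path keeps its errno-style returns: EPERM for a caller that does
not own the mutex, EINVAL for a mutex with no outstanding lock. A minimal
hosted sketch (not part of this patch; for a default mutex a double unlock is
implementation-defined on other POSIX systems, but the ownership check shown
above returns EPERM in this implementation):

    #include <pthread.h>
    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
            pthread_mutex_t m;
            int rc = pthread_mutex_init(&m, NULL);

            if (rc != 0) {
                    return 1;
            }

            pthread_mutex_lock(&m);          /* uncontended: acquired at once */
            rc = pthread_mutex_unlock(&m);   /* owner unlocking: 0 */
            printf("first unlock: %d\n", rc);

            /* The caller no longer owns the mutex, so the ownership check
             * in pthread_mutex_unlock() rejects this with EPERM here
             * (implementation-defined elsewhere).
             */
            rc = pthread_mutex_unlock(&m);
            printf("second unlock: %d (EPERM is %d)\n", rc, EPERM);

            pthread_mutex_destroy(&m);
            return 0;
    }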