diff --git a/lib/posix/pthread_barrier.c b/lib/posix/pthread_barrier.c
index 21262562ce9..dcf9287e206 100644
--- a/lib/posix/pthread_barrier.c
+++ b/lib/posix/pthread_barrier.c
@@ -9,9 +9,11 @@
 #include
 #include
 
+extern struct k_spinlock z_pthread_spinlock;
+
 int pthread_barrier_wait(pthread_barrier_t *b)
 {
-	unsigned int key = irq_lock();
+	k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);
 	int ret = 0;
 
 	b->count++;
@@ -22,10 +24,10 @@ int pthread_barrier_wait(pthread_barrier_t *b)
 		while (z_waitq_head(&b->wait_q)) {
 			_ready_one_thread(&b->wait_q);
 		}
-		z_reschedule_irqlock(key);
+		z_reschedule(&z_pthread_spinlock, key);
 		ret = PTHREAD_BARRIER_SERIAL_THREAD;
 	} else {
-		(void) z_pend_curr_irqlock(key, &b->wait_q, K_FOREVER);
+		(void) z_pend_curr(&z_pthread_spinlock, key, &b->wait_q, K_FOREVER);
 	}
 
 	return ret;
diff --git a/lib/posix/pthread_cond.c b/lib/posix/pthread_cond.c
index e84f270cc3a..b397c4a7689 100644
--- a/lib/posix/pthread_cond.c
+++ b/lib/posix/pthread_cond.c
@@ -9,6 +9,8 @@
 #include
 #include
 
+extern struct k_spinlock z_pthread_spinlock;
+
 int64_t timespec_to_timeoutms(const struct timespec *abstime);
 
 static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut,
@@ -16,12 +18,13 @@ static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut,
 {
 	__ASSERT(mut->lock_count == 1U, "");
 
-	int ret, key = irq_lock();
+	int ret;
+	k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);
 
 	mut->lock_count = 0U;
 	mut->owner = NULL;
 	_ready_one_thread(&mut->wait_q);
-	ret = z_pend_curr_irqlock(key, &cv->wait_q, timeout);
+	ret = z_pend_curr(&z_pthread_spinlock, key, &cv->wait_q, timeout);
 
 	/* FIXME: this extra lock (and the potential context switch it
 	 * can cause) could be optimized out. At the point of the
@@ -49,23 +52,23 @@ static int cond_wait(pthread_cond_t *cv, pthread_mutex_t *mut,
 
 int pthread_cond_signal(pthread_cond_t *cv)
 {
-	int key = irq_lock();
+	k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);
 
 	_ready_one_thread(&cv->wait_q);
-	z_reschedule_irqlock(key);
+	z_reschedule(&z_pthread_spinlock, key);
 
 	return 0;
 }
 
 int pthread_cond_broadcast(pthread_cond_t *cv)
 {
-	int key = irq_lock();
+	k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);
 
 	while (z_waitq_head(&cv->wait_q)) {
 		_ready_one_thread(&cv->wait_q);
 	}
 
-	z_reschedule_irqlock(key);
+	z_reschedule(&z_pthread_spinlock, key);
 
 	return 0;
 }
diff --git a/lib/posix/pthread_mutex.c b/lib/posix/pthread_mutex.c
index 3e541b27ecb..e56fca39492 100644
--- a/lib/posix/pthread_mutex.c
+++ b/lib/posix/pthread_mutex.c
@@ -9,6 +9,8 @@
 #include
 #include
 
+struct k_spinlock z_pthread_spinlock;
+
 int64_t timespec_to_timeoutms(const struct timespec *abstime);
 
 #define MUTEX_MAX_REC_LOCK 32767
@@ -22,13 +24,14 @@ static const pthread_mutexattr_t def_attr = {
 
 static int acquire_mutex(pthread_mutex_t *m, k_timeout_t timeout)
 {
-	int rc = 0, key = irq_lock();
+	int rc = 0;
+	k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);
 
 	if (m->lock_count == 0U && m->owner == NULL) {
 		m->lock_count++;
 		m->owner = pthread_self();
 
-		irq_unlock(key);
+		k_spin_unlock(&z_pthread_spinlock, key);
 		return 0;
 	} else if (m->owner == pthread_self()) {
 		if (m->type == PTHREAD_MUTEX_RECURSIVE &&
@@ -41,16 +44,16 @@ static int acquire_mutex(pthread_mutex_t *m, k_timeout_t timeout)
 			rc = EINVAL;
 		}
 
-		irq_unlock(key);
+		k_spin_unlock(&z_pthread_spinlock, key);
 		return rc;
 	}
 
 	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
-		irq_unlock(key);
+		k_spin_unlock(&z_pthread_spinlock, key);
 		return EINVAL;
 	}
 
-	rc = z_pend_curr_irqlock(key, &m->wait_q, timeout);
+	rc = z_pend_curr(&z_pthread_spinlock, key, &m->wait_q, timeout);
 	if (rc != 0) {
 		rc = ETIMEDOUT;
 	}
@@ -121,17 +124,17 @@ int pthread_mutex_lock(pthread_mutex_t *m)
  */
 int pthread_mutex_unlock(pthread_mutex_t *m)
 {
-	unsigned int key = irq_lock();
+	k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);
 
 	k_tid_t thread;
 
 	if (m->owner != pthread_self()) {
-		irq_unlock(key);
+		k_spin_unlock(&z_pthread_spinlock, key);
 		return EPERM;
 	}
 
 	if (m->lock_count == 0U) {
-		irq_unlock(key);
+		k_spin_unlock(&z_pthread_spinlock, key);
 		return EINVAL;
 	}
 
@@ -144,13 +147,13 @@ int pthread_mutex_unlock(pthread_mutex_t *m)
 			m->lock_count++;
 			arch_thread_return_value_set(thread, 0);
 			z_ready_thread(thread);
-			z_reschedule_irqlock(key);
+			z_reschedule(&z_pthread_spinlock, key);
 			return 0;
 		}
 		m->owner = NULL;
 
 	}
 
-	irq_unlock(key);
+	k_spin_unlock(&z_pthread_spinlock, key);
 	return 0;
 }
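
Every hunk above applies the same mechanical conversion: the per-call irq_lock()/irq_unlock() critical sections are replaced by sections guarded by one shared struct k_spinlock (z_pthread_spinlock, defined in pthread_mutex.c and declared extern in the other files), and the scheduler helpers that consumed the IRQ key (z_reschedule_irqlock(), z_pend_curr_irqlock()) become their spinlock-aware counterparts, which release the lock on the caller's behalf. A minimal sketch of that pattern follows; it is not part of the patch, and the include path and example function are assumptions for illustration only.

    /* Sketch only -- shows the locking pattern used by this patch.
     * The <kernel.h> include path and the example function below are
     * assumptions for illustration, not additions made by the patch.
     */
    #include <kernel.h>

    /* Single shared lock: defined once in pthread_mutex.c, extern elsewhere. */
    extern struct k_spinlock z_pthread_spinlock;

    void example_locked_update(void)
    {
        /* was: unsigned int key = irq_lock(); */
        k_spinlock_key_t key = k_spin_lock(&z_pthread_spinlock);

        /* ... update shared pthread bookkeeping here ... */

        /* was: irq_unlock(key);
         * When the code needs to block or reschedule instead, the same
         * lock/key pair is handed to the scheduler, which drops it:
         *   z_reschedule(&z_pthread_spinlock, key);
         *   z_pend_curr(&z_pthread_spinlock, key, &wait_q, timeout);
         */
        k_spin_unlock(&z_pthread_spinlock, key);
    }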