/*
 * Copyright (c) 2018 Intel Corporation
 * Copyright (c) 2023 Meta
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "posix_internal.h"
#include "pthread_sched.h"

#include <stdio.h>

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/posix/pthread.h>
#include <zephyr/sys/slist.h>
#include <zephyr/sys/util.h>

#define ZEPHYR_TO_POSIX_PRIORITY(_zprio)                                                           \
        (((_zprio) < 0) ? (-1 * ((_zprio) + 1)) : (CONFIG_NUM_PREEMPT_PRIORITIES - (_zprio)-1))

#define POSIX_TO_ZEPHYR_PRIORITY(_prio, _pol)                                                      \
        (((_pol) == SCHED_FIFO) ? (-1 * ((_prio) + 1))                                             \
                                : (CONFIG_NUM_PREEMPT_PRIORITIES - (_prio)-1))

#define DEFAULT_PTHREAD_PRIORITY                                                                   \
        POSIX_TO_ZEPHYR_PRIORITY(K_LOWEST_APPLICATION_THREAD_PRIO, DEFAULT_PTHREAD_POLICY)
#define DEFAULT_PTHREAD_POLICY (IS_ENABLED(CONFIG_PREEMPT_ENABLED) ? SCHED_RR : SCHED_FIFO)

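/*
 * Worked example of the mapping above (editor's illustration, assuming
 * CONFIG_NUM_PREEMPT_PRIORITIES == 16 and CONFIG_NUM_COOP_PRIORITIES == 16):
 *
 *   POSIX_TO_ZEPHYR_PRIORITY(0, SCHED_FIFO)  == -1  (lowest cooperative)
 *   POSIX_TO_ZEPHYR_PRIORITY(15, SCHED_FIFO) == -16 (highest cooperative)
 *   POSIX_TO_ZEPHYR_PRIORITY(0, SCHED_RR)    == 15  (lowest preemptible)
 *   POSIX_TO_ZEPHYR_PRIORITY(15, SCHED_RR)   == 0   (highest preemptible)
 *
 * i.e. larger POSIX priorities map to more urgent (numerically smaller)
 * Zephyr priorities, and ZEPHYR_TO_POSIX_PRIORITY() is the exact inverse.
 */
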
LOG_MODULE_REGISTER(pthread, CONFIG_PTHREAD_LOG_LEVEL);

#ifdef CONFIG_DYNAMIC_THREAD_STACK_SIZE
#define DYNAMIC_STACK_SIZE CONFIG_DYNAMIC_THREAD_STACK_SIZE
#else
#define DYNAMIC_STACK_SIZE 0
#endif

/* The maximum allowed stack size (for this implementation) */
#define PTHREAD_STACK_MAX (UINT16_MAX + 1)

/*
 * The stack size is stored off-by-one (i.e. 0 means 1 byte) so that the full
 * range [1, PTHREAD_STACK_MAX] fits into the 16-bit stacksize field.
 */
static inline size_t __get_attr_stacksize(const struct pthread_attr *attr)
{
        return attr->stacksize + 1;
}

static inline void __set_attr_stacksize(struct pthread_attr *attr, size_t size)
{
        attr->stacksize = size - 1;
}

/*
 * The guard size is split into 16 low bits plus a separate msbit flag so that
 * the maximum value, PTHREAD_STACK_MAX (i.e. BIT(16)), is still representable.
 */
static inline size_t __get_attr_guardsize(const struct pthread_attr *attr)
{
        return (attr->guardsize_msbit * BIT(16)) | attr->guardsize;
}

static inline void __set_attr_guardsize(struct pthread_attr *attr, size_t size)
{
        attr->guardsize_msbit = size == PTHREAD_STACK_MAX;
        attr->guardsize = size & BIT_MASK(16);
}

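/*
 * Round-trip example for the encodings above (editor's illustration):
 * __set_attr_stacksize(&attr, 65536) stores 65535 in the 16-bit field, which
 * __get_attr_stacksize() reads back as 65536. Likewise,
 * __set_attr_guardsize(&attr, PTHREAD_STACK_MAX) stores msbit = 1 and
 * guardsize = 0, which __get_attr_guardsize() reads back as BIT(16) == 65536.
 */
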
struct __pthread_cleanup {
        void (*routine)(void *arg);
        void *arg;
        sys_snode_t node;
};

enum posix_thread_qid {
        /* ready to be started via pthread_create() */
        POSIX_THREAD_READY_Q,
        /* running */
        POSIX_THREAD_RUN_Q,
        /* exited (either joinable or detached) */
        POSIX_THREAD_DONE_Q,
};

/* only 2 bits in struct pthread_attr for schedpolicy */
BUILD_ASSERT(SCHED_OTHER < BIT(2) && SCHED_FIFO < BIT(2) && SCHED_RR < BIT(2));

BUILD_ASSERT((PTHREAD_CREATE_DETACHED == 0 || PTHREAD_CREATE_JOINABLE == 0) &&
             (PTHREAD_CREATE_DETACHED == 1 || PTHREAD_CREATE_JOINABLE == 1));

BUILD_ASSERT((PTHREAD_CANCEL_ENABLE == 0 || PTHREAD_CANCEL_DISABLE == 0) &&
             (PTHREAD_CANCEL_ENABLE == 1 || PTHREAD_CANCEL_DISABLE == 1));

static void posix_thread_recycle(void);
static sys_dlist_t ready_q = SYS_DLIST_STATIC_INIT(&ready_q);
static sys_dlist_t run_q = SYS_DLIST_STATIC_INIT(&run_q);
static sys_dlist_t done_q = SYS_DLIST_STATIC_INIT(&done_q);
static struct posix_thread posix_thread_pool[CONFIG_MAX_PTHREAD_COUNT];
static struct k_spinlock pthread_pool_lock;
static int pthread_concurrency;

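/*
 * Every struct posix_thread lives on exactly one of the three queues above
 * (a summary inferred from the code below): pthread_create() moves a thread
 * from ready_q to run_q, posix_thread_finalize() moves it from run_q to
 * done_q, and posix_thread_recycle() moves detached or already-joined
 * threads from done_q back to ready_q for reuse.
 */
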
static const struct pthread_attr init_pthread_attrs = {
        .stack = NULL,
        .stacksize = 0,
        .guardsize = (BIT_MASK(16) & CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_DEFAULT),
        .priority = DEFAULT_PTHREAD_PRIORITY,
        .schedpolicy = DEFAULT_PTHREAD_POLICY,
        .guardsize_msbit = (BIT(16) & CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_DEFAULT),
        .initialized = true,
        .cancelstate = PTHREAD_CANCEL_ENABLE,
        .detachstate = PTHREAD_CREATE_JOINABLE,
};

/*
 * We reserve the MSB to mark a pthread_t as initialized (from the
 * perspective of the application). With a linear space, this means that
 * the theoretical pthread_t range is [0,2147483647].
 */
BUILD_ASSERT(CONFIG_MAX_PTHREAD_COUNT < PTHREAD_OBJ_MASK_INIT,
             "CONFIG_MAX_PTHREAD_COUNT is too high");

static inline size_t posix_thread_to_offset(struct posix_thread *t)
{
        return t - posix_thread_pool;
}

static inline size_t get_posix_thread_idx(pthread_t pth)
{
        return mark_pthread_obj_uninitialized(pth);
}

struct posix_thread *to_posix_thread(pthread_t pthread)
{
        k_spinlock_key_t key;
        struct posix_thread *t;
        bool actually_initialized;
        size_t bit = get_posix_thread_idx(pthread);

        /* if the provided thread does not claim to be initialized, it's invalid */
        if (!is_pthread_obj_initialized(pthread)) {
                LOG_ERR("pthread is not initialized (%x)", pthread);
                return NULL;
        }

        if (bit >= CONFIG_MAX_PTHREAD_COUNT) {
                LOG_ERR("Invalid pthread (%x)", pthread);
                return NULL;
        }

        t = &posix_thread_pool[bit];

        key = k_spin_lock(&pthread_pool_lock);
        /*
         * Denote a pthread as "initialized" (i.e. allocated) if it is not in ready_q.
         * This differs from other posix object allocation strategies because they use
         * a bitarray to indicate whether an object has been allocated.
         */
        actually_initialized =
                !(t->qid == POSIX_THREAD_READY_Q ||
                  (t->qid == POSIX_THREAD_DONE_Q && t->detachstate == PTHREAD_CREATE_DETACHED));
        k_spin_unlock(&pthread_pool_lock, key);

        if (!actually_initialized) {
                LOG_ERR("Pthread claims to be initialized (%x)", pthread);
                return NULL;
        }

        return &posix_thread_pool[bit];
}

pthread_t pthread_self(void)
{
        size_t bit;
        struct posix_thread *t;

        t = (struct posix_thread *)CONTAINER_OF(k_current_get(), struct posix_thread, thread);
        bit = posix_thread_to_offset(t);

        return mark_pthread_obj_initialized(bit);
}

int pthread_equal(pthread_t pt1, pthread_t pt2)
{
        return (pt1 == pt2);
}

static inline void __z_pthread_cleanup_init(struct __pthread_cleanup *c, void (*routine)(void *arg),
                                            void *arg)
{
        *c = (struct __pthread_cleanup){
                .routine = routine,
                .arg = arg,
                .node = {0},
        };
}

void __z_pthread_cleanup_push(void *cleanup[3], void (*routine)(void *arg), void *arg)
{
        struct posix_thread *const t = to_posix_thread(pthread_self());
        struct __pthread_cleanup *const c = (struct __pthread_cleanup *)cleanup;

        BUILD_ASSERT(3 * sizeof(void *) == sizeof(*c));
        __ASSERT_NO_MSG(t != NULL);
        __ASSERT_NO_MSG(c != NULL);
        __ASSERT_NO_MSG(routine != NULL);
        __z_pthread_cleanup_init(c, routine, arg);
        sys_slist_prepend(&t->cleanup_list, &c->node);
}

void __z_pthread_cleanup_pop(int execute)
{
        sys_snode_t *node;
        struct __pthread_cleanup *c;
        struct posix_thread *const t = to_posix_thread(pthread_self());

        __ASSERT_NO_MSG(t != NULL);
        node = sys_slist_get(&t->cleanup_list);
        __ASSERT_NO_MSG(node != NULL);
        c = CONTAINER_OF(node, struct __pthread_cleanup, node);
        __ASSERT_NO_MSG(c != NULL);
        __ASSERT_NO_MSG(c->routine != NULL);
        if (execute) {
                c->routine(c->arg);
        }
}

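/*
 * The two functions above back the standard pthread_cleanup_push() and
 * pthread_cleanup_pop() macros. Usage sketch (hypothetical application code,
 * not part of this file):
 *
 *      static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *      static void release(void *arg)
 *      {
 *              (void)pthread_mutex_unlock(arg);
 *      }
 *
 *      void *worker(void *arg)
 *      {
 *              (void)pthread_mutex_lock(&lock);
 *              pthread_cleanup_push(release, &lock);
 *              do_cancellable_work();
 *              pthread_cleanup_pop(1); // 1: run release() now
 *              return NULL;
 *      }
 */
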
static bool is_posix_policy_prio_valid(uint32_t priority, int policy)
{
        if (priority >= sched_get_priority_min(policy) &&
            priority <= sched_get_priority_max(policy)) {
                return true;
        }

        LOG_ERR("Invalid priority %d and / or policy %d", priority, policy);

        return false;
}

static uint32_t zephyr_to_posix_priority(int32_t z_prio, int *policy)
{
        if (z_prio < 0) {
                /* COOP priorities are in [-CONFIG_NUM_COOP_PRIORITIES, -1] */
                __ASSERT_NO_MSG(-z_prio <= CONFIG_NUM_COOP_PRIORITIES);
        } else {
                __ASSERT_NO_MSG(z_prio < CONFIG_NUM_PREEMPT_PRIORITIES);
        }

        *policy = (z_prio < 0) ? SCHED_FIFO : SCHED_RR;
        return ZEPHYR_TO_POSIX_PRIORITY(z_prio);
}

static int32_t posix_to_zephyr_priority(uint32_t priority, int policy)
{
        if (policy == SCHED_FIFO) {
                /* COOP: highest [-CONFIG_NUM_COOP_PRIORITIES, -1] lowest */
                __ASSERT_NO_MSG(priority < CONFIG_NUM_COOP_PRIORITIES);
        } else {
                /* PREEMPT: lowest [0, CONFIG_NUM_PREEMPT_PRIORITIES - 1] highest */
                __ASSERT_NO_MSG(priority < CONFIG_NUM_PREEMPT_PRIORITIES);
        }

        return POSIX_TO_ZEPHYR_PRIORITY(priority, policy);
}

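/*
 * Editor-added sanity check, a direct consequence of the macro definitions
 * near the top of this file: the POSIX -> Zephyr -> POSIX round trip must be
 * the identity for the lowest priority under both policies.
 */
BUILD_ASSERT(ZEPHYR_TO_POSIX_PRIORITY(POSIX_TO_ZEPHYR_PRIORITY(0, SCHED_FIFO)) == 0);
BUILD_ASSERT(ZEPHYR_TO_POSIX_PRIORITY(POSIX_TO_ZEPHYR_PRIORITY(0, SCHED_RR)) == 0);
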
/**
 * @brief Set scheduling parameter attributes in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setschedparam(pthread_attr_t *_attr, const struct sched_param *schedparam)
{
        struct pthread_attr *attr = (struct pthread_attr *)_attr;
        int priority = schedparam->sched_priority;

        if (attr == NULL || !attr->initialized ||
            !is_posix_policy_prio_valid(priority, attr->schedpolicy)) {
                LOG_ERR("Invalid pthread_attr_t or sched_param");
                return EINVAL;
        }

        attr->priority = priority;
        return 0;
}

/**
 * @brief Set stack attributes in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setstack(pthread_attr_t *_attr, void *stackaddr, size_t stacksize)
{
        struct pthread_attr *attr = (struct pthread_attr *)_attr;

        if (stackaddr == NULL) {
                LOG_ERR("NULL stack address");
                return EACCES;
        }

        if (stacksize == 0 || stacksize < PTHREAD_STACK_MIN || stacksize > PTHREAD_STACK_MAX) {
                LOG_ERR("Invalid stacksize %zu", stacksize);
                return EINVAL;
        }

        attr->stack = stackaddr;
        __set_attr_stacksize(attr, stacksize);
        return 0;
}

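/*
 * Usage sketch (hypothetical application code): supplying a statically
 * allocated stack instead of relying on the dynamic-stack path:
 *
 *      extern void *worker(void *arg);
 *
 *      K_THREAD_STACK_DEFINE(my_stack, 1024);
 *
 *      pthread_attr_t attr;
 *      pthread_t tid;
 *
 *      (void)pthread_attr_init(&attr);
 *      (void)pthread_attr_setstack(&attr, my_stack, K_THREAD_STACK_SIZEOF(my_stack));
 *      (void)pthread_create(&tid, &attr, worker, NULL);
 */
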
static bool pthread_attr_is_valid(const struct pthread_attr *attr)
{
        /* auto-alloc thread stack */
        if (attr == NULL) {
                return true;
        }

        /* caller-provided thread stack */
        if (!attr->initialized || attr->stack == NULL || attr->stacksize == 0 ||
            __get_attr_stacksize(attr) < PTHREAD_STACK_MIN) {
                LOG_ERR("pthread_attr_t is not initialized, has a NULL stack, or invalid size");
                return false;
        }

        /* require a valid scheduler policy */
        if (!valid_posix_policy(attr->schedpolicy)) {
                LOG_ERR("Invalid scheduler policy %d", attr->schedpolicy);
                return false;
        }

        return true;
}

static void posix_thread_recycle_work_handler(struct k_work *work)
{
        ARG_UNUSED(work);
        posix_thread_recycle();
}
static K_WORK_DELAYABLE_DEFINE(posix_thread_recycle_work, posix_thread_recycle_work_handler);

static void posix_thread_finalize(struct posix_thread *t, void *retval)
{
        sys_snode_t *node_l;
        k_spinlock_key_t key;
        pthread_key_obj *key_obj;
        pthread_thread_data *thread_spec_data;

        SYS_SLIST_FOR_EACH_NODE(&t->key_list, node_l) {
                thread_spec_data = (pthread_thread_data *)node_l;
                if (thread_spec_data != NULL) {
                        key_obj = thread_spec_data->key;
                        if (key_obj->destructor != NULL) {
                                (key_obj->destructor)(thread_spec_data->spec_data);
                        }
                }
        }

        /* move thread from run_q to done_q */
        key = k_spin_lock(&pthread_pool_lock);
        sys_dlist_remove(&t->q_node);
        sys_dlist_append(&done_q, &t->q_node);
        t->qid = POSIX_THREAD_DONE_Q;
        t->retval = retval;
        k_spin_unlock(&pthread_pool_lock, key);

        /* trigger recycle work */
        (void)k_work_schedule(&posix_thread_recycle_work, K_MSEC(CONFIG_PTHREAD_RECYCLER_DELAY_MS));

        /* abort the underlying k_thread */
        k_thread_abort(&t->thread);
}

FUNC_NORETURN
static void zephyr_thread_wrapper(void *arg1, void *arg2, void *arg3)
{
        int err;
        pthread_barrier_t barrier;
        void *(*fun_ptr)(void *arg) = arg2;
        struct posix_thread *t = CONTAINER_OF(k_current_get(), struct posix_thread, thread);

        if (IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
                /* cross the barrier so that pthread_create() can continue */
                barrier = POINTER_TO_UINT(arg3);
                err = pthread_barrier_wait(&barrier);
                __ASSERT_NO_MSG(err == 0 || err == PTHREAD_BARRIER_SERIAL_THREAD);
        }

        posix_thread_finalize(t, fun_ptr(arg1));

        CODE_UNREACHABLE;
}

static void posix_thread_recycle(void)
{
        k_spinlock_key_t key;
        struct posix_thread *t;
        struct posix_thread *safe_t;
        sys_dlist_t recyclables = SYS_DLIST_STATIC_INIT(&recyclables);

        key = k_spin_lock(&pthread_pool_lock);
        SYS_DLIST_FOR_EACH_CONTAINER_SAFE(&done_q, t, safe_t, q_node) {
                if (t->detachstate == PTHREAD_CREATE_JOINABLE) {
                        /* thread has not been joined yet */
                        continue;
                }

                sys_dlist_remove(&t->q_node);
                sys_dlist_append(&recyclables, &t->q_node);
        }
        k_spin_unlock(&pthread_pool_lock, key);

        if (sys_dlist_is_empty(&recyclables)) {
                return;
        }

        LOG_DBG("Recycling %zu threads", sys_dlist_len(&recyclables));

        if (IS_ENABLED(CONFIG_DYNAMIC_THREAD)) {
                SYS_DLIST_FOR_EACH_CONTAINER(&recyclables, t, q_node) {
                        if (t->dynamic_stack != NULL) {
                                LOG_DBG("Freeing thread stack %p", t->dynamic_stack);
                                (void)k_thread_stack_free(t->dynamic_stack);
                                t->dynamic_stack = NULL;
                        }
                }
        }

        key = k_spin_lock(&pthread_pool_lock);
        while (!sys_dlist_is_empty(&recyclables)) {
                sys_dlist_append(&ready_q, sys_dlist_get(&recyclables));
        }
        k_spin_unlock(&pthread_pool_lock, key);
}

/**
 * @brief Create a new thread.
 *
 * If attr is NULL, the thread stack is allocated dynamically with
 * DYNAMIC_STACK_SIZE; otherwise, the caller must provide a valid,
 * initialized attributes object with a stack.
 *
 * See IEEE 1003.1
 */
int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadroutine)(void *),
                   void *arg)
{
        int err;
        k_spinlock_key_t key;
        pthread_barrier_t barrier;
        struct posix_thread *t = NULL;
        struct pthread_attr attr_storage = init_pthread_attrs;
        struct pthread_attr *attr = (struct pthread_attr *)_attr;

        if (!pthread_attr_is_valid(attr)) {
                return EINVAL;
        }

        if (attr == NULL) {
                attr = &attr_storage;
                BUILD_ASSERT(DYNAMIC_STACK_SIZE <= PTHREAD_STACK_MAX);
                __set_attr_stacksize(attr, DYNAMIC_STACK_SIZE);
                attr->stack = k_thread_stack_alloc(__get_attr_stacksize(attr) +
                                                           __get_attr_guardsize(attr),
                                                   k_is_user_context() ? K_USER : 0);
                if (attr->stack == NULL) {
                        LOG_ERR("Unable to allocate stack of size %u", DYNAMIC_STACK_SIZE);
                        return EAGAIN;
                }
                LOG_DBG("Allocated thread stack %p", attr->stack);
        } else {
                __ASSERT_NO_MSG(attr != &attr_storage);
        }

        /* reclaim resources greedily */
        posix_thread_recycle();

        key = k_spin_lock(&pthread_pool_lock);
        if (!sys_dlist_is_empty(&ready_q)) {
                t = CONTAINER_OF(sys_dlist_get(&ready_q), struct posix_thread, q_node);

                /* initialize thread state */
                sys_dlist_append(&run_q, &t->q_node);
                t->qid = POSIX_THREAD_RUN_Q;
                t->detachstate = attr->detachstate;
                t->cancel_state = attr->cancelstate;
                t->cancel_pending = false;
                sys_slist_init(&t->key_list);
                sys_slist_init(&t->cleanup_list);
                t->dynamic_stack = _attr == NULL ? attr->stack : NULL;
        }
        k_spin_unlock(&pthread_pool_lock, key);

        if (t == NULL) {
                /* no threads are ready */
                LOG_ERR("No threads are ready");
                return EAGAIN;
        }

        if (IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
                err = pthread_barrier_init(&barrier, NULL, 2);
                if (err != 0) {
                        if (t->dynamic_stack != NULL) {
                                LOG_DBG("freeing thread stack at %p", attr->stack);
                                (void)k_thread_stack_free(attr->stack);
                        }

                        /* cannot allocate barrier. move thread back to ready_q */
                        key = k_spin_lock(&pthread_pool_lock);
                        sys_dlist_remove(&t->q_node);
                        sys_dlist_append(&ready_q, &t->q_node);
                        t->qid = POSIX_THREAD_READY_Q;
                        k_spin_unlock(&pthread_pool_lock, key);
                        return EAGAIN;
                }
        }

        /* spawn the thread */
        k_thread_create(&t->thread, attr->stack, __get_attr_stacksize(attr), zephyr_thread_wrapper,
                        (void *)arg, threadroutine,
                        IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER) ? UINT_TO_POINTER(barrier) : NULL,
                        posix_to_zephyr_priority(attr->priority, attr->schedpolicy), 0, K_NO_WAIT);

        if (IS_ENABLED(CONFIG_PTHREAD_CREATE_BARRIER)) {
                /* wait for the spawned thread to cross our barrier */
                err = pthread_barrier_wait(&barrier);
                __ASSERT_NO_MSG(err == 0 || err == PTHREAD_BARRIER_SERIAL_THREAD);
                err = pthread_barrier_destroy(&barrier);
                __ASSERT_NO_MSG(err == 0);
        }

        /* finally provide the initialized thread to the caller */
        *th = mark_pthread_obj_initialized(posix_thread_to_offset(t));

        LOG_DBG("Created pthread %p", &t->thread);

        return 0;
}

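/*
 * Usage sketch (hypothetical application code): creating a thread with a
 * dynamically allocated stack (attr == NULL, requires CONFIG_DYNAMIC_THREAD)
 * and joining it:
 *
 *      void *worker(void *arg)
 *      {
 *              return (void *)42;
 *      }
 *
 *      pthread_t tid;
 *      void *ret;
 *
 *      if (pthread_create(&tid, NULL, worker, NULL) == 0) {
 *              (void)pthread_join(tid, &ret); // ret == (void *)42
 *      }
 */
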
/*
 * Since the pthread_t:k_thread relationship is 1:1 (i.e. user threads are not
 * multiplexed on top of schedulable kernel entities), the concurrency level
 * is simply stored and reported back; it does not influence scheduling.
 */
int pthread_getconcurrency(void)
{
        int ret = 0;

        K_SPINLOCK(&pthread_pool_lock) {
                ret = pthread_concurrency;
        }

        return ret;
}

int pthread_setconcurrency(int new_level)
{
        if (new_level < 0) {
                return EINVAL;
        }

        if (new_level > CONFIG_MP_MAX_NUM_CPUS) {
                return EAGAIN;
        }

        K_SPINLOCK(&pthread_pool_lock) {
                pthread_concurrency = new_level;
        }

        return 0;
}

/**
 * @brief Set cancelability State.
 *
 * See IEEE 1003.1
 */
int pthread_setcancelstate(int state, int *oldstate)
{
        bool cancel_pending;
        k_spinlock_key_t key;
        struct posix_thread *t;

        if (state != PTHREAD_CANCEL_ENABLE && state != PTHREAD_CANCEL_DISABLE) {
                LOG_ERR("Invalid pthread state %d", state);
                return EINVAL;
        }

        t = to_posix_thread(pthread_self());
        if (t == NULL) {
                return EINVAL;
        }

        key = k_spin_lock(&pthread_pool_lock);
        *oldstate = t->cancel_state;
        t->cancel_state = state;
        cancel_pending = t->cancel_pending;
        k_spin_unlock(&pthread_pool_lock, key);

        if (state == PTHREAD_CANCEL_ENABLE && cancel_pending) {
                posix_thread_finalize(t, PTHREAD_CANCELED);
        }

        return 0;
}

/**
 * @brief Set cancelability Type.
 *
 * See IEEE 1003.1
 */
int pthread_setcanceltype(int type, int *oldtype)
{
        k_spinlock_key_t key;
        struct posix_thread *t;

        if (type != PTHREAD_CANCEL_DEFERRED && type != PTHREAD_CANCEL_ASYNCHRONOUS) {
                LOG_ERR("Invalid pthread cancel type %d", type);
                return EINVAL;
        }

        t = to_posix_thread(pthread_self());
        if (t == NULL) {
                return EINVAL;
        }

        key = k_spin_lock(&pthread_pool_lock);
        *oldtype = t->cancel_type;
        t->cancel_type = type;
        k_spin_unlock(&pthread_pool_lock, key);

        return 0;
}

/**
 * @brief Cancel execution of a thread.
 *
 * See IEEE 1003.1
 */
int pthread_cancel(pthread_t pthread)
{
        int cancel_state;
        k_spinlock_key_t key;
        struct posix_thread *t;

        t = to_posix_thread(pthread);
        if (t == NULL) {
                return ESRCH;
        }

        key = k_spin_lock(&pthread_pool_lock);
        t->cancel_pending = true;
        cancel_state = t->cancel_state;
        k_spin_unlock(&pthread_pool_lock, key);

        if (cancel_state == PTHREAD_CANCEL_ENABLE) {
                posix_thread_finalize(t, PTHREAD_CANCELED);
        }

        return 0;
}

/**
 * @brief Set thread scheduling policy and parameters.
 *
 * See IEEE 1003.1
 */
int pthread_setschedparam(pthread_t pthread, int policy, const struct sched_param *param)
{
        struct posix_thread *t = to_posix_thread(pthread);
        int new_prio;

        if (t == NULL) {
                return ESRCH;
        }

        if (!valid_posix_policy(policy)) {
                LOG_ERR("Invalid scheduler policy %d", policy);
                return EINVAL;
        }

        if (is_posix_policy_prio_valid(param->sched_priority, policy) == false) {
                return EINVAL;
        }

        new_prio = posix_to_zephyr_priority(param->sched_priority, policy);

        k_thread_priority_set(&t->thread, new_prio);
        return 0;
}

/**
 * @brief Initialise thread attributes object
 *
 * See IEEE 1003.1
 */
int pthread_attr_init(pthread_attr_t *_attr)
{
        struct pthread_attr *const attr = (struct pthread_attr *)_attr;

        if (attr == NULL) {
                LOG_ERR("Invalid attr pointer");
                return ENOMEM;
        }

        (void)memcpy(attr, &init_pthread_attrs, sizeof(struct pthread_attr));

        return 0;
}

/**
 * @brief Get thread scheduling policy and parameters
 *
 * See IEEE 1003.1
 */
int pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *param)
{
        uint32_t priority;
        struct posix_thread *t;

        t = to_posix_thread(pthread);
        if (t == NULL) {
                return ESRCH;
        }

        priority = k_thread_priority_get(&t->thread);

        param->sched_priority = zephyr_to_posix_priority(priority, policy);
        return 0;
}

/**
 * @brief Dynamic package initialization
 *
 * See IEEE 1003.1
 */
int pthread_once(pthread_once_t *once, void (*init_func)(void))
{
        bool run_init_func = false;
        struct pthread_once *const _once = (struct pthread_once *)once;

        if (init_func == NULL) {
                return EINVAL;
        }

        K_SPINLOCK(&pthread_pool_lock) {
                if (!_once->flag) {
                        run_init_func = true;
                        _once->flag = true;
                }
        }

        if (run_init_func) {
                init_func();
        }

        return 0;
}

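/*
 * Usage sketch (hypothetical application code): init_tables() runs exactly
 * once, no matter how many threads race through pthread_once():
 *
 *      static pthread_once_t once = PTHREAD_ONCE_INIT;
 *
 *      static void init_tables(void)
 *      {
 *              // one-time setup
 *      }
 *
 *      void *worker(void *arg)
 *      {
 *              (void)pthread_once(&once, init_tables);
 *              return NULL;
 *      }
 */
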
/**
 * @brief Terminate calling thread.
 *
 * See IEEE 1003.1
 */
FUNC_NORETURN
void pthread_exit(void *retval)
{
        k_spinlock_key_t key;
        struct posix_thread *self;

        self = to_posix_thread(pthread_self());
        if (self == NULL) {
                /* not a valid posix_thread */
                LOG_DBG("Aborting non-pthread %p", k_current_get());
                k_thread_abort(k_current_get());

                CODE_UNREACHABLE;
        }

        /* Mark a thread as cancellable before exiting */
        key = k_spin_lock(&pthread_pool_lock);
        self->cancel_state = PTHREAD_CANCEL_ENABLE;
        k_spin_unlock(&pthread_pool_lock, key);

        posix_thread_finalize(self, retval);
        CODE_UNREACHABLE;
}

/**
 * @brief Wait for a thread termination.
 *
 * See IEEE 1003.1
 */
int pthread_join(pthread_t pthread, void **status)
{
        struct posix_thread *t;
        int ret;

        if (pthread == pthread_self()) {
                LOG_ERR("Pthread attempted to join itself (%x)", pthread);
                return EDEADLK;
        }

        t = to_posix_thread(pthread);
        if (t == NULL) {
                return ESRCH;
        }

        LOG_DBG("Pthread %p joining..", &t->thread);

        ret = 0;
        K_SPINLOCK(&pthread_pool_lock)
        {
                if (t->detachstate != PTHREAD_CREATE_JOINABLE) {
                        ret = EINVAL;
                        K_SPINLOCK_BREAK;
                }

                if (t->qid == POSIX_THREAD_READY_Q) {
                        /* in case thread has moved to ready_q between to_posix_thread() and here */
                        ret = ESRCH;
                        K_SPINLOCK_BREAK;
                }

                /*
                 * thread is joinable and is in run_q or done_q.
                 * let's ensure that the thread cannot be joined again after this point.
                 */
                t->detachstate = PTHREAD_CREATE_DETACHED;
        }

        switch (ret) {
        case ESRCH:
                LOG_ERR("Pthread %p has already been joined", &t->thread);
                return ret;
        case EINVAL:
                LOG_ERR("Pthread %p is not joinable", &t->thread);
                return ret;
        case 0:
                break;
        }

        ret = k_thread_join(&t->thread, K_FOREVER);
        /* other possibilities? */
        __ASSERT_NO_MSG(ret == 0);

        LOG_DBG("Joined pthread %p", &t->thread);

        if (status != NULL) {
                LOG_DBG("Writing status to %p", status);
                *status = t->retval;
        }

        posix_thread_recycle();

        return 0;
}

/**
 * @brief Detach a thread.
 *
 * See IEEE 1003.1
 */
int pthread_detach(pthread_t pthread)
{
        int ret;
        k_spinlock_key_t key;
        struct posix_thread *t;
        enum posix_thread_qid qid;

        t = to_posix_thread(pthread);
        if (t == NULL) {
                return ESRCH;
        }

        key = k_spin_lock(&pthread_pool_lock);
        qid = t->qid;
        if (qid == POSIX_THREAD_READY_Q || t->detachstate != PTHREAD_CREATE_JOINABLE) {
                LOG_ERR("Pthread %p cannot be detached", &t->thread);
                ret = EINVAL;
        } else {
                ret = 0;
                t->detachstate = PTHREAD_CREATE_DETACHED;
        }
        k_spin_unlock(&pthread_pool_lock, key);

        if (ret == 0) {
                LOG_DBG("Pthread %p detached", &t->thread);
        }

        return ret;
}

/**
 * @brief Get detach state attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_getdetachstate(const pthread_attr_t *_attr, int *detachstate)
{
        const struct pthread_attr *attr = (const struct pthread_attr *)_attr;

        if ((attr == NULL) || (attr->initialized == false)) {
                return EINVAL;
        }

        *detachstate = attr->detachstate;
        return 0;
}

/**
 * @brief Set detach state attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setdetachstate(pthread_attr_t *_attr, int detachstate)
{
        struct pthread_attr *attr = (struct pthread_attr *)_attr;

        if ((attr == NULL) || (attr->initialized == false) ||
            ((detachstate != PTHREAD_CREATE_DETACHED) &&
             (detachstate != PTHREAD_CREATE_JOINABLE))) {
                return EINVAL;
        }

        attr->detachstate = detachstate;
        return 0;
}

/**
 * @brief Get scheduling policy attribute in Thread attributes.
 *
 * See IEEE 1003.1
 */
int pthread_attr_getschedpolicy(const pthread_attr_t *_attr, int *policy)
{
        const struct pthread_attr *attr = (const struct pthread_attr *)_attr;

        if ((attr == NULL) || (attr->initialized == 0U)) {
                return EINVAL;
        }

        *policy = attr->schedpolicy;
        return 0;
}

/**
 * @brief Set scheduling policy attribute in Thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setschedpolicy(pthread_attr_t *_attr, int policy)
{
        struct pthread_attr *attr = (struct pthread_attr *)_attr;

        if ((attr == NULL) || (attr->initialized == 0U) || !valid_posix_policy(policy)) {
                return EINVAL;
        }

        attr->schedpolicy = policy;
        return 0;
}

/**
 * @brief Get stack size attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_getstacksize(const pthread_attr_t *_attr, size_t *stacksize)
{
        const struct pthread_attr *attr = (const struct pthread_attr *)_attr;

        if ((attr == NULL) || (attr->initialized == false)) {
                return EINVAL;
        }

        *stacksize = __get_attr_stacksize(attr);
        return 0;
}

/**
 * @brief Set stack size attribute in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_setstacksize(pthread_attr_t *_attr, size_t stacksize)
{
        struct pthread_attr *attr = (struct pthread_attr *)_attr;

        if ((attr == NULL) || (attr->initialized == 0U)) {
                return EINVAL;
        }

        if (stacksize == 0 || stacksize < PTHREAD_STACK_MIN || stacksize > PTHREAD_STACK_MAX) {
                return EINVAL;
        }

        __set_attr_stacksize(attr, stacksize);
        return 0;
}

/**
 * @brief Get stack attributes in thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_getstack(const pthread_attr_t *_attr, void **stackaddr, size_t *stacksize)
{
        const struct pthread_attr *attr = (const struct pthread_attr *)_attr;

        if ((attr == NULL) || (attr->initialized == false)) {
                return EINVAL;
        }

        *stackaddr = attr->stack;
        *stacksize = __get_attr_stacksize(attr);
        return 0;
}

int pthread_attr_getguardsize(const pthread_attr_t *ZRESTRICT _attr, size_t *ZRESTRICT guardsize)
{
        struct pthread_attr *const attr = (struct pthread_attr *)_attr;

        if (attr == NULL || guardsize == NULL || !attr->initialized) {
                return EINVAL;
        }

        *guardsize = __get_attr_guardsize(attr);

        return 0;
}

int pthread_attr_setguardsize(pthread_attr_t *_attr, size_t guardsize)
{
        struct pthread_attr *const attr = (struct pthread_attr *)_attr;

        if (attr == NULL || !attr->initialized || guardsize > PTHREAD_STACK_MAX) {
                return EINVAL;
        }

        __set_attr_guardsize(attr, guardsize);

        return 0;
}

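/*
 * Usage sketch (hypothetical application code): the guard size survives a
 * set/get round trip on an initialized attributes object:
 *
 *      pthread_attr_t attr;
 *      size_t guard;
 *
 *      (void)pthread_attr_init(&attr);
 *      (void)pthread_attr_setguardsize(&attr, 4096);
 *      (void)pthread_attr_getguardsize(&attr, &guard); // guard == 4096
 */
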
/**
 * @brief Get thread attributes object scheduling parameters.
 *
 * See IEEE 1003.1
 */
int pthread_attr_getschedparam(const pthread_attr_t *_attr, struct sched_param *schedparam)
{
        struct pthread_attr *attr = (struct pthread_attr *)_attr;

        if ((attr == NULL) || (attr->initialized == false)) {
                return EINVAL;
        }

        schedparam->sched_priority = attr->priority;
        return 0;
}

/**
 * @brief Destroy thread attributes object.
 *
 * See IEEE 1003.1
 */
int pthread_attr_destroy(pthread_attr_t *_attr)
{
        struct pthread_attr *attr = (struct pthread_attr *)_attr;

        if ((attr != NULL) && (attr->initialized != 0U)) {
                attr->initialized = false;
                return 0;
        }

        return EINVAL;
}

int pthread_setname_np(pthread_t thread, const char *name)
{
#ifdef CONFIG_THREAD_NAME
        k_tid_t kthread;

        thread = get_posix_thread_idx(thread);
        if (thread >= CONFIG_MAX_PTHREAD_COUNT) {
                return ESRCH;
        }

        kthread = &posix_thread_pool[thread].thread;

        if (name == NULL) {
                return EINVAL;
        }

        return k_thread_name_set(kthread, name);
#else
        ARG_UNUSED(thread);
        ARG_UNUSED(name);
        return 0;
#endif
}

int pthread_getname_np(pthread_t thread, char *name, size_t len)
{
#ifdef CONFIG_THREAD_NAME
        k_tid_t kthread;

        thread = get_posix_thread_idx(thread);
        if (thread >= CONFIG_MAX_PTHREAD_COUNT) {
                return ESRCH;
        }

        if (name == NULL) {
                return EINVAL;
        }

        memset(name, '\0', len);
        kthread = &posix_thread_pool[thread].thread;
        return k_thread_name_copy(kthread, name, len - 1);
#else
        ARG_UNUSED(thread);
        ARG_UNUSED(name);
        ARG_UNUSED(len);
        return 0;
#endif
}

int pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
{
        ARG_UNUSED(prepare);
        ARG_UNUSED(parent);
        ARG_UNUSED(child);

        return ENOSYS;
}

/* this should probably go into signal.c but we need access to the lock */
int pthread_sigmask(int how, const sigset_t *ZRESTRICT set, sigset_t *ZRESTRICT oset)
{
        struct posix_thread *t;

        if (!(how == SIG_BLOCK || how == SIG_SETMASK || how == SIG_UNBLOCK)) {
                return EINVAL;
        }

        t = to_posix_thread(pthread_self());
        if (t == NULL) {
                return ESRCH;
        }

        K_SPINLOCK(&pthread_pool_lock) {
                if (oset != NULL) {
                        *oset = t->sigset;
                }

                if (set == NULL) {
                        K_SPINLOCK_BREAK;
                }

                switch (how) {
                case SIG_BLOCK:
                        for (size_t i = 0; i < ARRAY_SIZE(set->sig); ++i) {
                                t->sigset.sig[i] |= set->sig[i];
                        }
                        break;
                case SIG_SETMASK:
                        t->sigset = *set;
                        break;
                case SIG_UNBLOCK:
                        for (size_t i = 0; i < ARRAY_SIZE(set->sig); ++i) {
                                t->sigset.sig[i] &= ~set->sig[i];
                        }
                        break;
                }
        }

        return 0;
}

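/*
 * Usage sketch (hypothetical application code): blocking a signal while
 * preserving the previous mask, then restoring it:
 *
 *      sigset_t mask, old;
 *
 *      sigemptyset(&mask);
 *      sigaddset(&mask, SIGUSR1);
 *      (void)pthread_sigmask(SIG_BLOCK, &mask, &old);
 *      // ... section with SIGUSR1 masked ...
 *      (void)pthread_sigmask(SIG_SETMASK, &old, NULL);
 */
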
static int posix_thread_pool_init(void)
{
        size_t i;

        for (i = 0; i < CONFIG_MAX_PTHREAD_COUNT; ++i) {
                sys_dlist_append(&ready_q, &posix_thread_pool[i].q_node);
        }

        return 0;
}
SYS_INIT(posix_thread_pool_init, PRE_KERNEL_1, 0);