diff --git a/include/zephyr/posix/posix_types.h b/include/zephyr/posix/posix_types.h
index 566fd63f1fb..fb77f191bac 100644
--- a/include/zephyr/posix/posix_types.h
+++ b/include/zephyr/posix/posix_types.h
@@ -39,8 +39,10 @@ typedef unsigned long timer_t;
 struct pthread_attr {
 	void *stack;
 	uint16_t stacksize;
+	uint16_t guardsize;
 	int8_t priority;
 	uint8_t schedpolicy: 2;
+	uint8_t guardsize_msbit: 1;
 	bool initialized: 1;
 	bool cancelstate: 1;
 	bool detachstate: 1;
diff --git a/include/zephyr/posix/pthread.h b/include/zephyr/posix/pthread.h
index d380e691d66..55d617715f5 100644
--- a/include/zephyr/posix/pthread.h
+++ b/include/zephyr/posix/pthread.h
@@ -428,7 +428,9 @@ static inline int pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
 	return 0;
 }
 
+int pthread_attr_getguardsize(const pthread_attr_t *ZRESTRICT attr, size_t *ZRESTRICT guardsize);
 int pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize);
+int pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize);
 int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize);
 int pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy);
 int pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy);
diff --git a/lib/posix/Kconfig.pthread b/lib/posix/Kconfig.pthread
index 388a30c5fa4..c27f55b68b9 100644
--- a/lib/posix/Kconfig.pthread
+++ b/lib/posix/Kconfig.pthread
@@ -27,4 +27,17 @@ config PTHREAD_RECYCLER_DELAY_MS
 	  Note: this option should be considered temporary and will likely
 	  be removed once a more synchronous solution is available.
 
+config POSIX_PTHREAD_ATTR_GUARDSIZE_DEFAULT
+	int "Default size of stack guard area"
+	range 0 65536
+	default 0
+	help
+	  This is the default amount of space to reserve at the overflow end of a
+	  pthread stack. Since Zephyr already supports both software-based stack
+	  protection (canaries) and hardware-based stack protection (MMU or MPU),
+	  this is set to 0 by default. However, a conforming application would be
+	  required to set this to PAGESIZE. Eventually, this option might
+	  facilitate a more dynamic approach to guard areas (via software or
+	  hardware) but for now it simply increases the size of thread stacks.
+
 endif
diff --git a/lib/posix/pthread.c b/lib/posix/pthread.c
index 207236e085c..a1f74908f13 100644
--- a/lib/posix/pthread.c
+++ b/lib/posix/pthread.c
@@ -50,6 +50,17 @@ static inline void __set_attr_stacksize(struct pthread_attr *attr, size_t size)
 	attr->stacksize = size - 1;
 }
 
+static inline size_t __get_attr_guardsize(const struct pthread_attr *attr)
+{
+	return (attr->guardsize_msbit * BIT(16)) | attr->guardsize;
+}
+
+static inline void __set_attr_guardsize(struct pthread_attr *attr, size_t size)
+{
+	attr->guardsize_msbit = size == PTHREAD_STACK_MAX;
+	attr->guardsize = size & BIT_MASK(16);
+}
+
 struct __pthread_cleanup {
 	void (*routine)(void *arg);
 	void *arg;
@@ -85,8 +96,10 @@ static int pthread_concurrency;
 static const struct pthread_attr init_pthread_attrs = {
 	.stack = NULL,
 	.stacksize = 0,
+	.guardsize = (BIT_MASK(16) & CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_DEFAULT),
 	.priority = DEFAULT_PTHREAD_PRIORITY,
 	.schedpolicy = DEFAULT_PTHREAD_POLICY,
+	.guardsize_msbit = (BIT(16) & CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_DEFAULT),
 	.initialized = true,
 	.cancelstate = PTHREAD_CANCEL_ENABLE,
 	.detachstate = PTHREAD_CREATE_JOINABLE,
@@ -253,7 +266,7 @@ int pthread_attr_setschedparam(pthread_attr_t *_attr, const struct sched_param *
 	int priority = schedparam->sched_priority;
 
 	if (attr == NULL || !attr->initialized ||
-	    is_posix_policy_prio_valid(priority, attr->schedpolicy == false)) {
+	    !is_posix_policy_prio_valid(priority, attr->schedpolicy)) {
 		LOG_ERR("Invalid pthread_attr_t or sched_param");
 		return EINVAL;
 	}
@@ -436,7 +449,8 @@ int pthread_create(pthread_t *th, const pthread_attr_t *_attr, void *(*threadrou
 		attr = &attr_storage;
 		BUILD_ASSERT(DYNAMIC_STACK_SIZE <= PTHREAD_STACK_MAX);
 		__set_attr_stacksize(attr, DYNAMIC_STACK_SIZE);
-		attr->stack = k_thread_stack_alloc(__get_attr_stacksize(attr),
+		attr->stack = k_thread_stack_alloc(__get_attr_stacksize(attr) +
+							   __get_attr_guardsize(attr),
 						   k_is_user_context() ? K_USER : 0);
 		if (attr->stack == NULL) {
 			LOG_ERR("Unable to allocate stack of size %u", DYNAMIC_STACK_SIZE);
@@ -988,6 +1002,32 @@ int pthread_attr_getstack(const pthread_attr_t *_attr, void **stackaddr, size_t
 	return 0;
 }
 
+int pthread_attr_getguardsize(const pthread_attr_t *ZRESTRICT _attr, size_t *ZRESTRICT guardsize)
+{
+	struct pthread_attr *const attr = (struct pthread_attr *)_attr;
+
+	if (attr == NULL || guardsize == NULL || !attr->initialized) {
+		return EINVAL;
+	}
+
+	*guardsize = __get_attr_guardsize(attr);
+
+	return 0;
+}
+
+int pthread_attr_setguardsize(pthread_attr_t *_attr, size_t guardsize)
+{
+	struct pthread_attr *const attr = (struct pthread_attr *)_attr;
+
+	if (attr == NULL || !attr->initialized || guardsize > PTHREAD_STACK_MAX) {
+		return EINVAL;
+	}
+
+	__set_attr_guardsize(attr, guardsize);
+
+	return 0;
+}
+
 /**
  * @brief Get thread attributes object scheduling parameters.
  *
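
For reviewers, a minimal usage sketch of the two new accessors from application code. This is not part of the patch: the 4096-byte guard value is illustrative only, and pthread_attr_init()/pthread_attr_destroy() are the standard attribute routines already provided by the POSIX layer.

```c
/* Hypothetical example, not part of this patch: exercises only the new
 * pthread_attr_getguardsize()/pthread_attr_setguardsize() accessors.
 */
#include <stdio.h>
#include <pthread.h>

int main(void)
{
	pthread_attr_t attr;
	size_t guardsize = 0;
	int ret;

	/* The attribute object must be initialized first; the new accessors
	 * return EINVAL for an uninitialized attr.
	 */
	ret = pthread_attr_init(&attr);
	if (ret != 0) {
		return ret;
	}

	/* Request a 4 KiB guard area (illustrative value). Values above
	 * PTHREAD_STACK_MAX are rejected with EINVAL by this patch.
	 */
	ret = pthread_attr_setguardsize(&attr, 4096);
	if (ret != 0) {
		return ret;
	}

	/* Read the value back; without an explicit set, the default comes
	 * from CONFIG_POSIX_PTHREAD_ATTR_GUARDSIZE_DEFAULT.
	 */
	ret = pthread_attr_getguardsize(&attr, &guardsize);
	if (ret == 0) {
		printf("guardsize: %zu\n", guardsize);
	}

	(void)pthread_attr_destroy(&attr);

	return ret;
}
```

As the pthread_create() hunk shows, the guard area is currently only folded into the allocation on the dynamic-stack path (k_thread_stack_alloc() of stacksize plus guardsize); a caller-supplied stack is used as-is and the guard size is simply stored in the attribute object.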