tests: newlib: thread_safety: Add internal lock and userspace tests

This commit adds the tests for the newlib retargetable locking
interface, as well as the tests for the internal lock functions that
are supposed to internally invoke the retargetable locking interface.

All of these tests must pass when the toolchain newlib is compiled with
the `retargetable-locking` and `multithread` options enabled, both of
which are required to ensure that newlib is thread-safe. If the
toolchain newlib is compiled with either of these options disabled,
this test will fail.

This commit also adds the userspace testcases to ensure that the newlib
is thread-safe in the user mode.

Signed-off-by: Stephanos Ioannidis <root@stephanos.io>
This commit is contained in:
Stephanos Ioannidis 2021-06-12 22:30:48 +09:00 committed by Kumar Gala
commit fb4325ea32
8 changed files with 668 additions and 91 deletions

View file

@ -4,5 +4,14 @@ cmake_minimum_required(VERSION 3.13.1)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(newlib_thread_safety)
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})
target_sources(app PRIVATE src/main.c)
target_sources_ifdef(
CONFIG_NEWLIB_THREAD_SAFETY_TEST_LOCKS
app PRIVATE src/locks.c
)
target_sources_ifdef(
CONFIG_NEWLIB_THREAD_SAFETY_TEST_STRESS
app PRIVATE src/stress.c
)

View file

@ -0,0 +1,11 @@
# Copyright (c) 2021 Stephanos Ioannidis <root@stephanos.io>
# SPDX-License-Identifier: Apache-2.0
config NEWLIB_THREAD_SAFETY_TEST_LOCKS
bool "Test: Locks"
default y
config NEWLIB_THREAD_SAFETY_TEST_STRESS
bool "Test: Stress"
source "Kconfig"

View file

@ -1,3 +1,4 @@
CONFIG_ZTEST=y
CONFIG_NEWLIB_LIBC=y
CONFIG_NEWLIB_LIBC_NANO=n
CONFIG_TIMESLICE_SIZE=1

View file

@ -0,0 +1,8 @@
CONFIG_ZTEST=y
CONFIG_NEWLIB_LIBC=y
CONFIG_NEWLIB_LIBC_NANO=n
CONFIG_TIMESLICE_SIZE=1
CONFIG_TEST_USERSPACE=y
CONFIG_MAX_THREAD_BYTES=10
CONFIG_HEAP_MEM_POOL_SIZE=2048

View file

@ -0,0 +1,474 @@
/*
* Copyright (c) 2021 Stephanos Ioannidis <root@stephanos.io>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
 * @file Newlib thread-safety lock test
 *
 * This file contains a set of tests to verify that the newlib retargetable
 * locking interface is functional and the internal newlib locks function as
 * intended.
 */
#include <zephyr.h>
#include <ztest.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <envlock.h>
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)
#ifdef CONFIG_USERSPACE
#define THREAD_OPT (K_USER | K_INHERIT_PERMS)
#else
#define THREAD_OPT (0)
#endif /* CONFIG_USERSPACE */
static struct k_thread tdata;
static K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
/* Newlib internal lock functions */
extern void __sfp_lock_acquire(void);
extern void __sfp_lock_release(void);
extern void __sinit_lock_acquire(void);
extern void __sinit_lock_release(void);
extern void __tz_lock(void);
extern void __tz_unlock(void);
/* Static locks */
extern struct k_mutex __lock___sinit_recursive_mutex;
extern struct k_mutex __lock___sfp_recursive_mutex;
extern struct k_mutex __lock___atexit_recursive_mutex;
extern struct k_mutex __lock___malloc_recursive_mutex;
extern struct k_mutex __lock___env_recursive_mutex;
extern struct k_sem __lock___at_quick_exit_mutex;
extern struct k_sem __lock___tz_mutex;
extern struct k_sem __lock___dd_hash_mutex;
extern struct k_sem __lock___arc4random_mutex;
/**
 * @brief Test retargetable locking non-recursive (semaphore) interface
 *
 * This test verifies that a non-recursive lock (semaphore) can be dynamically
 * created, acquired, released and closed through the retargetable locking
 * interface.
 */
static void test_retargetable_lock_sem(void)
{
	_LOCK_T lock = NULL;
	int taken;

	/* Create a fresh non-recursive lock through the newlib interface. */
	__retarget_lock_init(&lock);
	zassert_not_null(lock, "non-recursive lock init failed");

	/* Take the lock; a try-acquire must then report failure (0). */
	__retarget_lock_acquire(lock);
	taken = __retarget_lock_try_acquire(lock);
	zassert_equal(taken, 0, "non-recursive lock acquisition failed");

	/* Drop the lock; a try-acquire must then report success (non-zero). */
	__retarget_lock_release(lock);
	taken = __retarget_lock_try_acquire(lock);
	zassert_not_equal(taken, 0, "non-recursive lock release failed");

	/*
	 * Destroy the lock.  NOTE(review): the verification try-acquire above
	 * leaves the lock in the acquired state when it is closed; this is
	 * the original behaviour, preserved here.
	 */
	__retarget_lock_close(lock);
}
static void retargetable_lock_mutex_thread_acq(void *p1, void *p2, void *p3)
{
_LOCK_T lock = p1;
int ret;
/*
* Attempt to lock the recursive lock from child thread and verify
* that it fails.
*/
ret = __retarget_lock_try_acquire_recursive(lock);
zassert_equal(ret, 0, "recursive lock acquisition failed");
}
/*
 * Child-thread entry point: check that the recursive lock released by the
 * parent thread can now be taken from another thread.  The lock is
 * intentionally left in the acquired state here; the parent thread closes
 * it afterwards.
 */
static void retargetable_lock_mutex_thread_rel(void *p1, void *p2, void *p3)
{
	_LOCK_T lock = p1;
	int ret;

	/*
	 * Attempt to lock the recursive lock from child thread and verify
	 * that it succeeds, now that the parent thread has released it.
	 */
	ret = __retarget_lock_try_acquire_recursive(lock);
	zassert_not_equal(ret, 0, "recursive lock release failed");
}
/**
 * @brief Test retargetable locking recursive (mutex) interface
 *
 * This test verifies that a recursive lock (mutex) can be dynamically created,
 * acquired, released, and closed through the retargetable locking interface.
 */
static void test_retargetable_lock_mutex(void)
{
	k_tid_t thread;
	_LOCK_T lock = NULL;

	/* Create a fresh recursive lock through the newlib interface. */
	__retarget_lock_init_recursive(&lock);
	zassert_not_null(lock, "recursive lock init failed");

	/* Hold the lock, then let a child thread confirm it cannot take it. */
	__retarget_lock_acquire_recursive(lock);
	thread = k_thread_create(&tdata, tstack, STACK_SIZE,
				 retargetable_lock_mutex_thread_acq, lock,
				 NULL, NULL, K_PRIO_PREEMPT(0), THREAD_OPT,
				 K_NO_WAIT);
	k_thread_join(thread, K_FOREVER);

	/* Drop the lock, then let a child thread confirm it can take it. */
	__retarget_lock_release_recursive(lock);
	thread = k_thread_create(&tdata, tstack, STACK_SIZE,
				 retargetable_lock_mutex_thread_rel, lock,
				 NULL, NULL, K_PRIO_PREEMPT(0), THREAD_OPT,
				 K_NO_WAIT);
	k_thread_join(thread, K_FOREVER);

	/* Destroy the lock. */
	__retarget_lock_close_recursive(lock);
}
/*
 * Child-thread entry point: the parent thread holds the sinit lock via
 * __sinit_lock_acquire().  If that function is backed by the retargetable
 * locking interface, a try-acquire on the underlying static mutex must
 * fail here.
 */
static void sinit_lock_thread_acq(void *p1, void *p2, void *p3)
{
	zassert_equal(
		__retarget_lock_try_acquire_recursive(
			(_LOCK_T)&__lock___sinit_recursive_mutex),
		0,
		"__sinit_lock_acquire() is not using "
		"retargetable locking interface");
}
static void sinit_lock_thread_rel(void *p1, void *p2, void *p3)
{
int ret;
/*
* Attempt to lock the sinit mutex from child thread using
* retargetable locking interface. This operation should succeed if the
* __sinit_lock_release() implementation internally uses the
* retargetable locking interface.
*/
ret = __retarget_lock_try_acquire_recursive(
(_LOCK_T)&__lock___sinit_recursive_mutex);
zassert_not_equal(ret, 0, "__sinit_lock_release() is not using "
"retargetable locking interface");
/* Release sinit lock */
__retarget_lock_release_recursive(
(_LOCK_T)&__lock___sinit_recursive_mutex);
}
/**
 * @brief Test sinit lock functions
 *
 * This test calls the __sinit_lock_acquire() and __sinit_lock_release()
 * functions to verify that sinit lock is functional and its implementation
 * is provided by the retargetable locking interface.
 */
static void test_sinit_lock(void)
{
	k_tid_t thread;

	/* Take the sinit lock, then have a child thread verify it is held. */
	__sinit_lock_acquire();
	thread = k_thread_create(&tdata, tstack, STACK_SIZE,
				 sinit_lock_thread_acq, NULL, NULL, NULL,
				 K_PRIO_PREEMPT(0), THREAD_OPT, K_NO_WAIT);
	k_thread_join(thread, K_FOREVER);

	/* Drop the sinit lock, then have a child thread verify it is free. */
	__sinit_lock_release();
	thread = k_thread_create(&tdata, tstack, STACK_SIZE,
				 sinit_lock_thread_rel, NULL, NULL, NULL,
				 K_PRIO_PREEMPT(0), THREAD_OPT, K_NO_WAIT);
	k_thread_join(thread, K_FOREVER);
}
/*
 * Child-thread entry point: the parent thread holds the sfp lock via
 * __sfp_lock_acquire().  If that function is backed by the retargetable
 * locking interface, a try-acquire on the underlying static mutex must
 * fail here.
 */
static void sfp_lock_thread_acq(void *p1, void *p2, void *p3)
{
	zassert_equal(
		__retarget_lock_try_acquire_recursive(
			(_LOCK_T)&__lock___sfp_recursive_mutex),
		0,
		"__sfp_lock_acquire() is not using "
		"retargetable locking interface");
}
static void sfp_lock_thread_rel(void *p1, void *p2, void *p3)
{
int ret;
/*
* Attempt to lock the sfp mutex from child thread using retargetable
* locking interface. This operation should succeed if the
* __sfp_lock_release() implementation internally uses the retargetable
* locking interface.
*/
ret = __retarget_lock_try_acquire_recursive(
(_LOCK_T)&__lock___sfp_recursive_mutex);
zassert_not_equal(ret, 0, "__sfp_lock_release() is not using "
"retargetable locking interface");
/* Release sfp lock */
__retarget_lock_release_recursive(
(_LOCK_T)&__lock___sfp_recursive_mutex);
}
/**
 * @brief Test sfp lock functions
 *
 * This test calls the __sfp_lock_acquire() and __sfp_lock_release() functions
 * to verify that sfp lock is functional and its implementation is provided by
 * the retargetable locking interface.
 */
static void test_sfp_lock(void)
{
	k_tid_t thread;

	/* Take the sfp lock, then have a child thread verify it is held. */
	__sfp_lock_acquire();
	thread = k_thread_create(&tdata, tstack, STACK_SIZE,
				 sfp_lock_thread_acq, NULL, NULL, NULL,
				 K_PRIO_PREEMPT(0), THREAD_OPT, K_NO_WAIT);
	k_thread_join(thread, K_FOREVER);

	/* Drop the sfp lock, then have a child thread verify it is free. */
	__sfp_lock_release();
	thread = k_thread_create(&tdata, tstack, STACK_SIZE,
				 sfp_lock_thread_rel, NULL, NULL, NULL,
				 K_PRIO_PREEMPT(0), THREAD_OPT, K_NO_WAIT);
	k_thread_join(thread, K_FOREVER);
}
/*
 * Child-thread entry point: the parent thread holds the malloc lock via
 * __malloc_lock().  If that function is backed by the retargetable locking
 * interface, a try-acquire on the underlying static mutex must fail here.
 */
static void malloc_lock_thread_lock(void *p1, void *p2, void *p3)
{
	zassert_equal(
		__retarget_lock_try_acquire_recursive(
			(_LOCK_T)&__lock___malloc_recursive_mutex),
		0,
		"__malloc_lock() is not using retargetable "
		"locking interface");
}
static void malloc_lock_thread_unlock(void *p1, void *p2, void *p3)
{
int ret;
/*
* Attempt to lock the malloc mutex from child thread using
* retargetable locking interface. This operation should succeed if the
* __malloc_unlock() implementation internally uses the retargetable
* locking interface.
*/
ret = __retarget_lock_try_acquire_recursive(
(_LOCK_T)&__lock___malloc_recursive_mutex);
zassert_not_equal(ret, 0, "__malloc_unlock() is not using "
"retargetable locking interface");
/* Release malloc lock */
__retarget_lock_release_recursive(
(_LOCK_T)&__lock___malloc_recursive_mutex);
}
/**
 * @brief Test malloc lock functions
 *
 * This test calls the __malloc_lock() and __malloc_unlock() functions to
 * verify that malloc lock is functional and its implementation is provided by
 * the retargetable locking interface.
 */
static void test_malloc_lock(void)
{
	k_tid_t thread;

	/* Take the malloc lock, then have a child thread verify it is held. */
	__malloc_lock(_REENT);
	thread = k_thread_create(&tdata, tstack, STACK_SIZE,
				 malloc_lock_thread_lock, NULL, NULL, NULL,
				 K_PRIO_PREEMPT(0), THREAD_OPT, K_NO_WAIT);
	k_thread_join(thread, K_FOREVER);

	/* Drop the malloc lock, then have a child thread verify it is free. */
	__malloc_unlock(_REENT);
	thread = k_thread_create(&tdata, tstack, STACK_SIZE,
				 malloc_lock_thread_unlock, NULL, NULL, NULL,
				 K_PRIO_PREEMPT(0), THREAD_OPT, K_NO_WAIT);
	k_thread_join(thread, K_FOREVER);
}
/*
 * Child-thread entry point: the parent thread holds the env lock via
 * __env_lock().  If that function is backed by the retargetable locking
 * interface, a try-acquire on the underlying static mutex must fail here.
 */
static void env_lock_thread_lock(void *p1, void *p2, void *p3)
{
	zassert_equal(
		__retarget_lock_try_acquire_recursive(
			(_LOCK_T)&__lock___env_recursive_mutex),
		0,
		"__env_lock() is not using retargetable "
		"locking interface");
}
static void env_lock_thread_unlock(void *p1, void *p2, void *p3)
{
int ret;
/*
* Attempt to lock the env mutex from child thread using
* retargetable locking interface. This operation should succeed if the
* __env_unlock() implementation internally uses the retargetable
* locking interface.
*/
ret = __retarget_lock_try_acquire_recursive(
(_LOCK_T)&__lock___env_recursive_mutex);
zassert_not_equal(ret, 0, "__env_unlock() is not using "
"retargetable locking interface");
/* Release env lock */
__retarget_lock_release_recursive(
(_LOCK_T)&__lock___env_recursive_mutex);
}
/**
 * @brief Test env lock functions
 *
 * This test calls the __env_lock() and __env_unlock() functions to verify
 * that env lock is functional and its implementation is provided by the
 * retargetable locking interface.
 */
static void test_env_lock(void)
{
	k_tid_t thread;

	/* Take the env lock, then have a child thread verify it is held. */
	__env_lock(_REENT);
	thread = k_thread_create(&tdata, tstack, STACK_SIZE,
				 env_lock_thread_lock, NULL, NULL, NULL,
				 K_PRIO_PREEMPT(0), THREAD_OPT, K_NO_WAIT);
	k_thread_join(thread, K_FOREVER);

	/* Drop the env lock, then have a child thread verify it is free. */
	__env_unlock(_REENT);
	thread = k_thread_create(&tdata, tstack, STACK_SIZE,
				 env_lock_thread_unlock, NULL, NULL, NULL,
				 K_PRIO_PREEMPT(0), THREAD_OPT, K_NO_WAIT);
	k_thread_join(thread, K_FOREVER);
}
/**
* @brief Test tz lock functions
*
* This test calls the __tz_lock() and __tz_unlock() functions to verify that
* tz lock is functional and its implementation is provided by the retargetable
* locking interface.
*/
static void test_tz_lock(void)
{
/* Lock the tz semaphore */
__tz_lock();
/* Attempt to acquire lock and verify failure */
zassert_equal(
__retarget_lock_try_acquire((_LOCK_T)&__lock___tz_mutex), 0,
"__tz_lock() is not using retargetable locking interface");
/* Unlock the tz semaphore */
__tz_unlock();
/* Attempt to acquire lock and verify success */
zassert_not_equal(
__retarget_lock_try_acquire((_LOCK_T)&__lock___tz_mutex), 0,
"__tz_unlock() is not using retargetable locking interface");
/* Clean up */
__retarget_lock_release((_LOCK_T)&__lock___tz_mutex);
}
/*
 * Entry point for the lock testsuite; invoked from test_main() when
 * CONFIG_NEWLIB_THREAD_SAFETY_TEST_LOCKS is enabled.
 */
void test_newlib_thread_safety_locks(void)
{
#ifdef CONFIG_USERSPACE
	/* Grant the user-mode test threads access to the shared thread data
	 * and stack objects used by the individual testcases.
	 */
	k_thread_access_grant(k_current_get(), &tdata, &tstack);
#endif /* CONFIG_USERSPACE */

	ztest_test_suite(newlib_thread_safety_locks,
			 ztest_user_unit_test(test_retargetable_lock_sem),
			 ztest_user_unit_test(test_retargetable_lock_mutex),
			 ztest_user_unit_test(test_sinit_lock),
			 ztest_user_unit_test(test_sfp_lock),
			 ztest_user_unit_test(test_malloc_lock),
			 ztest_user_unit_test(test_env_lock),
			 ztest_user_unit_test(test_tz_lock));

	ztest_run_test_suite(newlib_thread_safety_locks);
}

View file

@ -4,97 +4,23 @@
* SPDX-License-Identifier: Apache-2.0
*/
/**
 * @file Newlib thread safety test
 *
 * This file contains a set of tests to verify that the C standard functions
 * provided by newlib are thread safe (i.e. synchronised) and that the thread-
 * specific contexts are properly handled (i.e. re-entrant).
 */
#include <zephyr.h>
#include <ztest.h>
#include <stdio.h>
#include <stdlib.h>
#define THREAD_COUNT (64)
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)
#define TEST_INTERVAL (30) /* seconds */
static struct k_thread tdata[THREAD_COUNT];
static K_THREAD_STACK_ARRAY_DEFINE(tstack, THREAD_COUNT, STACK_SIZE);
/*
 * Worker thread: repeatedly allocates an int, tags it with a unique value,
 * busy-waits to encourage preemption, then verifies the tag and frees the
 * block.  Runs until the bool pointed to by p1 becomes true.  Heap corruption
 * caused by unsynchronised newlib malloc/free shows up as a failed zassert.
 */
static void malloc_thread(void *p1, void *p2, void *p3)
{
	static atomic_t count;	/* iteration counter shared by all workers */
	bool *aborted = p1;	/* stop flag owned by the parent thread */
	int *volatile ptr;	/* volatile: force a real re-read in the check */
	int val;

	while (*aborted == false) {
		/* Compute unique value specific to this iteration. */
		val = atomic_inc(&count);

		/* Allocate memory block and write a unique value to it. */
		ptr = malloc(sizeof(int));
		zassert_not_null(ptr, "Out of memory");
		*ptr = val;

		/* Busy wait to increase the likelihood of preemption. */
		k_busy_wait(10);

		/*
		 * Verify that the unique value previously written to the
		 * memory block is valid. This value will become corrupted if
		 * the newlib heap is not properly synchronised.
		 */
		zassert_equal(*ptr, val, "Corrupted memory block");

		/* Free memory block. */
		free(ptr);
	}
}
/**
 * @brief Test thread safety of newlib memory management functions
 *
 * This test calls the malloc() and free() functions from multiple threads to
 * verify that no corruption occurs in the newlib memory heap.
 */
static void test_malloc_thread_safety(void)
{
	int i;
	k_tid_t tid[THREAD_COUNT];
	/* Stop flag shared with every worker thread.
	 * NOTE(review): plain bool, not atomic — presumably adequate because
	 * it only transitions false -> true once and is polled; confirm this
	 * is acceptable on the target memory model.
	 */
	bool aborted = false;

	/* Create worker threads. */
	for (i = 0; i < ARRAY_SIZE(tid); i++) {
		tid[i] = k_thread_create(&tdata[i], tstack[i], STACK_SIZE,
					 malloc_thread, &aborted, NULL, NULL,
					 K_PRIO_PREEMPT(0), 0, K_NO_WAIT);
	}

	TC_PRINT("Created %d worker threads.\n", THREAD_COUNT);

	/* Wait and see if any failures occur. */
	TC_PRINT("Waiting %d seconds to see if any failures occur ...\n",
		 TEST_INTERVAL);

	k_sleep(K_SECONDS(TEST_INTERVAL));

	/* Abort all worker threads. */
	aborted = true;

	for (i = 0; i < ARRAY_SIZE(tid); i++) {
		k_thread_join(tid[i], K_FOREVER);
	}
}
extern void test_newlib_thread_safety_locks(void);
extern void test_newlib_thread_safety_stress(void);
/*
 * Test entry point.
 *
 * NOTE(review): this span appears to flatten a unified diff — the
 * ztest_test_suite()/ztest_run_test_suite() lines look like removed hunk
 * content, while the #ifdef testsuite-dispatch lines look like the added
 * replacement.  Kept verbatim; confirm against the actual file.
 */
void test_main(void)
{
	ztest_test_suite(newlib_thread_safety,
	ztest_unit_test(test_malloc_thread_safety));
#ifdef CONFIG_USERSPACE
	/* Let user-mode test threads allocate from the system memory pool. */
	k_thread_system_pool_assign(k_current_get());
#endif /* CONFIG_USERSPACE */
	ztest_run_test_suite(newlib_thread_safety);
	/* Invoke testsuites */
#ifdef CONFIG_NEWLIB_THREAD_SAFETY_TEST_LOCKS
	test_newlib_thread_safety_locks();
#endif /* CONFIG_NEWLIB_THREAD_SAFETY_TEST_LOCKS */
#ifdef CONFIG_NEWLIB_THREAD_SAFETY_TEST_STRESS
	test_newlib_thread_safety_stress();
#endif /* CONFIG_NEWLIB_THREAD_SAFETY_TEST_STRESS */
}

View file

@ -0,0 +1,108 @@
/*
* Copyright (c) 2021 Stephanos Ioannidis <root@stephanos.io>
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
 * @file Newlib thread-safety stress test
 *
 * This file contains a set of tests to verify that the C standard functions
 * provided by newlib are thread safe (i.e. synchronised) and that the thread-
 * specific contexts are properly handled (i.e. re-entrant).
 */
#include <zephyr.h>
#include <ztest.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#define THREAD_COUNT (64)
#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACKSIZE)
#define TEST_INTERVAL (30) /* seconds */
#ifdef CONFIG_USERSPACE
#define THREAD_OPT (K_USER | K_INHERIT_PERMS)
#else
#define THREAD_OPT (0)
#endif /* CONFIG_USERSPACE */
static struct k_thread tdata[THREAD_COUNT];
static K_THREAD_STACK_ARRAY_DEFINE(tstack, THREAD_COUNT, STACK_SIZE);
/*
 * Worker thread: repeatedly allocates an int, tags it with a unique value,
 * busy-waits to encourage preemption, then verifies the tag and frees the
 * block.  Runs until the bool pointed to by p1 becomes true.  Heap corruption
 * caused by unsynchronised newlib malloc/free shows up as a failed zassert.
 */
static void malloc_thread(void *p1, void *p2, void *p3)
{
	/* ZTEST_BMEM: place in the test shared memory partition so
	 * user-mode worker threads can access it.
	 */
	static ZTEST_BMEM atomic_t count;
	bool *aborted = p1;	/* stop flag owned by the parent thread */
	int val;
	int *volatile ptr;	/* volatile: force a real re-read in the check */

	while (*aborted == false) {
		/* Compute unique value specific to this iteration. */
		val = atomic_inc(&count);

		/* Allocate memory block and write a unique value to it. */
		ptr = malloc(sizeof(int));
		zassert_not_null(ptr, "Out of memory");
		*ptr = val;

		/* Busy wait to increase the likelihood of preemption. */
		k_busy_wait(10);

		/*
		 * Verify that the unique value previously written to the
		 * memory block is valid. This value will become corrupted if
		 * the newlib heap is not properly synchronised.
		 */
		zassert_equal(*ptr, val, "Corrupted memory block");

		/* Free memory block. */
		free(ptr);
	}
}
/**
* @brief Test thread safety of newlib memory management functions
*
* This test calls the malloc() and free() functions from multiple threads to
* verify that no corruption occurs in the newlib memory heap.
*/
static void test_malloc_thread_safety(void)
{
int i;
k_tid_t tid[THREAD_COUNT];
static ZTEST_BMEM bool aborted;
/* Create worker threads. */
for (i = 0; i < ARRAY_SIZE(tid); i++) {
tid[i] = k_thread_create(&tdata[i], tstack[i], STACK_SIZE,
malloc_thread, &aborted, NULL, NULL,
K_PRIO_PREEMPT(0), THREAD_OPT,
K_NO_WAIT);
}
TC_PRINT("Created %d worker threads.\n", THREAD_COUNT);
/* Wait and see if any failures occur. */
TC_PRINT("Waiting %d seconds to see if any failures occur ...\n",
TEST_INTERVAL);
k_sleep(K_SECONDS(TEST_INTERVAL));
/* Abort all worker threads. */
aborted = true;
for (i = 0; i < ARRAY_SIZE(tid); i++) {
k_thread_join(tid[i], K_FOREVER);
}
}
/*
 * Entry point for the stress testsuite; invoked from test_main() when
 * CONFIG_NEWLIB_THREAD_SAFETY_TEST_STRESS is enabled.
 */
void test_newlib_thread_safety_stress(void)
{
	ztest_test_suite(newlib_thread_safety_stress,
			 ztest_unit_test(test_malloc_thread_safety));

	ztest_run_test_suite(newlib_thread_safety_stress);
}

View file

@ -1,13 +1,53 @@
common:
filter: TOOLCHAIN_HAS_NEWLIB == 1
min_ram: 64
tags: clib newlib
tests:
libraries.libc.newlib.thread_safety:
slow: true
tags: clib newlib
libraries.libc.newlib_nano.thread_safety:
slow: true
tags: clib newlib
filter: CONFIG_HAS_NEWLIB_LIBC_NANO
extra_configs:
- CONFIG_NEWLIB_LIBC_NANO=y
libraries.libc.newlib.thread_safety.stress:
tags: clib newlib
slow: true
extra_configs:
- CONFIG_NEWLIB_THREAD_SAFETY_TEST_STRESS=y
libraries.libc.newlib_nano.thread_safety.stress:
tags: clib newlib
filter: CONFIG_HAS_NEWLIB_LIBC_NANO
slow: true
extra_configs:
- CONFIG_NEWLIB_THREAD_SAFETY_TEST_STRESS=y
- CONFIG_NEWLIB_LIBC_NANO=y
libraries.libc.newlib.thread_safety.userspace:
tags: clib newlib userspace
filter: CONFIG_ARCH_HAS_USERSPACE
extra_args: CONF_FILE=prj_userspace.conf
libraries.libc.newlib_nano.thread_safety.userspace:
tags: clib newlib userspace
filter: CONFIG_ARCH_HAS_USERSPACE and CONFIG_HAS_NEWLIB_LIBC_NANO
extra_args: CONF_FILE=prj_userspace.conf
extra_configs:
- CONFIG_NEWLIB_LIBC_NANO=y
libraries.libc.newlib.thread_safety.userspace.stress:
tags: clib newlib userspace
filter: CONFIG_ARCH_HAS_USERSPACE
slow: true
timeout: 120
extra_args: CONF_FILE=prj_userspace.conf
extra_configs:
- CONFIG_NEWLIB_THREAD_SAFETY_TEST_STRESS=y
- CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE=8192
libraries.libc.newlib_nano.thread_safety.userspace.stress:
tags: clib newlib userspace
filter: CONFIG_ARCH_HAS_USERSPACE and CONFIG_HAS_NEWLIB_LIBC_NANO
slow: true
timeout: 120
extra_args: CONF_FILE=prj_userspace.conf
extra_configs:
- CONFIG_NEWLIB_THREAD_SAFETY_TEST_STRESS=y
- CONFIG_NEWLIB_LIBC_NANO=y
- CONFIG_NEWLIB_LIBC_ALIGNED_HEAP_SIZE=2048