ztest: Convert CONFIG_MP_NUM_CPUS handling

For test_1cpu_start/test_1cpu_stop, build the code only when
CONFIG_SMP is enabled, use arch_num_cpus() for runtime loop bounds,
and use CONFIG_MP_MAX_NUM_CPUS for array declarations.

Signed-off-by: Kumar Gala <kumar.gala@intel.com>
commit 8eb0cdfcfb
Author: Kumar Gala <kumar.gala@intel.com>, 2022-10-26 09:59:41 -05:00
Committed by: Anas Nashif
3 changed files with 35 additions and 35 deletions
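
The same shape is applied in both copies of ztest below: static storage is
sized with the compile-time ceiling CONFIG_MP_MAX_NUM_CPUS, runtime loops are
bounded by arch_num_cpus(), and the holder machinery only builds under
CONFIG_SMP. A rough standalone sketch of that pattern (MAX_HOLDERS,
holder_threads, holder_fn and hold_other_cpus are illustrative names, not
taken from the diff):

#include <zephyr/kernel.h>

#ifdef CONFIG_SMP
/* Storage must cover the worst case, so it is sized with the compile-time
 * maximum (minus one: the CPU running the test needs no holder thread).
 */
#define MAX_HOLDERS     (CONFIG_MP_MAX_NUM_CPUS - 1)
#define HOLDER_STACK_SZ 1024    /* ztest adds CONFIG_TEST_EXTRA_STACK_SIZE here */

static struct k_thread holder_threads[MAX_HOLDERS];
K_KERNEL_STACK_ARRAY_DEFINE(holder_stacks, MAX_HOLDERS, HOLDER_STACK_SZ);

/* Illustrative entry point; the real cpu_hold() spins with interrupts locked
 * until the test releases it.
 */
static void holder_fn(void *a, void *b, void *c)
{
        ARG_UNUSED(a);
        ARG_UNUSED(b);
        ARG_UNUSED(c);
}

void hold_other_cpus(void)
{
        /* Runtime behaviour follows the CPUs actually brought up. */
        unsigned int num_cpus = arch_num_cpus();

        for (unsigned int i = 0; i < num_cpus - 1; i++) {
                k_thread_create(&holder_threads[i], holder_stacks[i],
                                HOLDER_STACK_SZ, holder_fn, NULL, NULL, NULL,
                                K_HIGHEST_THREAD_PRIO, 0, K_NO_WAIT);
        }
}
#endif /* CONFIG_SMP */

Sizing the arrays with the Kconfig maximum keeps them valid for any CPU count
the image may boot with, while the arch_num_cpus() bound avoids creating
threads for CPUs that are not actually present.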


@@ -83,15 +83,12 @@ static int cleanup_test(struct unit_test *test)
 }
 #ifdef KERNEL
-#ifdef CONFIG_SMP
-#define NUM_CPUHOLD (CONFIG_MP_NUM_CPUS - 1)
-#else
-#define NUM_CPUHOLD 0
-#endif
-#define CPUHOLD_STACK_SZ (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
-static struct k_thread cpuhold_threads[NUM_CPUHOLD];
-K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, NUM_CPUHOLD, CPUHOLD_STACK_SZ);
+#ifdef CONFIG_SMP
+#define MAX_NUM_CPUHOLD (CONFIG_MP_MAX_NUM_CPUS - 1)
+#define CPUHOLD_STACK_SZ (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
+static struct k_thread cpuhold_threads[MAX_NUM_CPUHOLD];
+K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, MAX_NUM_CPUHOLD, CPUHOLD_STACK_SZ);
 static struct k_sem cpuhold_sem;
 volatile int cpuhold_active;
@@ -135,9 +132,13 @@ static void cpu_hold(void *arg1, void *arg2, void *arg3)
                 "1cpu test took too long (%d ms)", dt);
        arch_irq_unlock(key);
 }
+#endif /* CONFIG_SMP */
 void z_impl_z_test_1cpu_start(void)
 {
+#ifdef CONFIG_SMP
+       unsigned int num_cpus = arch_num_cpus();
        cpuhold_active = 1;
 #ifdef CONFIG_THREAD_NAME
        char tname[CONFIG_THREAD_MAX_NAME_LEN];
@@ -146,12 +147,8 @@ void z_impl_z_test_1cpu_start(void)
        /* Spawn N-1 threads to "hold" the other CPUs, waiting for
         * each to signal us that it's locked and spinning.
-        *
-        * Note that NUM_CPUHOLD can be a value that causes coverity
-        * to flag the following loop as DEADCODE so suppress the warning.
         */
-       /* coverity[DEADCODE] */
-       for (int i = 0; i < NUM_CPUHOLD; i++) {
+       for (int i = 0; i < num_cpus - 1; i++) {
                k_thread_create(&cpuhold_threads[i],
                                cpuhold_stacks[i], CPUHOLD_STACK_SZ,
                                (k_thread_entry_t) cpu_hold, NULL, NULL, NULL,
@@ -162,19 +159,23 @@ void z_impl_z_test_1cpu_start(void)
 #endif
                k_sem_take(&cpuhold_sem, K_FOREVER);
        }
+#endif
 }
 void z_impl_z_test_1cpu_stop(void)
 {
+#ifdef CONFIG_SMP
+       unsigned int num_cpus = arch_num_cpus();
        cpuhold_active = 0;
-       /* Note that NUM_CPUHOLD can be a value that causes coverity
-        * to flag the following loop as DEADCODE so suppress the warning.
-        */
-       /* coverity[DEADCODE] */
-       for (int i = 0; i < NUM_CPUHOLD; i++) {
+       for (int i = 0; i < num_cpus - 1; i++) {
                k_thread_abort(&cpuhold_threads[i]);
        }
+#endif
 }
 #ifdef CONFIG_USERSPACE
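
For context, z_impl_z_test_1cpu_start()/z_impl_z_test_1cpu_stop() back the
1cpu test wrappers; with the legacy ztest API a test that needs the other
CPUs parked is registered roughly like this (suite and test names below are
made up, not part of the change):

#include <ztest.h>

static void test_single_cpu_timing(void)
{
        /* Body runs with the other CPUs parked by z_test_1cpu_start(). */
        zassert_true(true, NULL);
}

void test_main(void)
{
        ztest_test_suite(one_cpu_suite,
                         ztest_1cpu_unit_test(test_single_cpu_timing));
        ztest_run_test_suite(one_cpu_suite);
}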


@@ -104,15 +104,13 @@ static int cleanup_test(struct ztest_unit_test *test)
 }
 #ifdef KERNEL
-#ifdef CONFIG_SMP
-#define NUM_CPUHOLD (CONFIG_MP_NUM_CPUS - 1)
-#else
-#define NUM_CPUHOLD 0
-#endif
-#define CPUHOLD_STACK_SZ (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
-static struct k_thread cpuhold_threads[NUM_CPUHOLD];
-K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, NUM_CPUHOLD, CPUHOLD_STACK_SZ);
+#ifdef CONFIG_SMP
+#define MAX_NUM_CPUHOLD (CONFIG_MP_MAX_NUM_CPUS - 1)
+#define CPUHOLD_STACK_SZ (512 + CONFIG_TEST_EXTRA_STACK_SIZE)
+static struct k_thread cpuhold_threads[MAX_NUM_CPUHOLD];
+K_KERNEL_STACK_ARRAY_DEFINE(cpuhold_stacks, MAX_NUM_CPUHOLD, CPUHOLD_STACK_SZ);
 static struct k_sem cpuhold_sem;
 volatile int cpuhold_active;
@@ -156,9 +154,13 @@ static void cpu_hold(void *arg1, void *arg2, void *arg3)
                 "1cpu test took too long (%d ms)", dt);
        arch_irq_unlock(key);
 }
+#endif /* CONFIG_SMP */
 void z_impl_z_test_1cpu_start(void)
 {
+#ifdef CONFIG_SMP
+       unsigned int num_cpus = arch_num_cpus();
        cpuhold_active = 1;
        char tname[CONFIG_THREAD_MAX_NAME_LEN];
@@ -166,12 +168,8 @@ void z_impl_z_test_1cpu_start(void)
        /* Spawn N-1 threads to "hold" the other CPUs, waiting for
         * each to signal us that it's locked and spinning.
-        *
-        * Note that NUM_CPUHOLD can be a value that causes coverity
-        * to flag the following loop as DEADCODE so suppress the warning.
         */
-       /* coverity[DEADCODE] */
-       for (int i = 0; i < NUM_CPUHOLD; i++) {
+       for (int i = 0; i < num_cpus - 1; i++) {
                k_thread_create(&cpuhold_threads[i], cpuhold_stacks[i], CPUHOLD_STACK_SZ,
                                (k_thread_entry_t)cpu_hold, NULL, NULL, NULL, K_HIGHEST_THREAD_PRIO,
                                0, K_NO_WAIT);
@@ -181,19 +179,20 @@ void z_impl_z_test_1cpu_start(void)
                }
                k_sem_take(&cpuhold_sem, K_FOREVER);
        }
+#endif
 }
 void z_impl_z_test_1cpu_stop(void)
 {
+#ifdef CONFIG_SMP
+       unsigned int num_cpus = arch_num_cpus();
        cpuhold_active = 0;
-       /* Note that NUM_CPUHOLD can be a value that causes coverity
-        * to flag the following loop as DEADCODE so suppress the warning.
-        */
-       /* coverity[DEADCODE] */
-       for (int i = 0; i < NUM_CPUHOLD; i++) {
+       for (int i = 0; i < num_cpus - 1; i++) {
                k_thread_abort(&cpuhold_threads[i]);
        }
+#endif
 }
 #ifdef CONFIG_USERSPACE


@@ -274,14 +274,14 @@ static void ztress_thread(void *data, void *prio, void *unused)
 static void thread_cb(const struct k_thread *cthread, void *user_data)
 {
 #define GET_IDLE_TID(i, tid) do {\
-       if (strcmp(tname, (CONFIG_MP_NUM_CPUS == 1) ? "idle" : "idle 0" STRINGIFY(i)) == 0) { \
+       if (strcmp(tname, (CONFIG_MP_MAX_NUM_CPUS == 1) ? "idle" : "idle 0" STRINGIFY(i)) == 0) { \
                idle_tid[i] = tid; \
        } \
 } while (0)
        const char *tname = k_thread_name_get((struct k_thread *)cthread);
-       LISTIFY(CONFIG_MP_NUM_CPUS, GET_IDLE_TID, (;), (k_tid_t)cthread);
+       LISTIFY(CONFIG_MP_MAX_NUM_CPUS, GET_IDLE_TID, (;), (k_tid_t)cthread);
 }
 static void ztress_init(struct ztress_context_data *thread_data)
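
The ztress hunk above keeps a Kconfig symbol rather than switching to
arch_num_cpus() because LISTIFY() is expanded by the preprocessor: its count
must be a compile-time literal, so only the symbol changes from
CONFIG_MP_NUM_CPUS to CONFIG_MP_MAX_NUM_CPUS. A rough illustration of that
constraint (SET_SLOT, slots and fill_slots are made-up names, not part of the
change):

#include <zephyr/kernel.h>
#include <zephyr/sys/util_macro.h>

/* LISTIFY(N, F, (;), args...) expands to F(0, args) ; F(1, args) ; ... at
 * preprocessing time, so N must be a literal the preprocessor can see, such
 * as a Kconfig value; a runtime call like arch_num_cpus() cannot be used.
 */
#define SET_SLOT(i, val) (slots[i] = (val))

static int slots[CONFIG_MP_MAX_NUM_CPUS];

void fill_slots(int val)
{
        /* With CONFIG_MP_MAX_NUM_CPUS == 2 this line becomes:
         *   (slots[0] = (val)) ; (slots[1] = (val));
         */
        LISTIFY(CONFIG_MP_MAX_NUM_CPUS, SET_SLOT, (;), val);
}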