tests: adding test cases for arch-dependent SMP function
Add another test case to test both the arch_curr_cpu() and arch_sched_ipi() architecture layer interfaces.

Signed-off-by: Enjia Mai <enjiax.mai@intel.com>
commit 7ac40aabc0
parent 0cb8c50238
4 changed files with 133 additions and 3 deletions
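For readers unfamiliar with the two interfaces under test, here is a minimal sketch of what they provide. It is illustrative only and not part of the commit: report_cpu() is a made-up function, and the headers reflect the Zephyr tree of this era.

#include <zephyr.h>
#include <sys/printk.h>

/* arch_curr_cpu() returns the kernel's per-CPU record (_cpu_t *) for the
 * CPU the caller is running on; arch_sched_ipi() raises the scheduler IPI
 * on the other CPUs, which is handled by z_sched_ipi().
 */
static void report_cpu(void)
{
	/* id of the CPU executing this code */
	int cpu_id = arch_curr_cpu()->id;

	printk("running on CPU %d of %d\n", cpu_id, CONFIG_MP_NUM_CPUS);

	/* signal the other CPUs' schedulers (handled by z_sched_ipi()) */
	arch_sched_ipi();
}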
@@ -747,6 +747,15 @@ config SCHED_IPI_SUPPORTED
 	  take an interrupt, which can be arbitrarily far in the
 	  future).
 
+config TRACE_SCHED_IPI
+	bool "Enable Test IPI"
+	help
+	  When true, it will add a hook into z_sched_ipi(), in order
+	  to check whether the scheduler IPI has been called, for
+	  testing purposes.
+	depends on SCHED_IPI_SUPPORTED
+	depends on MP_NUM_CPUS>1
+
 endmenu
 
 config TICKLESS_IDLE
@@ -1268,12 +1268,19 @@ void z_impl_k_wakeup(k_tid_t thread)
 	}
 }
 
+#ifdef CONFIG_TRACE_SCHED_IPI
+extern void z_trace_sched_ipi(void);
+#endif
+
 #ifdef CONFIG_SMP
 void z_sched_ipi(void)
 {
 	/* NOTE: When adding code to this, make sure this is called
 	 * at appropriate location when !CONFIG_SCHED_IPI_SUPPORTED.
 	 */
+#ifdef CONFIG_TRACE_SCHED_IPI
+	z_trace_sched_ipi();
+#endif
 }
 
 void z_sched_abort(struct k_thread *thread)
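Note that the kernel only declares z_trace_sched_ipi(); whatever enables CONFIG_TRACE_SCHED_IPI has to supply the definition. A minimal sketch of that contract follows; the counter name is illustrative, and the test's real implementation appears in the last hunk of this commit.

/* z_sched_ipi() runs on each CPU that receives the scheduler IPI, so a
 * test enabling CONFIG_TRACE_SCHED_IPI can count deliveries by defining
 * the hook itself.
 */
static int ipi_deliveries;	/* illustrative name, not from this commit */

void z_trace_sched_ipi(void)
{
	ipi_deliveries++;
}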
@@ -1,2 +1,3 @@
 CONFIG_ZTEST=y
 CONFIG_SMP=y
+CONFIG_TRACE_SCHED_IPI=y
@@ -189,8 +189,8 @@ static void spin_for_threads_exit(void)
 	k_busy_wait(DELAY_US);
 }
 
-static void spawn_threads(int prio, int thread_num,
-			  int equal_prio, k_thread_entry_t thread_entry, int delay)
+static void spawn_threads(int prio, int thread_num, int equal_prio,
+			  k_thread_entry_t thread_entry, int delay)
 {
 	int i;
 
@@ -441,6 +441,117 @@ void test_wakeup_threads(void)
 	cleanup_resources();
 }
 
+/* a thread entry for testing getting the current CPU */
+static void thread_get_cpu_entry(void *p1, void *p2, void *p3)
+{
+	int bsp_id = *(int *)p1;
+	int cpu_id = -1;
+
+	/* get the current cpu number for the running thread */
+	_cpu_t *curr_cpu = arch_curr_cpu();
+
+	/**TESTPOINT: call arch_curr_cpu() to get cpu struct */
+	zassert_true(curr_cpu != NULL,
+		     "test failed to get current cpu.");
+
+	cpu_id = curr_cpu->id;
+
+	zassert_true(bsp_id != cpu_id,
+		     "should not be the same as our BSP");
+
+	/* loop forever to ensure running on this CPU */
+	while (1) {
+		k_busy_wait(DELAY_US);
+	};
+}
+
+/**
+ * @brief Test getting a pointer to the current CPU
+ *
+ * @ingroup kernel_smp_tests
+ *
+ * @details The architecture layer provides a mechanism to return a pointer to
+ * the current kernel CPU record of the running CPU.
+ *
+ * We call arch_curr_cpu() and read its member, both in the main thread and in
+ * a spawned thread, and compare them. They shall differ in an SMP environment.
+ *
+ * @see arch_curr_cpu()
+ */
+void test_get_cpu(void)
+{
+	k_tid_t thread_id;
+
+	/* get the current cpu number */
+	int cpu_id = arch_curr_cpu()->id;
+
+	thread_id = k_thread_create(&t2, t2_stack, T2_STACK_SIZE,
+				    (k_thread_entry_t)thread_get_cpu_entry,
+				    &cpu_id, NULL, NULL,
+				    K_PRIO_COOP(2),
+				    K_INHERIT_PERMS, K_NO_WAIT);
+
+	k_busy_wait(DELAY_US);
+
+	k_thread_abort(thread_id);
+}
+
+#ifdef CONFIG_TRACE_SCHED_IPI
+/* global variable for testing IPI delivery */
+static int sched_ipi_has_called = -1;
+
+void z_trace_sched_ipi(void)
+{
+	sched_ipi_has_called++;
+}
+
+/**
+ * @brief Test the interprocessor interrupt
+ *
+ * @ingroup kernel_smp_tests
+ *
+ * @details The architecture layer provides a mechanism to issue an
+ * interprocessor interrupt to all other CPUs in the system, which invokes
+ * the scheduler IPI handler.
+ *
+ * We simply add a hook in z_sched_ipi() in order to check that it has been
+ * called once on a CPU other than the caller when arch_sched_ipi() is
+ * called.
+ *
+ * @see arch_sched_ipi()
+ */
+void test_smp_ipi(void)
+{
+	TC_PRINT("cpu num=%d", CONFIG_MP_NUM_CPUS);
+
+	sched_ipi_has_called = 0;
+
+	k_busy_wait(DELAY_US);
+
+	/* It shouldn't enter our IPI interrupt handler at this moment */
+	zassert_true(sched_ipi_has_called == 0, "shouldn't receive IPI,(%d)",
+		     sched_ipi_has_called);
+
+	for (int i = 1; i <= 3; i++) {
+		/* issue a sched ipi to tell the other CPU to run the thread */
+		arch_sched_ipi();
+
+		/* busy wait here until the IPI arrives */
+		k_busy_wait(DELAY_US);
+
+		/**TESTPOINT: check that we entered our IPI interrupt handler */
+		zassert_true(sched_ipi_has_called == i,
+			     "did not receive IPI.(%d)",
+			     sched_ipi_has_called);
+	}
+}
+#else
+void test_smp_ipi(void)
+{
+	ztest_test_skip();
+}
+#endif
+
 void test_main(void)
 {
 	/* Sleep a bit to guarantee that both CPUs enter an idle
@@ -456,7 +567,9 @@ void test_main(void)
 			 ztest_unit_test(test_preempt_resched_threads),
 			 ztest_unit_test(test_yield_threads),
 			 ztest_unit_test(test_sleep_threads),
-			 ztest_unit_test(test_wakeup_threads)
+			 ztest_unit_test(test_wakeup_threads),
+			 ztest_unit_test(test_smp_ipi),
+			 ztest_unit_test(test_get_cpu)
 			 );
 	ztest_run_test_suite(smp);
 }