tests/kernel/sched/schedule_api: Restore spinning for timer alignment

Commit 0cc362f873 ("tests/kernel: Simplify timer spinning") was
added to work around a qemu bug with dropped interrupts on x86_64.
But it turns out that the tick alignment that the original
implementation provided (fundamentally, it spins waiting on the timer
driver to report tick changes) was needed for correct operation on
nRF52.
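
For reference, "tick alignment" here means busy-waiting until the timer
driver reports a new tick/uptime value before the measured busy period
starts. A minimal sketch of the idea, assuming only k_uptime_get_32()
(the helper name is illustrative; this is not the code added by this
patch):

    #include <zephyr.h>

    /* Hypothetical helper: align to a tick boundary, then spin for ms */
    static void spin_aligned_for_ms(int ms)
    {
            u32_t start = k_uptime_get_32();

            /* Wait for the timer driver to report a new uptime value,
             * so the busy period starts right after a tick boundary.
             */
            while (k_uptime_get_32() == start) {
            }

            /* Now spin until the requested time has elapsed */
            start = k_uptime_get_32();
            while (k_uptime_get_32() - start < ms) {
            }
    }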

This effectively reverts that commit (and refactors all the spinning
into a single utility), replacing it with a workaround targeted at
qemu on x86_64 only.

Fixes #11721
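
At the call sites the change is mechanical; a sketch of one affected
test thread (thread_tslice, BUSY_MS and sema mirror names used by the
test, and this is not the exact test code):

    /* Keep the thread busy for longer than one timeslice, then signal
     * completion; previously this used k_busy_wait(1000 * BUSY_MS).
     */
    static void thread_tslice(void *p1, void *p2, void *p3)
    {
            spin_for_ms(BUSY_MS);
            k_sem_give(&sema);
    }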

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
Andy Ross, 2019-02-27 14:29:30 -08:00; committed by Kumar Gala
commit 3f4aa6316c
4 changed files with 27 additions and 3 deletions


@@ -10,6 +10,28 @@
K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
K_THREAD_STACK_ARRAY_DEFINE(tstacks, MAX_NUM_THREAD, STACK_SIZE);
void spin_for_ms(int ms)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_QEMU_TARGET)
/* qemu-system-x86_64 has a known bug with the hpet device
* where it will drop interrupts if you try to spin on the
* counter.
*/
k_busy_wait(ms * 1000);
#else
u32_t t32 = k_uptime_get_32();
while (k_uptime_get_32() - t32 < ms) {
/* In the posix arch, a busy loop takes no time, so
* let's make it take some
*/
if (IS_ENABLED(CONFIG_ARCH_POSIX)) {
k_busy_wait(50);
}
}
#endif
}
/**
 * @brief Test scheduling
 *


@@ -23,6 +23,8 @@ struct thread_data {
int executed;
};
void spin_for_ms(int ms);
void test_priority_cooperative(void);
void test_priority_preemptible(void);
void test_yield_cooperative(void);


@@ -51,7 +51,7 @@ static void thread_tslice(void *p1, void *p2, void *p3)
/* Keep the current thread busy for more than one slice, even though,
 * when timeslice used up the next thread should be scheduled in.
 */
-	k_busy_wait(1000 * BUSY_MS);
+	spin_for_ms(BUSY_MS);
k_sem_give(&sema);
}


@@ -55,7 +55,7 @@ static void thread_tslice(void *p1, void *p2, void *p3)
 * even though, when timeslice used up the next thread
 * should be scheduled in.
 */
-	k_busy_wait(1000 * BUSY_MS);
+	spin_for_ms(BUSY_MS);
k_sem_give(&sema1);
}
@@ -102,7 +102,7 @@ void test_slice_scheduling(void)
 * even though, when timeslice used up the next thread
 * should be scheduled in.
 */
-	k_busy_wait(1000 * BUSY_MS);
+	spin_for_ms(BUSY_MS);
/* relinquish CPU and wait for each thread to complete*/
for (int i = 0; i < NUM_THREAD; i++) {