tests/kernel/sched/schedule_api: Restore spinning for timer alignment
Commit 0cc362f873
("tests/kernel: Simplify timer spinning") was
added to work around a qemu bug with dropped interrupts on x86_64.
But it turns out that the tick alignment that the original
implementation provided (fundamentally, it spins waiting on the timer
driver to report tick changes) was needed for correct operation on
nRF52.
This effectively reverts that commit (and refactors all the spinning
into a single utility), replacing it with a workaround targeted at
qemu on x86_64 only. Fixes #11721
Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
This commit is contained in:
parent
a334ac2045
commit
3f4aa6316c
4 changed files with 27 additions and 3 deletions
|
@ -10,6 +10,28 @@
|
|||
K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
|
||||
K_THREAD_STACK_ARRAY_DEFINE(tstacks, MAX_NUM_THREAD, STACK_SIZE);
|
||||
|
||||
void spin_for_ms(int ms)
|
||||
{
|
||||
#if defined(CONFIG_X86_64) && defined(CONFIG_QEMU_TARGET)
|
||||
/* qemu-system-x86_64 has a known bug with the hpet device
|
||||
* where it will drop interrupts if you try to spin on the
|
||||
* counter.
|
||||
*/
|
||||
k_busy_wait(ms * 1000);
|
||||
#else
|
||||
u32_t t32 = k_uptime_get_32();
|
||||
|
||||
while (k_uptime_get_32() - t32 < ms) {
|
||||
/* In the posix arch, a busy loop takes no time, so
|
||||
* let's make it take some
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_ARCH_POSIX)) {
|
||||
k_busy_wait(50);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Test scheduling
|
||||
*
|
||||
|
|
|
@ -23,6 +23,8 @@ struct thread_data {
|
|||
int executed;
|
||||
};
|
||||
|
||||
/* Busy-spin for approximately this many milliseconds (parameter is
 * milliseconds, not ticks — matches the definition and all callers,
 * which pass BUSY_MS).
 */
void spin_for_ms(int ms);
|
||||
|
||||
void test_priority_cooperative(void);
|
||||
void test_priority_preemptible(void);
|
||||
void test_yield_cooperative(void);
|
||||
|
|
|
@ -51,7 +51,7 @@ static void thread_tslice(void *p1, void *p2, void *p3)
|
|||
/* Keep the current thread busy for more than one slice, even though,
|
||||
* when timeslice used up the next thread should be scheduled in.
|
||||
*/
|
||||
k_busy_wait(1000 * BUSY_MS);
|
||||
spin_for_ms(BUSY_MS);
|
||||
k_sem_give(&sema);
|
||||
}
|
||||
|
||||
|
|
|
@ -55,7 +55,7 @@ static void thread_tslice(void *p1, void *p2, void *p3)
|
|||
* even though, when timeslice used up the next thread
|
||||
* should be scheduled in.
|
||||
*/
|
||||
k_busy_wait(1000 * BUSY_MS);
|
||||
spin_for_ms(BUSY_MS);
|
||||
k_sem_give(&sema1);
|
||||
}
|
||||
|
||||
|
@ -102,7 +102,7 @@ void test_slice_scheduling(void)
|
|||
* even though, when timeslice used up the next thread
|
||||
* should be scheduled in.
|
||||
*/
|
||||
k_busy_wait(1000 * BUSY_MS);
|
||||
spin_for_ms(BUSY_MS);
|
||||
|
||||
/* relinquish CPU and wait for each thread to complete*/
|
||||
for (int i = 0; i < NUM_THREAD; i++) {
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue