tests: latency_measure: Update interrupt latency
Updates the interrupt to thread benchmark tests to address a number of items:

1. Updates descriptions to correctly indicate that it is not interrupt latency being measured, but the time to leave the interrupt and return to a thread.
2. Repeats the test numerous times instead of just once to get an average.
3. Overhead from obtaining timestamps is now accounted for.
4. Adds support for returning to a user thread.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
parent 8c80bbb96e
commit 4e7bb482b1
3 changed files with 151 additions and 143 deletions
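Sketched below is the kind of measurement loop that items 2 and 3 describe: run the ISR-to-thread round trip many times, accumulate the elapsed cycles, and report the average. This is a minimal illustration built on Zephyr's public irq_offload() and timing APIs, not the benchmark's actual code; the names sample_isr() and isr_to_thread_avg_cycles() are invented for the example and do not appear in the commit.

#include <zephyr/kernel.h>
#include <zephyr/irq_offload.h>
#include <zephyr/timing/timing.h>

static volatile timing_t isr_timestamp;

static void sample_isr(const void *arg)
{
	ARG_UNUSED(arg);

	/* Timestamp taken inside the ISR, just before it returns */
	isr_timestamp = timing_counter_get();
}

static uint32_t isr_to_thread_avg_cycles(uint32_t num_iterations)
{
	uint64_t sum = 0ULL;

	timing_init();
	timing_start();

	for (uint32_t i = 0; i < num_iterations; i++) {
		irq_offload(sample_isr, NULL);          /* enter and leave the ISR */

		timing_t finish = timing_counter_get(); /* back in the thread      */
		timing_t start = isr_timestamp;

		sum += timing_cycles_get(&start, &finish);
	}

	timing_stop();

	/* Averaging smooths out per-iteration jitter; the benchmark additionally
	 * subtracts the measured cost of taking the timestamps themselves.
	 */
	return (uint32_t)(sum / num_iterations);
}

The benchmark itself stores the ISR timestamp in a shared timestamp structure and reports through its PRINT_STATS_AVG() macro, as the diff below shows.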
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2012-2014 Wind River Systems, Inc.
- * Copyright (c) 2017 Intel Corporation.
+ * Copyright (c) 2017, 2023 Intel Corporation.
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -10,8 +10,16 @@
  *
  * @brief Measure time from ISR back to interrupted thread
  *
- * This file contains test that measures time to switch from the interrupt
- * handler back to the interrupted thread.
+ * This file covers three interrupt to threads scenarios:
+ * 1. ISR returning to the interrupted kernel thread
+ * 2. ISR returning to a different (kernel) thread
+ * 3. ISR returning to a different (user) thread
+ *
+ * In all three scenarios, the source of the ISR is a software generated
+ * interrupt originating from a kernel thread. Ideally, these tests would
+ * also cover the scenarios where the interrupted thread is a user thread.
+ * However, some implementations of the irq_offload() routine lock interrupts,
+ * which is not allowed in userspace.
  */
 
 #include <zephyr/kernel.h>
@@ -19,39 +27,133 @@
 
 #include <zephyr/irq_offload.h>
 
-static volatile int flag_var;
-
-static timing_t timestamp_start;
-static timing_t timestamp_end;
+static K_SEM_DEFINE(isr_sem, 0, 1);
 
 /**
- *
- * @brief Test ISR used to measure best case interrupt latency
+ * @brief Test ISR used to measure time to return to thread
  *
- * The interrupt handler gets the second timestamp.
- *
+ * The interrupt handler gets the first timestamp used in the test.
+ * It then copies the timestamp into a message queue and returns.
  */
-static void latency_test_isr(const void *unused)
+static void test_isr(const void *arg)
 {
-	ARG_UNUSED(unused);
-	flag_var = 1;
+	struct k_sem *sem = (struct k_sem *)arg;
 
-	timestamp_start = timing_counter_get();
+	if (arg != NULL) {
+		k_sem_give(sem);
+	}
+
+	timestamp.sample = timing_counter_get();
 }
 
 /**
- *
- * @brief Interrupt preparation function
+ * @brief Measure time to return from interrupt
  *
- * Function makes all the test preparations: registers the interrupt handler,
- * gets the first timestamp and invokes the software interrupt.
- *
+ * This function is used to measure the time it takes to return from an
+ * interrupt.
  */
-static void make_int(void)
+static void int_to_interrupted_thread(uint32_t num_iterations, uint64_t *sum)
 {
-	flag_var = 0;
-	irq_offload(latency_test_isr, NULL);
-	timestamp_end = timing_counter_get();
+	timing_t start;
+	timing_t finish;
+
+	*sum = 0ull;
+
+	for (uint32_t i = 0; i < num_iterations; i++) {
+		irq_offload(test_isr, NULL);
+		finish = timing_counter_get();
+		start = timestamp.sample;
+
+		*sum += timing_cycles_get(&start, &finish);
+	}
 }
 
+static void start_thread_entry(void *p1, void *p2, void *p3)
+{
+	uint32_t num_iterations = (uint32_t)(uintptr_t)p1;
+	struct k_sem *sem = p2;
+
+	ARG_UNUSED(p3);
+
+	uint64_t sum = 0ull;
+	timing_t start;
+	timing_t finish;
+
+	/* Ensure that <isr_sem> is unavailable */
+
+	(void) k_sem_take(sem, K_NO_WAIT);
+	k_thread_start(&alt_thread);
+
+	for (uint32_t i = 0; i < num_iterations; i++) {
+
+		/* 1. Wait on an unavailable semaphore */
+
+		k_sem_take(sem, K_FOREVER);
+
+		/* 3. Obtain the start and finish timestamps */
+
+		finish = timing_counter_get();
+		start = timestamp.sample;
+
+		sum += timing_cycles_get(&start, &finish);
+	}
+
+	timestamp.cycles = sum;
+}
+
+static void alt_thread_entry(void *p1, void *p2, void *p3)
+{
+	uint32_t num_iterations = (uint32_t)(uintptr_t)p1;
+	struct k_sem *sem = p2;
+
+	ARG_UNUSED(p3);
+
+	for (uint32_t i = 0; i < num_iterations; i++) {
+
+		/* 2. Trigger the test_isr() to execute */
+
+		irq_offload(test_isr, sem);
+
+		/*
+		 * ISR expected to have awakened higher priority start_thread
+		 * thereby preempting alt_thread.
+		 */
+	}
+
+	k_thread_join(&start_thread, K_FOREVER);
+}
+
+static void int_to_another_thread(uint32_t num_iterations, uint64_t *sum,
+				  uint32_t options)
+{
+	int priority;
+	*sum = 0ull;
+
+	priority = k_thread_priority_get(k_current_get());
+
+	k_thread_create(&start_thread, start_stack,
+			K_THREAD_STACK_SIZEOF(start_stack),
+			start_thread_entry,
+			(void *)(uintptr_t)num_iterations, &isr_sem, NULL,
+			priority - 2, options, K_FOREVER);
+
+	k_thread_create(&alt_thread, alt_stack,
+			K_THREAD_STACK_SIZEOF(alt_stack),
+			alt_thread_entry,
+			(void *)(uintptr_t)num_iterations, &isr_sem, NULL,
+			priority - 1, 0, K_FOREVER);
+
+#if CONFIG_USERSPACE
+	if (options != 0) {
+		k_thread_access_grant(&start_thread, &isr_sem, &alt_thread);
+	}
+#endif
+
+	k_thread_start(&start_thread);
+
+	k_thread_join(&alt_thread, K_FOREVER);
+
+	*sum = timestamp.cycles;
+}
+
 /**
@@ -60,24 +162,34 @@ static void make_int(void)
  *
  * @return 0 on success
  */
-int int_to_thread(void)
+int int_to_thread(uint32_t num_iterations)
 {
-	uint32_t diff;
-	bool failed = false;
-	const char *notes = "";
+	uint64_t sum;
 
 	timing_start();
 	TICK_SYNCH();
-	make_int();
-	if (flag_var != 1) {
-		error_count++;
-		notes = "Flag variable did not change";
-		failed = true;
-	}
 
-	diff = timing_cycles_get(&timestamp_start, &timestamp_end);
-	PRINT_STATS("Switch from ISR back to interrupted thread",
-		    diff, failed, notes);
+	int_to_interrupted_thread(num_iterations, &sum);
+
+	PRINT_STATS_AVG("Switch from ISR back to interrupted thread",
+			(uint32_t)sum, num_iterations, false, "");
+
+	/* ************** */
+
+	int_to_another_thread(num_iterations, &sum, 0);
+
+	PRINT_STATS_AVG("Switch from ISR to another thread (kernel)",
+			(uint32_t)sum, num_iterations, false, "");
+
+	/* ************** */
+
+#if CONFIG_USERSPACE
+	int_to_another_thread(num_iterations, &sum, K_USER);
+
+	PRINT_STATS_AVG("Switch from ISR to another thread (user)",
+			(uint32_t)sum, num_iterations, false, "");
+#endif
+
 	timing_stop();
 	return 0;
 }
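The new user-thread scenario relies on creating the measurement thread with the K_USER option and granting it access to the kernel objects it touches, as int_to_another_thread() does above with isr_sem and alt_thread. A standalone sketch of that pattern follows, with illustrative names (waiter_thread, wake_sem) and assuming CONFIG_USERSPACE=y; it is not code from this commit.

#include <zephyr/kernel.h>

#define WAITER_STACK_SIZE 1024
#define WAITER_PRIORITY   5

K_THREAD_STACK_DEFINE(waiter_stack, WAITER_STACK_SIZE);
K_SEM_DEFINE(wake_sem, 0, 1);

static struct k_thread waiter_thread;

static void waiter_entry(void *p1, void *p2, void *p3)
{
	struct k_sem *sem = p1;

	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	/* Runs in user mode; pends until an ISR or another thread gives the semaphore */
	k_sem_take(sem, K_FOREVER);
}

void spawn_user_waiter(void)
{
	k_thread_create(&waiter_thread, waiter_stack,
			K_THREAD_STACK_SIZEOF(waiter_stack),
			waiter_entry, &wake_sem, NULL, NULL,
			WAITER_PRIORITY, K_USER, K_FOREVER);

	/* A user-mode thread may only reference kernel objects it has been
	 * explicitly granted access to (requires CONFIG_USERSPACE=y).
	 */
	k_thread_access_grant(&waiter_thread, &wake_sem);

	k_thread_start(&waiter_thread);
}

The grant has to be in place before the user thread references the semaphore, which is why it is done before k_thread_start().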
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2012-2014 Wind River Systems, Inc.
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-
-/**
- * @file
- *
- * @brief measure time from ISR to a rescheduled thread
- *
- * This file contains test that measures time to switch from an interrupt
- * handler to executing a thread after rescheduling. In other words, execution
- * after interrupt handler resume in a different thread than the one which got
- * interrupted.
- */
-
-#include <zephyr/kernel.h>
-#include <zephyr/irq_offload.h>
-
-#include "utils.h"
-
-static timing_t timestamp_start;
-static timing_t timestamp_end;
-static struct k_work work;
-
-K_SEM_DEFINE(INTSEMA, 0, 1);
-K_SEM_DEFINE(WORKSEMA, 0, 1);
-
-/**
- *
- * @brief Test ISR used to measure best case interrupt latency
- *
- * The interrupt handler gets the second timestamp.
- *
- */
-static void latency_test_isr(const void *unused)
-{
-	ARG_UNUSED(unused);
-
-	k_work_submit(&work);
-	timestamp_start = timing_counter_get();
-}
-
-static void worker(struct k_work *item)
-{
-	(void)item;
-
-	timestamp_end = timing_counter_get();
-	k_sem_give(&WORKSEMA);
-}
-
-/**
- *
- * @brief Software interrupt generating thread
- *
- * Lower priority thread that, when it starts, it waits for a semaphore. When
- * it gets it, released by the main thread, sets up the interrupt handler and
- * generates the software interrupt
- *
- * @return 0 on success
- */
-void int_thread(void *p1, void *p2, void *p3)
-{
-	ARG_UNUSED(p1);
-	ARG_UNUSED(p2);
-	ARG_UNUSED(p3);
-
-	k_sem_take(&INTSEMA, K_FOREVER);
-	irq_offload(latency_test_isr, NULL);
-	k_thread_suspend(k_current_get());
-}
-
-K_THREAD_DEFINE(int_thread_id, 512, int_thread, NULL, NULL,
-		NULL, 11, 0, 0);
-
-/**
- *
- * @brief The test main function
- *
- * @return 0 on success
- */
-int int_to_thread_evt(void)
-{
-	uint32_t diff;
-
-	k_work_init(&work, worker);
-
-	timing_start();
-	TICK_SYNCH();
-	k_sem_give(&INTSEMA);
-	k_sem_take(&WORKSEMA, K_FOREVER);
-	timing_stop();
-
-	diff = timing_cycles_get(&timestamp_start, &timestamp_end);
-
-	PRINT_STATS("Time from ISR to executing a different thread",
-		    diff, false, "");
-
-	return 0;
-}
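The removed test measured the ISR-to-thread handoff indirectly through the system workqueue: the ISR submitted a k_work item and the workqueue thread took the second timestamp. The core pattern it relied on is sketched below as a standalone illustration with made-up names; it is not the deleted file itself.

#include <zephyr/kernel.h>
#include <zephyr/irq_offload.h>

static struct k_work demo_work;
static K_SEM_DEFINE(done_sem, 0, 1);

static void demo_work_handler(struct k_work *item)
{
	ARG_UNUSED(item);

	/* Runs later in the system workqueue thread, not in the ISR */
	k_sem_give(&done_sem);
}

static void demo_isr(const void *arg)
{
	ARG_UNUSED(arg);

	/* Defer the rest of the processing to the workqueue */
	k_work_submit(&demo_work);
}

void run_isr_to_workqueue_once(void)
{
	k_work_init(&demo_work, demo_work_handler);

	irq_offload(demo_isr, NULL);        /* simulate the interrupt      */
	k_sem_take(&done_sem, K_FOREVER);   /* wait for the deferred work  */
}

The rewritten test above replaces this with a semaphore given directly from the ISR, so the workqueue is no longer part of the measured path.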
@@ -45,8 +45,7 @@ struct k_thread alt_thread;
 int error_count; /* track number of errors */
 
 extern void thread_switch_yield(uint32_t num_iterations, bool is_cooperative);
-extern void int_to_thread(void);
-extern void int_to_thread_evt(void);
+extern void int_to_thread(uint32_t num_iterations);
 extern void sema_test_signal(void);
 extern void mutex_lock_unlock(void);
 extern int sema_context_switch(void);
@@ -81,9 +80,7 @@ static void test_thread(void *arg1, void *arg2, void *arg3)
 	/* Cooperative threads context switching */
 	thread_switch_yield(NUM_ITERATIONS, true);
 
-	int_to_thread();
-
-	int_to_thread_evt();
+	int_to_thread(NUM_ITERATIONS);
 
 	suspend_resume();
 
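Item 3 of the commit message, accounting for the overhead of obtaining timestamps, is handled outside the hunks shown here. The usual technique is to time back-to-back counter reads and subtract that calibration value from each reported average; a rough sketch under that assumption, with an illustrative helper name:

#include <zephyr/timing/timing.h>

/* Estimate the cycles consumed by taking a pair of timestamps so that this
 * fixed cost can be subtracted from each measured interval. Illustrative
 * only; assumes timing_init()/timing_start() have already been called, and
 * the benchmark's own calibration helper may differ.
 */
static uint32_t timestamp_overhead_cycles(uint32_t num_iterations)
{
	uint64_t sum = 0ULL;

	for (uint32_t i = 0; i < num_iterations; i++) {
		timing_t start = timing_counter_get();
		timing_t finish = timing_counter_get();

		sum += timing_cycles_get(&start, &finish);
	}

	return (uint32_t)(sum / num_iterations);
}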