tests: arc_vpx_lock

Add tests to verify arc_vpx_lock/unlock API.

Signed-off-by: Peter Mitsis <peter.mitsis@intel.com>
This commit is contained in:
Peter Mitsis 2024-08-26 20:30:31 +00:00 committed by Anas Nashif
commit 091c6664c5
5 changed files with 189 additions and 0 deletions

View file

@ -0,0 +1,15 @@
# Copyright (c) 2024 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

cmake_minimum_required(VERSION 3.20.0)

find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(arc_vpx_lock)

target_sources(app PRIVATE
src/main.c
)

# The MetaWare (arcmwdt) toolchain keeps the VPX fixed-point support headers
# outside its default include path; add them when building with that compiler.
if(COMPILER STREQUAL arcmwdt)
zephyr_include_directories(${ARCMWDT_TOOLCHAIN_PATH}/MetaWare/arc/lib/src/fx/include/)
endif()

View file

@ -0,0 +1,12 @@
Title: ARC VPX Lock
Description:
This test verifies that the ARC VPX lock/unlock mechanism, which is used to
bookend code that accesses the ARC VPX vector registers, works correctly.
Because the mechanism only brackets the relevant code sections and does not
require the registers themselves to be touched, the test never accesses the
VPX registers directly. It does, however, check that the system behaves as
expected when the lock/unlock API is exercised.

View file

@ -0,0 +1,3 @@
# Enable the ztest test framework used by src/main.c
CONFIG_ZTEST=y
# Test threads use their own stack; a modest main stack suffices
CONFIG_MAIN_STACK_SIZE=1024
# Enable the cooperative VPX sharing (arc_vpx_lock/unlock) API under test
CONFIG_ARC_VPX_COOPERATIVE_SHARING=y

View file

@ -0,0 +1,148 @@
/*
* Copyright (c) 2024 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr/ztest.h>
#include <zephyr/arch/arc/v2/vpx/arc_vpx.h>
#ifndef CONFIG_ARC_VPX_COOPERATIVE_SHARING
#error "Rebuild with the ARC_VPX_COOPERATIVE_SHARING config option enabled"
#endif
#define STACK_SIZE (1024 + CONFIG_TEST_EXTRA_STACK_SIZE)

/* Forward declaration: timer callback that force-releases the VPX lock */
static void timer_func(struct k_timer *timer);

/* Stack for the payload thread spawned by each test */
K_THREAD_STACK_DEFINE(payload_stack, STACK_SIZE);

/* One-shot timer whose expiry handler force-unlocks the VPX lock */
static K_TIMER_DEFINE(my_timer, timer_func, NULL);

static struct k_thread payload_thread;

/* NOTE(review): isr_result is never referenced anywhere in this file —
 * confirm whether it is intentional scaffolding or can be removed.
 */
static volatile int isr_result;

/* VPX lock id (the CPU id) that the timer ISR will forcibly release */
static volatile unsigned int isr_vpx_lock_id;
/**
* Obtain the current CPU id.
*/
/**
 * Obtain the id of the CPU the caller is executing on.
 *
 * Interrupts are locked around the read so the thread cannot migrate to a
 * different CPU between sampling _current_cpu and returning.
 */
static int current_cpu_id_get(void)
{
	int irq_key = arch_irq_lock();
	int cpu_id = _current_cpu->id;

	arch_irq_unlock(irq_key);

	return cpu_id;
}
/**
 * Timer expiry handler: forcibly release the VPX lock.
 *
 * Runs in ISR context. The lock id to release (the payload thread's CPU id)
 * is stashed in @a isr_vpx_lock_id by the payload before starting the timer.
 */
static void timer_func(struct k_timer *timer)
{
	/* Mark the unused parameter, consistent with the rest of this file */
	ARG_UNUSED(timer);

	arc_vpx_unlock_force(isr_vpx_lock_id);
}
static void arc_vpx_lock_unlock_timed_payload(void *p1, void *p2, void *p3)
{
int status;
unsigned int cpu_id;
unsigned int lock_id;
cpu_id = (unsigned int)(uintptr_t)(p1);
ARG_UNUSED(p2);
ARG_UNUSED(p3);
status = arc_vpx_lock(K_NO_WAIT);
zassert_equal(0, status, "Expected return value %d, not %d\n", 0, status);
/*
* In 1 second, forcibly release the VPX lock. However, wait up to
* 5 seconds before considering this a failure.
*/
isr_vpx_lock_id = cpu_id;
k_timer_start(&my_timer, K_MSEC(1000), K_FOREVER);
status = arc_vpx_lock(K_MSEC(5000));
zassert_equal(0, status, "Expected return value %d, not %d\n", 0, status);
arc_vpx_unlock();
}
/**
 * Verify that a thread blocked on arc_vpx_lock() is released when the lock
 * is forcibly dropped from ISR context (via arc_vpx_unlock_force()).
 */
ZTEST(vpx_lock, test_arc_vpx_lock_unlock_timed)
{
	int prio = k_thread_priority_get(k_current_get());
	int cpu_id = current_cpu_id_get();

	/* Spawn the payload at higher priority so it runs as soon as started */
	k_thread_create(&payload_thread, payload_stack, STACK_SIZE,
			arc_vpx_lock_unlock_timed_payload,
			(void *)(uintptr_t)cpu_id, NULL, NULL,
			prio - 2, 0, K_FOREVER);

#if defined(CONFIG_SCHED_CPU_MASK) && (CONFIG_MP_MAX_NUM_CPUS > 1)
	/* Pin the payload to this CPU so the forced unlock targets it */
	k_thread_cpu_pin(&payload_thread, cpu_id);
#endif

	k_thread_start(&payload_thread);
	k_thread_join(&payload_thread, K_FOREVER);
}
/**
 * Payload for test_arc_vpx_lock_unlock.
 *
 * Exercises the basic lock/unlock contract: a free lock can be taken,
 * a held lock reports -EBUSY (no wait) or -EAGAIN (timed wait), and
 * unlocking makes it available again.
 *
 * @param p1 unused
 * @param p2 unused
 * @param p3 unused
 */
static void arc_vpx_lock_unlock_payload(void *p1, void *p2, void *p3)
{
	int rc;

	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	/* The VPX lock is available; take it. */
	rc = arc_vpx_lock(K_NO_WAIT);
	zassert_equal(0, rc, "Expected return value %d, not %d\n", 0, rc);

	/* The VPX lock has already been taken; expect errors */
	rc = arc_vpx_lock(K_NO_WAIT);
	zassert_equal(-EBUSY, rc, "Expected return value %d (-EBUSY), not %d\n",
		      -EBUSY, rc);

	rc = arc_vpx_lock(K_MSEC(10));
	zassert_equal(-EAGAIN, rc, "Expected return value %d (-EAGAIN), not %d\n",
		      -EAGAIN, rc);

	/* Verify that unlocking makes it available */
	arc_vpx_unlock();

	rc = arc_vpx_lock(K_NO_WAIT);
	zassert_equal(0, rc, "Expected return value %d, not %d\n", 0, rc);

	arc_vpx_unlock();
}
/**
 * Verify the basic arc_vpx_lock()/arc_vpx_unlock() contract from a
 * dedicated payload thread (optionally pinned to the current CPU).
 */
ZTEST(vpx_lock, test_arc_vpx_lock_unlock)
{
	int prio = k_thread_priority_get(k_current_get());
	int cpu_id = current_cpu_id_get();

	/* Spawn the payload at higher priority so it runs as soon as started */
	k_thread_create(&payload_thread, payload_stack, STACK_SIZE,
			arc_vpx_lock_unlock_payload, NULL, NULL, NULL,
			prio - 2, 0, K_FOREVER);

#if defined(CONFIG_SCHED_CPU_MASK) && (CONFIG_MP_MAX_NUM_CPUS > 1)
	/* Keep the payload on this CPU when CPU masks are available */
	k_thread_cpu_pin(&payload_thread, cpu_id);
#endif

	k_thread_start(&payload_thread);
	k_thread_join(&payload_thread, K_FOREVER);
}
ZTEST_SUITE(vpx_lock, NULL, NULL, NULL, NULL, NULL);

View file

@ -0,0 +1,11 @@
tests:
  # Single-CPU variant of the VPX lock/unlock test
  arch.arc.vpx_lock:
    filter: CONFIG_ISA_ARCV2
    toolchain_allow: arcmwdt
    platform_allow: nsim/nsim_vpx5
  # SMP variant: pins the payload thread to a CPU via CPU masks
  arch.arc.vpx_lock.cpu_mask:
    filter: CONFIG_ISA_ARCV2 and (CONFIG_MP_MAX_NUM_CPUS > 1)
    toolchain_allow: arcmwdt
    platform_allow: nsim/nsim_vpx5
    extra_configs:
      - CONFIG_SCHED_CPU_MASK=y