scripts: tests: twister: Black box testing

In addition to the existing white-box testing and unit tests,
we would like some black-box coverage of twister's
command-line interface.

This serves as a proof of concept for future expansion.

Special dummy test suites are added as test data for the black-box tests.
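Each black-box test drives twister the way a user would: it loads
scripts/twister as __main__ with a mocked command line and asserts on the
output and exit code. A minimal sketch of the pattern (the -T path and the
flag are illustrative only):

    import importlib.machinery
    import importlib.util
    import sys

    import mock
    import pytest

    loader = importlib.machinery.SourceFileLoader('__main__', 'scripts/twister')
    spec = importlib.util.spec_from_loader(loader.name, loader)
    twister_module = importlib.util.module_from_spec(spec)

    args = ['-T', 'scripts/tests/twister_blackbox/test_data/tests', '--list-tags']
    # twister always terminates via sys.exit(); '0' indicates success.
    with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
            pytest.raises(SystemExit) as sys_exit:
        loader.exec_module(twister_module)
    assert str(sys_exit.value) == '0'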

Signed-off-by: Lukasz Mrugala <lukaszx.mrugala@intel.com>
Author:    Lukasz Mrugala <lukaszx.mrugala@intel.com>
Date:      2023-08-08 11:44:07 +02:00
Committed: Carles Cufí
Commit:    4d467af7f4
21 changed files with 688 additions and 0 deletions

.github/workflows/blackbox_tests.yml

@@ -0,0 +1,90 @@
# Copyright (c) 2023 Intel Corporation.
# SPDX-License-Identifier: Apache-2.0

name: Twister BlackBox TestSuite

on:
  push:
    branches:
      - main
    paths:
      - 'scripts/pylib/twister/**'
      - 'scripts/twister'
      - 'scripts/tests/twister_blackbox/**'
      - '.github/workflows/blackbox_tests.yml'
  pull_request:
    branches:
      - main
    paths:
      - 'scripts/pylib/twister/**'
      - 'scripts/twister'
      - 'scripts/tests/twister_blackbox/**'
      - '.github/workflows/blackbox_tests.yml'

jobs:
  twister-tests:
    name: Twister Black Box Tests
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        python-version: [3.8, 3.9, '3.10']
        os: [ubuntu-22.04]
    container:
      image: ghcr.io/zephyrproject-rtos/ci:v0.26.4
    env:
      ZEPHYR_SDK_INSTALL_DIR: /opt/toolchains/zephyr-sdk-0.16.1
    steps:
      - name: Apply Container Owner Mismatch Workaround
        run: |
          # FIXME: The owner UID of the GITHUB_WORKSPACE directory may not
          #        match the container user UID because of the way GitHub
          #        Actions runner is implemented. Remove this workaround when
          #        GitHub comes up with a fundamental fix for this problem.
          git config --global --add safe.directory ${GITHUB_WORKSPACE}
      - name: Checkout
        uses: actions/checkout@v3
      - name: Set Up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Go Into Venv
        shell: bash
        run: |
          python3 -m pip install --user virtualenv
          python3 -m venv env
          source env/bin/activate
          echo "$(which python)"
      - name: Install Packages
        run: |
          python3 -m pip install -U -r scripts/requirements-base.txt -r scripts/requirements-build-test.txt -r scripts/requirements-run-test.txt
      - name: Run Pytest For Twister Black Box Tests
        shell: bash
        env:
          ZEPHYR_BASE: ./
          ZEPHYR_TOOLCHAIN_VARIANT: zephyr
        run: |
          echo "Run twister tests"
          source zephyr-env.sh
          PYTHONPATH="./scripts/tests" pytest ./scripts/tests/twister_blackbox
      - name: Upload Unit Test Results
        if: success() || failure()
        uses: actions/upload-artifact@v2
        with:
          name: Black Box Test Results (Python ${{ matrix.python-version }})
          path: |
            twister-out*/twister.log
            twister-out*/twister.json
            twister-out*/testplan.log
          retention-days: 14
      - name: Clear Workspace
        if: success() || failure()
        run: |
          rm -rf twister-out*/
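
To reproduce the test step locally from a Zephyr checkout (with the same
requirements files installed), the workflow's final run block boils down to:

    source zephyr-env.sh
    PYTHONPATH="./scripts/tests" pytest ./scripts/tests/twister_blackbox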

scripts/tests/twister_blackbox/conftest.py

@@ -0,0 +1,45 @@
#!/usr/bin/env python3
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
'''Common fixtures for use in testing the twister tool.'''
import logging
import mock
import os
import pytest
import sys

ZEPHYR_BASE = os.getenv('ZEPHYR_BASE')
TEST_DATA = os.path.join(ZEPHYR_BASE, 'scripts', 'tests',
                         'twister_blackbox', 'test_data')

# Make twisterlib and the twister helper scripts importable.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts"))

# Patched over TestPlan.TESTSUITE_FILENAME so that twister only discovers
# the dummy suites under test_data (named test_data.yaml), not the real
# testcase.yaml suites in the tree.
testsuite_filename_mock = mock.PropertyMock(return_value='test_data.yaml')


@pytest.fixture(name='zephyr_base')
def zephyr_base_directory():
    return ZEPHYR_BASE


@pytest.fixture(name='zephyr_test_data')
def zephyr_test_directory():
    return TEST_DATA


@pytest.fixture
def clear_log():
    # Required to fix the pytest logging error
    # See: https://github.com/pytest-dev/pytest/issues/5502
    loggers = [logging.getLogger()] \
        + list(logging.Logger.manager.loggerDict.values()) \
        + [logging.getLogger(name) for
           name in logging.root.manager.loggerDict]
    for logger in loggers:
        # loggerDict may contain logging.PlaceHolder objects with no handlers.
        handlers = getattr(logger, 'handlers', [])
        for handler in handlers:
            logger.removeHandler(handler)
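
The test modules below consume these helpers as a class-level patch plus
fixtures; a condensed sketch of the pattern (the class and test names here
are hypothetical):

    import mock
    import pytest

    from conftest import testsuite_filename_mock
    from twisterlib.testplan import TestPlan

    # Every test in the class sees the dummy suites named test_data.yaml
    # instead of the real testcase.yaml suites.
    @mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
    class TestExample:
        @pytest.mark.usefixtures('clear_log')
        def test_something(self):
            ...  # drive twister via exec_module, as in the test files below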

scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup1/CMakeLists.txt

@@ -0,0 +1,8 @@
# SPDX-License-Identifier: Apache-2.0
cmake_minimum_required(VERSION 3.20.0)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(integration)
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})

scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup1/prj.conf

@@ -0,0 +1,2 @@
CONFIG_ZTEST=y
CONFIG_ZTEST_NEW_API=y

scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup1/src/main.c

@@ -0,0 +1,26 @@
/*
* Copyright (c) 2023 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/

#include <zephyr/ztest.h>

ZTEST_SUITE(a1_1_tests, NULL, NULL, NULL, NULL, NULL);

/**
 * @brief Test Asserts
 *
 * This test verifies various assert macros provided by ztest.
 *
 */
ZTEST(a1_1_tests, test_assert)
{
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}

scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup1/test_data.yaml

@@ -0,0 +1,11 @@
tests:
  dummy.agnostic.group1.subgroup1:
    platform_allow:
      - native_posix
      - qemu_x86
      - qemu_x86_64
    integration_platforms:
      - native_posix
    tags:
      - agnostic
      - subgrouped
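
Twister derives a test case id by appending each ztest test name, minus its
test_ prefix, to the scenario name above: dummy.agnostic.group1.subgroup1
plus test_assert is reported as dummy.agnostic.group1.subgroup1.assert,
which is what the expectations in the printout tests further down rely on.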

scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup2/CMakeLists.txt

@@ -0,0 +1,8 @@
# SPDX-License-Identifier: Apache-2.0
cmake_minimum_required(VERSION 3.20.0)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(integration)
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})

scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup2/prj.conf

@@ -0,0 +1,2 @@
CONFIG_ZTEST=y
CONFIG_ZTEST_NEW_API=y

scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup2/src/main.c

@@ -0,0 +1,26 @@
/*
* Copyright (c) 2023 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/

#include <zephyr/ztest.h>

ZTEST_SUITE(a1_2_tests, NULL, NULL, NULL, NULL, NULL);

/**
 * @brief Test Asserts
 *
 * This test verifies various assert macros provided by ztest.
 *
 */
ZTEST(a1_2_tests, test_assert)
{
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}

scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group1/subgroup2/test_data.yaml

@@ -0,0 +1,12 @@
tests:
  dummy.agnostic.group1.subgroup2:
    build_only: true
    platform_allow:
      - native_posix
      - qemu_x86
      - qemu_x86_64
    integration_platforms:
      - native_posix
    tags:
      - agnostic
      - subgrouped

scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group2/CMakeLists.txt

@@ -0,0 +1,8 @@
# SPDX-License-Identifier: Apache-2.0
cmake_minimum_required(VERSION 3.20.0)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(integration)
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})

scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group2/prj.conf

@@ -0,0 +1,2 @@
CONFIG_ZTEST=y
CONFIG_ZTEST_NEW_API=y

scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group2/src/main.c

@@ -0,0 +1,36 @@
/*
* Copyright (c) 2023 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/

#include <zephyr/ztest.h>

ZTEST_SUITE(a2_tests, NULL, NULL, NULL, NULL, NULL);

/**
 * @brief Test Asserts
 *
 * This test verifies various assert macros provided by ztest.
 *
 */
ZTEST(a2_tests, test_assert1)
{
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}

ZTEST(a2_tests, test_assert2)
{
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}

scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group2/src/ (second source file, name not shown)

@@ -0,0 +1,24 @@
/*
* Copyright (c) 2023 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/

#include <zephyr/ztest.h>

/**
 * @brief Test Asserts
 *
 * This test verifies various assert macros provided by ztest.
 *
 */
ZTEST(a2_tests, test_assert3)
{
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}
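
Note that test_assert3 registers with the a2_tests suite declared in the
previous source file: ztest ties ZTEST() cases to ZTEST_SUITE() declarations
by name, so one suite's cases may span several translation units.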

scripts/tests/twister_blackbox/test_data/tests/dummy/agnostic/group2/test_data.yaml

@@ -0,0 +1,9 @@
tests:
  dummy.agnostic.group2:
    platform_allow:
      - native_posix
      - qemu_x86
      - qemu_x86_64
    integration_platforms:
      - native_posix
    tags: agnostic

scripts/tests/twister_blackbox/test_data/tests/dummy/device/group/CMakeLists.txt

@@ -0,0 +1,8 @@
# SPDX-License-Identifier: Apache-2.0
cmake_minimum_required(VERSION 3.20.0)
find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(integration)
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})

scripts/tests/twister_blackbox/test_data/tests/dummy/device/group/prj.conf

@@ -0,0 +1,2 @@
CONFIG_ZTEST=y
CONFIG_ZTEST_NEW_API=y

scripts/tests/twister_blackbox/test_data/tests/dummy/device/group/src/main.c

@@ -0,0 +1,26 @@
/*
* Copyright (c) 2023 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/

#include <zephyr/ztest.h>

ZTEST_SUITE(d_tests, NULL, NULL, NULL, NULL, NULL);

/**
 * @brief Test Asserts
 *
 * This test verifies various assert macros provided by ztest.
 *
 */
ZTEST(d_tests, test_assert)
{
	zassert_true(1, "1 was false");
	zassert_false(0, "0 was true");
	zassert_is_null(NULL, "NULL was not NULL");
	zassert_not_null("foo", "\"foo\" was NULL");
	zassert_equal(1, 1, "1 was not equal to 1");
	zassert_equal_ptr(NULL, NULL, "NULL was not equal to NULL");
}

scripts/tests/twister_blackbox/test_data/tests/dummy/device/group/test_data.yaml

@@ -0,0 +1,6 @@
tests:
  dummy.device.group:
    platform_allow: frdm_k64f
    integration_platforms:
      - frdm_k64f
    tags: device

scripts/tests/twister_blackbox/test_printouts.py

@@ -0,0 +1,168 @@
#!/usr/bin/env python3
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Blackbox tests for twister's command line functions
"""

import importlib
import mock
import os
import pytest
import sys

from conftest import TEST_DATA, ZEPHYR_BASE, testsuite_filename_mock
from twisterlib.testplan import TestPlan


@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestPrintOuts:
    TESTDATA_1 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['agnostic', 'subgrouped']
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'device'),
            ['device']
        ),
    ]

    TESTDATA_2 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            [
                'dummy.agnostic.group1.subgroup1.assert',
                'dummy.agnostic.group1.subgroup2.assert',
                'dummy.agnostic.group2.assert1',
                'dummy.agnostic.group2.assert2',
                'dummy.agnostic.group2.assert3'
            ]
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'device'),
            [
                'dummy.device.group.assert'
            ]
        ),
    ]

    TESTDATA_3 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            'Testsuite\n'
            '├── Samples\n'
            '└── Tests\n'
            '    └── dummy\n'
            '        └── agnostic\n'
            '            ├── dummy.agnostic.group1.subgroup1.assert\n'
            '            ├── dummy.agnostic.group1.subgroup2.assert\n'
            '            ├── dummy.agnostic.group2.assert1\n'
            '            ├── dummy.agnostic.group2.assert2\n'
            '            └── dummy.agnostic.group2.assert3\n'
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'device'),
            'Testsuite\n'
            '├── Samples\n'
            '└── Tests\n'
            '    └── dummy\n'
            '        └── device\n'
            '            └── dummy.device.group.assert\n'
        ),
    ]

    @classmethod
    def setup_class(cls):
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.usefixtures("clear_log")
    @pytest.mark.parametrize(
        'test_path, expected',
        TESTDATA_1,
        ids=[
            'tests/dummy/agnostic',
            'tests/dummy/device',
        ]
    )
    def test_list_tags(self, capfd, test_path, expected):
        args = ['-T', test_path, '--list-tags']

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        printed_tags = [tag.strip() for tag in out.split('- ')[1:]]

        assert all([tag in printed_tags for tag in expected])
        assert all([tag in expected for tag in printed_tags])

        assert str(sys_exit.value) == '0'

    @pytest.mark.usefixtures("clear_log")
    @pytest.mark.parametrize(
        'test_path, expected',
        TESTDATA_2,
        ids=[
            'tests/dummy/agnostic',
            'tests/dummy/device',
        ]
    )
    def test_list_tests(self, capfd, test_path, expected):
        args = ['-T', test_path, '--list-tests']

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        printed_tests = [test.strip() for test in out.split('- ')[1:]]
        # The last entry may have trailing log output attached; drop it.
        printed_tests[-1] = printed_tests[-1].split('\n')[0]

        assert all([test in printed_tests for test in expected])
        assert all([test in expected for test in printed_tests])

        assert str(sys_exit.value) == '0'

    @pytest.mark.usefixtures("clear_log")
    @pytest.mark.parametrize(
        'test_path, expected',
        TESTDATA_3,
        ids=[
            'tests/dummy/agnostic',
            'tests/dummy/device',
        ]
    )
    def test_tree(self, capfd, test_path, expected):
        args = ['-T', test_path, '--test-tree']

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        assert expected in out
        assert str(sys_exit.value) == '0'

scripts/tests/twister_blackbox/ (second test module, name not shown)

@@ -0,0 +1,169 @@
#!/usr/bin/env python3
# Copyright (c) 2023 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Blackbox tests for twister's command line functions
"""

import importlib
import mock
import os
import pytest
import re
import sys

from conftest import TEST_DATA, ZEPHYR_BASE, testsuite_filename_mock
from twisterlib.testplan import TestPlan


@mock.patch.object(TestPlan, 'TESTSUITE_FILENAME', testsuite_filename_mock)
class TestQEMU:
    TESTDATA_1 = [
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'agnostic'),
            ['qemu_x86', 'qemu_x86_64', 'frdm_k64f'],
            {
                'selected_test_scenarios': 3,
                'selected_test_instances': 9,
                'skipped_configurations': 3,
                'skipped_by_static_filter': 3,
                'skipped_at_runtime': 0,
                'passed_configurations': 6,
                'failed_configurations': 0,
                'errored_configurations': 0,
                'executed_test_cases': 10,
                'skipped_test_cases': 5,
                'platform_count': 3,
                'executed_on_platform': 4,
                'only_built': 2
            }
        ),
        (
            os.path.join(TEST_DATA, 'tests', 'dummy', 'device'),
            ['qemu_x86', 'qemu_x86_64', 'frdm_k64f'],
            {
                'selected_test_scenarios': 1,
                'selected_test_instances': 3,
                'skipped_configurations': 3,
                'skipped_by_static_filter': 3,
                'skipped_at_runtime': 0,
                'passed_configurations': 0,
                'failed_configurations': 0,
                'errored_configurations': 0,
                'executed_test_cases': 0,
                'skipped_test_cases': 3,
                'platform_count': 3,
                'executed_on_platform': 0,
                'only_built': 0
            }
        ),
    ]

    @classmethod
    def setup_class(cls):
        apath = os.path.join(ZEPHYR_BASE, 'scripts', 'twister')
        cls.loader = importlib.machinery.SourceFileLoader('__main__', apath)
        cls.spec = importlib.util.spec_from_loader(cls.loader.name, cls.loader)
        cls.twister_module = importlib.util.module_from_spec(cls.spec)

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.usefixtures("clear_log")
    @pytest.mark.parametrize(
        'test_path, test_platforms, expected',
        TESTDATA_1,
        ids=[
            'tests/dummy/agnostic',
            'tests/dummy/device',
        ]
    )
    def test_emulation_only(self, capfd, test_path, test_platforms, expected):
        args = ['-T', test_path, '--emulation-only'] + \
               [val for pair in zip(
                   ['-p'] * len(test_platforms), test_platforms
               ) for val in pair]

        with mock.patch.object(sys, 'argv', [sys.argv[0]] + args), \
                pytest.raises(SystemExit) as sys_exit:
            self.loader.exec_module(self.twister_module)

        select_regex = r'^INFO - (?P<test_scenarios>[0-9]+) test scenarios' \
                       r' \((?P<test_instances>[0-9]+) test instances\) selected,' \
                       r' (?P<skipped_configurations>[0-9]+) configurations skipped' \
                       r' \((?P<skipped_by_static_filter>[0-9]+) by static filter,' \
                       r' (?P<skipped_at_runtime>[0-9]+) at runtime\)\.$'

        pass_regex = r'^INFO - (?P<passed_configurations>[0-9]+) of' \
                     r' (?P<test_instances>[0-9]+) test configurations passed' \
                     r' \([0-9]+\.[0-9]+%\), (?P<failed_configurations>[0-9]+) failed,' \
                     r' (?P<errored_configurations>[0-9]+) errored,' \
                     r' (?P<skipped_configurations>[0-9]+) skipped with' \
                     r' [0-9]+ warnings in [0-9]+\.[0-9]+ seconds$'

        case_regex = r'^INFO - In total (?P<executed_test_cases>[0-9]+)' \
                     r' test cases were executed, (?P<skipped_test_cases>[0-9]+) skipped' \
                     r' on (?P<platform_count>[0-9]+) out of total [0-9]+ platforms' \
                     r' \([0-9]+\.[0-9]+%\)$'

        built_regex = r'^INFO - (?P<executed_on_platform>[0-9]+)' \
                      r' test configurations executed on platforms, (?P<only_built>[0-9]+)' \
                      r' test configurations were only built\.$'

        out, err = capfd.readouterr()
        sys.stdout.write(out)
        sys.stderr.write(err)

        select_search = re.search(select_regex, err, re.MULTILINE)

        assert select_search
        assert int(select_search.group('test_scenarios')) == \
            expected['selected_test_scenarios']
        assert int(select_search.group('test_instances')) == \
            expected['selected_test_instances']
        assert int(select_search.group('skipped_configurations')) == \
            expected['skipped_configurations']
        assert int(select_search.group('skipped_by_static_filter')) == \
            expected['skipped_by_static_filter']
        assert int(select_search.group('skipped_at_runtime')) == \
            expected['skipped_at_runtime']

        pass_search = re.search(pass_regex, err, re.MULTILINE)

        assert pass_search
        assert int(pass_search.group('passed_configurations')) == \
            expected['passed_configurations']
        assert int(pass_search.group('test_instances')) == \
            expected['selected_test_instances']
        assert int(pass_search.group('failed_configurations')) == \
            expected['failed_configurations']
        assert int(pass_search.group('errored_configurations')) == \
            expected['errored_configurations']
        assert int(pass_search.group('skipped_configurations')) == \
            expected['skipped_configurations']

        case_search = re.search(case_regex, err, re.MULTILINE)

        assert case_search
        assert int(case_search.group('executed_test_cases')) == \
            expected['executed_test_cases']
        assert int(case_search.group('skipped_test_cases')) == \
            expected['skipped_test_cases']
        assert int(case_search.group('platform_count')) == \
            expected['platform_count']

        built_search = re.search(built_regex, err, re.MULTILINE)

        assert built_search
        assert int(built_search.group('executed_on_platform')) == \
            expected['executed_on_platform']
        assert int(built_search.group('only_built')) == \
            expected['only_built']

        assert str(sys_exit.value) == '0'
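
As a sanity check on these patterns, here is a standalone sketch (the log
line is fabricated; the percentage and timing values are arbitrary) of what
pass_regex is expected to match for the dummy.agnostic data above:

    import re

    pass_regex = (
        r'^INFO - (?P<passed_configurations>[0-9]+) of'
        r' (?P<test_instances>[0-9]+) test configurations passed'
        r' \([0-9]+\.[0-9]+%\), (?P<failed_configurations>[0-9]+) failed,'
        r' (?P<errored_configurations>[0-9]+) errored,'
        r' (?P<skipped_configurations>[0-9]+) skipped with'
        r' [0-9]+ warnings in [0-9]+\.[0-9]+ seconds$'
    )

    # Shape of twister's summary line for the agnostic expectations above:
    # 6 of 9 passed, 0 failed, 0 errored, 3 skipped.
    sample = ('INFO - 6 of 9 test configurations passed (66.67%),'
              ' 0 failed, 0 errored, 3 skipped with 0 warnings'
              ' in 12.34 seconds')

    match = re.search(pass_regex, sample, re.MULTILINE)
    assert match
    assert int(match.group('passed_configurations')) == 6
    assert int(match.group('skipped_configurations')) == 3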