scripts: tests: sanitycheck: Add testcases for TestCase class

test_testinstance.py: Add testcases to scan file and path for sub testcases
test_data/testcases: Also added the ztest test files
test_testsuite_class.py: changed get_all_tests() to match count of sub testcases in ztest files

Signed-off-by: Spoorthy Priya Yerabolu <spoorthy.priya.yerabolu@intel.com>

parent ad4d4fc7d1
commit 473ed3412f
5 changed files with 122 additions and 7 deletions
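For reviewers who want to exercise only the new pytest cases locally, a minimal invocation sketch follows. It assumes pytest is installed and ZEPHYR_BASE is exported; the -k filter expression is only an example and is not part of this commit.

    import os
    import pytest

    # Run the TestCase-related tests added by this commit (example filter only).
    zephyr_base = os.environ["ZEPHYR_BASE"]
    pytest.main([
        os.path.join(zephyr_base, "scripts/tests/sanitycheck/test_testinstance.py"),
        "-k", "scan_file or subcases or get_unique",
        "-v",
    ])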
New file (27 added lines), the fixture referenced below as testcases/tests/test_a/test_ztest_error.c:

@@ -0,0 +1,27 @@
/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

static void test_1b(void)
{
	ztest_test_skip();
}

void test_main(void)
{
#ifdef TEST_feature1
	ztest_test_suite(feature1,
			 ztest_unit_test(1a), ztest_unit_test(test_1b),
			 ztest_unit_test(test_1c)
			 );
#endif
#ifdef TEST_feature2
	ztest_test_suite(feature2,
			 ztest_unit_test(test_2a),
			 ztest_unit_test(test_2b)
			 );
	ztest_run_test_suite(feature2);
#endif
}
New file (14 added lines), the fixture referenced below as testcases/tests/test_a/test_ztest_error_1.c:

@@ -0,0 +1,14 @@
/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

ztest_test_suite(feature3,
		 ztest_unit_test(test_unit_1a),
#ifdef CONFIG_WHATEVER
		 ztest_unit_test(test_unit_1b),
#endif
		 ztest_unit_test(test_Unit_1c)
		 );
ztest_run_test_suite(feature3);
New file (16 added lines), the fixture referenced below as testcases/tests/test_ztest.c:

@@ -0,0 +1,16 @@
/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
ztest_test_suite(test_api,
		 ztest_1cpu_unit_test(test_a) /* comment! */,
		 /* comment */ztest_1cpu_unit_test(test_b),
		 ztest_1cpu_unit_test(test_c),
		 ztest_unit_test(test_unit_a), ztest_unit_test(test_unit_b),
		 ztest_1cpu_unit_test(
			 test_newline),
		 ztest_1cpu_unit_test(test_test_test_aa),
		 ztest_user_unit_test(test_user),
		 ztest_1cpu_unit_test(test_last));
ztest_run_test_suite(test_api);
scripts/tests/sanitycheck/test_testinstance.py:

@@ -13,7 +13,7 @@ import pytest

 ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
 sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
-from sanitylib import TestInstance, BuildError
+from sanitylib import TestInstance, BuildError, TestCase, SanityCheckException


 TESTDATA_1 = [
@@ -62,7 +62,7 @@ TESTDATA_2 = [
 ]

 @pytest.mark.parametrize("enable_asan, enable_coverage, coverage_platform, platform_type, expected_content", TESTDATA_2)
-def test_create_overlay(class_testsuite, all_testcases_dict, platforms_list, test_data, enable_asan, enable_coverage, coverage_platform, platform_type, expected_content):
+def test_create_overlay(class_testsuite, all_testcases_dict, platforms_list, enable_asan, enable_coverage, coverage_platform, platform_type, expected_content):
     """Test correct content is written to testcase_extra.conf based on if conditions
     TO DO: Add extra_configs to the input list"""
     class_testsuite.testcases = all_testcases_dict
@@ -84,3 +84,63 @@ def test_calculate_sizes(class_testsuite, all_testcases_dict, platforms_list):

     with pytest.raises(BuildError):
         assert testinstance.calculate_sizes() == "Missing/multiple output ELF binary"
+
+
+TESTDATA_3 = [
+    (ZEPHYR_BASE + '/scripts/tests/sanitycheck/test_data/testcases', ZEPHYR_BASE, '/scripts/tests/sanitycheck/test_data/testcases/tests/test_a/test_a.check_1', '/scripts/tests/sanitycheck/test_data/testcases/tests/test_a/test_a.check_1'),
+    (ZEPHYR_BASE, '.', 'test_a.check_1', 'test_a.check_1'),
+    (ZEPHYR_BASE, '/scripts/tests/sanitycheck/test_data/testcases/test_b', 'test_b.check_1', '/scripts/tests/sanitycheck/test_data/testcases/test_b/test_b.check_1'),
+    (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', 'test_b.check_1', 'test_b.check_1'),
+    (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', '.', '.'),
+    (ZEPHYR_BASE, '.', 'test_a.check_1.check_2', 'test_a.check_1.check_2'),
+]
+@pytest.mark.parametrize("testcase_root, workdir, name, expected", TESTDATA_3)
+def test_get_unique(testcase_root, workdir, name, expected):
+    '''Test to check if the unique name is given for each testcase root and workdir'''
+    unique = TestCase(testcase_root, workdir, name)
+    assert unique.name == expected
+
+TESTDATA_4 = [
+    (ZEPHYR_BASE, '.', 'test_c', 'Tests should reference the category and subsystem with a dot as a separator.'),
+    (os.path.join(ZEPHYR_BASE, '/scripts/tests'), '.', '', 'Tests should reference the category and subsystem with a dot as a separator.'),
+]
+@pytest.mark.parametrize("testcase_root, workdir, name, exception", TESTDATA_4)
+def test_get_unique_exception(testcase_root, workdir, name, exception):
+    '''Test to check if tests reference the category and subsystem with a dot as a separator'''
+
+    with pytest.raises(SanityCheckException):
+        unique = TestCase(testcase_root, workdir, name)
+        assert unique == exception
+
+TESTDATA_5 = [
+    ("testcases/tests/test_ztest.c", None, ['a', 'c', 'unit_a', 'newline', 'aa', 'user', 'last']),
+    ("testcases/tests/test_a/test_ztest_error.c", "Found a test that does not start with test_", ['1a', '1c', '2a', '2b']),
+    ("testcases/tests/test_a/test_ztest_error_1.c", "found invalid #ifdef, #endif in ztest_test_suite()", ['unit_1a', 'unit_1b', 'Unit_1c']),
+]
+
+@pytest.mark.parametrize("test_file, expected_warnings, expected_subcases", TESTDATA_5)
+def test_scan_file(test_data, test_file, expected_warnings, expected_subcases):
+    '''Testing scan_file method with different ztest files for warnings and results'''
+
+    testcase = TestCase("/scripts/tests/sanitycheck/test_data/testcases/tests", ".", "test_a.check_1")
+
+    results, warnings = testcase.scan_file(os.path.join(test_data, test_file))
+    assert sorted(results) == sorted(expected_subcases)
+    assert warnings == expected_warnings
+
+
+TESTDATA_6 = [
+    ("testcases/tests", ['a', 'c', 'unit_a', 'newline', 'aa', 'user', 'last']),
+    ("testcases/tests/test_a", ['unit_1a', 'unit_1b', 'Unit_1c', '1a', '1c', '2a', '2b']),
+]
+
+@pytest.mark.parametrize("test_path, expected_subcases", TESTDATA_6)
+def test_subcases(test_data, test_path, expected_subcases):
+    '''Testing scan path and parse subcases methods for expected subcases'''
+    testcase = TestCase("/scripts/tests/sanitycheck/test_data/testcases/tests", ".", "test_a.check_1")
+
+    subcases = testcase.scan_path(os.path.join(test_data, test_path))
+    assert sorted(subcases) == sorted(expected_subcases)
+
+    testcase.id = "test_id"
+    testcase.parse_subcases(test_data + test_path)
+    assert sorted(testcase.cases) == [testcase.id + '.' + x for x in sorted(expected_subcases)]
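A note on the expected subcase lists above: the fixtures register test_b, test_unit_b and test_1b, yet those names never appear in expected_subcases, and test_test_test_aa is expected as plain 'aa'. This is consistent with a line-anchored scan of the registration macros. The sketch below is illustrative only, not the sanitylib implementation; it shows one scan that reproduces these lists. The "#ifdef, #endif in ztest_test_suite()" warning checked by TESTDATA_5 is a separate check and is not modelled here.

    import re

    # Hypothetical, simplified scanner; NOT sanitylib's scan_file().
    ZTEST_REGISTRATION = re.compile(
        r"^\s*(?:ztest_test_suite\([A-Za-z0-9_]+,\s*)?"  # optional suite opener on the same line
        r"ztest_(?:1cpu_)?(?:user_)?unit_test\(\s*"      # registration macro
        r"([A-Za-z0-9_]+)\s*[,)]",                       # first argument = test name
        re.MULTILINE)

    def scan_subcases(source: str):
        """Return (subcase names, warning) the way the tests above expect them."""
        # Line-anchored: only the first registration on a line is seen
        # (so test_unit_b and test_1b are missed), and a line that starts
        # with a comment is skipped entirely (so test_b is missed).
        names = ZTEST_REGISTRATION.findall(source)
        warning = None
        if any(not name.startswith("test_") for name in names):
            warning = "Found a test that does not start with test_"
        # Every "test_" fragment is stripped from the reported name,
        # which turns test_test_test_aa into 'aa'.
        return [name.replace("test_", "") for name in names], warning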
scripts/tests/sanitycheck/test_testsuite_class.py:

@@ -58,11 +58,9 @@ def test_add_configurations(test_data, class_testsuite, board_root_dir):
 def test_get_all_testcases(class_testsuite, all_testcases_dict):
     """ Testing get_all_testcases function of TestSuite class in Sanitycheck """
     class_testsuite.testcases = all_testcases_dict
-    expected_tests = ['test_b.check_1', 'test_b.check_2',
-                      'test_c.check_1', 'test_c.check_2',
-                      'test_a.check_1', 'test_a.check_2',
-                      'sample_test.app']
-    assert len(class_testsuite.get_all_tests()) == 7
+    expected_tests = ['sample_test.app', 'test_a.check_1.1a', 'test_a.check_1.1c',
+                      'test_a.check_1.2a', 'test_a.check_1.2b', 'test_a.check_1.Unit_1c', 'test_a.check_1.unit_1a', 'test_a.check_1.unit_1b', 'test_a.check_2.1a', 'test_a.check_2.1c', 'test_a.check_2.2a', 'test_a.check_2.2b', 'test_a.check_2.Unit_1c', 'test_a.check_2.unit_1a', 'test_a.check_2.unit_1b', 'test_b.check_1', 'test_b.check_2', 'test_c.check_1', 'test_c.check_2']
+    assert len(class_testsuite.get_all_tests()) == 19
     assert sorted(class_testsuite.get_all_tests()) == sorted(expected_tests)

 def test_get_toolchain(class_testsuite, monkeypatch, capsys):
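The jump from 7 to 19 expected tests follows directly from the new fixtures: test_a.check_1 and test_a.check_2 each expand into the 7 subcases scanned from test_data/testcases/tests/test_a (1a, 1c, 2a, 2b, Unit_1c, unit_1a, unit_1b), giving 2 × 7 = 14, while test_b.check_1, test_b.check_2, test_c.check_1, test_c.check_2 and sample_test.app contribute one test each, for 14 + 5 = 19.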