kunit: test: add test plan to KUnit TAP format
TAP 14 allows an optional test plan to be emitted before the start of testing[1]; this is valuable because it makes it possible for a test harness to detect whether the number of tests run matches the number of tests expected to be run, ensuring that no tests silently failed.

Link [1]: https://github.com/isaacs/testanything.github.io/blob/tap14/tap-version-14-specification.md#the-plan

Signed-off-by: Brendan Higgins <brendanhiggins@google.com>
Reviewed-by: Stephen Boyd <sboyd@kernel.org>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
This commit is contained in:
parent 8c0d884986
commit 45dcbb6f5e

6 changed files with 82 additions and 25 deletions
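As a quick illustration of what this buys (taken from the test_data logs updated at the bottom of this diff), the top of the KUnit output now carries a suite-level plan line in addition to the per-suite plans that were already emitted:

TAP version 14
1..2
	# Subtest: sysctl_test
	1..8
	# sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
	...

Here "1..2" is the new plan emitted by kunit_print_tap_header() below, telling the harness to expect two suites; the "1..8" line is the pre-existing per-case plan printed by kunit_print_subtest_start().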
@@ -11,10 +11,27 @@ extern struct kunit_suite * const * const __kunit_suites_end[];
 
 #if IS_BUILTIN(CONFIG_KUNIT)
 
+static void kunit_print_tap_header(void)
+{
+	struct kunit_suite * const * const *suites, * const *subsuite;
+	int num_of_suites = 0;
+
+	for (suites = __kunit_suites_start;
+	     suites < __kunit_suites_end;
+	     suites++)
+		for (subsuite = *suites; *subsuite != NULL; subsuite++)
+			num_of_suites++;
+
+	pr_info("TAP version 14\n");
+	pr_info("1..%d\n", num_of_suites);
+}
+
 int kunit_run_all_tests(void)
 {
 	struct kunit_suite * const * const *suites;
 
+	kunit_print_tap_header();
+
 	for (suites = __kunit_suites_start;
 	     suites < __kunit_suites_end;
 	     suites++)
@@ -20,16 +20,6 @@ static void kunit_set_failure(struct kunit *test)
 	WRITE_ONCE(test->success, false);
 }
 
-static void kunit_print_tap_version(void)
-{
-	static bool kunit_has_printed_tap_version;
-
-	if (!kunit_has_printed_tap_version) {
-		pr_info("TAP version 14\n");
-		kunit_has_printed_tap_version = true;
-	}
-}
-
 /*
  * Append formatted message to log, size of which is limited to
  * KUNIT_LOG_SIZE bytes (including null terminating byte).
@@ -69,7 +59,6 @@ EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
 
 static void kunit_print_subtest_start(struct kunit_suite *suite)
 {
-	kunit_print_tap_version();
 	kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
 		  suite->name);
 	kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd",
@@ -45,10 +45,11 @@ class TestStatus(Enum):
 	FAILURE = auto()
 	TEST_CRASHED = auto()
 	NO_TESTS = auto()
+	FAILURE_TO_PARSE_TESTS = auto()
 
 kunit_start_re = re.compile(r'TAP version [0-9]+$')
 kunit_end_re = re.compile('(List of all partitions:|'
-			  'Kernel panic - not syncing: VFS:|reboot: System halted)')
+			  'Kernel panic - not syncing: VFS:)')
 
 def isolate_kunit_output(kernel_output):
 	started = False
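A small aside on the kunit_end_re change (an illustrative check, not part of the patch): 'reboot: System halted' is dropped from the pattern, so only the partition-list and panic markers still match, and those are presumably what isolate_kunit_output() just below consults when it decides where the KUnit output ends. A minimal sketch of the patched pattern's behaviour:

import re

# The patched pattern from the hunk above.
kunit_end_re = re.compile('(List of all partitions:|'
                          'Kernel panic - not syncing: VFS:)')

print(bool(kunit_end_re.match('List of all partitions:')))  # True: still an end marker
print(bool(kunit_end_re.match('reboot: System halted')))    # False: no longer an end marker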
@@ -109,7 +110,7 @@ OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
 
 OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
 
-OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) [0-9]+ - (.*)$')
+OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
 
 def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
 	save_non_diagnositic(lines, test_case)
@@ -197,7 +198,9 @@ def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
 	else:
 		return TestStatus.SUCCESS
 
-def parse_ok_not_ok_test_suite(lines: List[str], test_suite: TestSuite) -> bool:
+def parse_ok_not_ok_test_suite(lines: List[str],
+			       test_suite: TestSuite,
+			       expected_suite_index: int) -> bool:
 	consume_non_diagnositic(lines)
 	if not lines:
 		test_suite.status = TestStatus.TEST_CRASHED
@@ -210,6 +213,12 @@ def parse_ok_not_ok_test_suite(lines: List[str], test_suite: TestSuite) -> bool:
 			test_suite.status = TestStatus.SUCCESS
 		else:
 			test_suite.status = TestStatus.FAILURE
+		suite_index = int(match.group(2))
+		if suite_index != expected_suite_index:
+			print_with_timestamp(
+				red('[ERROR] ') + 'expected_suite_index ' +
+				str(expected_suite_index) + ', but got ' +
+				str(suite_index))
 		return True
 	else:
 		return False
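The two parser changes above work together: OK_NOT_OK_MODULE now captures the suite number from a top-level 'ok N - name' line, and parse_ok_not_ok_test_suite() compares it against the position implied by the test plan. Below is a standalone sketch of that check; it mirrors the patch's names but is not the patched file itself, and the suite name 'example_suite' is purely hypothetical:

import re

OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')

def check_suite_index(line: str, expected_suite_index: int) -> None:
    # Compare the index reported in the TAP result line with the expected position.
    match = OK_NOT_OK_MODULE.match(line)
    if not match:
        return
    suite_index = int(match.group(2))
    if suite_index != expected_suite_index:
        print('expected_suite_index %d, but got %d'
              % (expected_suite_index, suite_index))

check_suite_index('ok 2 - example_suite', 2)  # silent: index matches the plan position
check_suite_index('ok 3 - example_suite', 2)  # prints a mismatch warning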
@@ -222,7 +231,7 @@ def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
 	max_test_case_status = bubble_up_errors(lambda x: x.status, test_suite.cases)
 	return max_status(max_test_case_status, test_suite.status)
 
-def parse_test_suite(lines: List[str]) -> TestSuite:
+def parse_test_suite(lines: List[str], expected_suite_index: int) -> TestSuite:
 	if not lines:
 		return None
 	consume_non_diagnositic(lines)
@@ -241,7 +250,7 @@ def parse_test_suite(lines: List[str]) -> TestSuite:
 			break
 		test_suite.cases.append(test_case)
 		expected_test_case_num -= 1
-	if parse_ok_not_ok_test_suite(lines, test_suite):
+	if parse_ok_not_ok_test_suite(lines, test_suite, expected_suite_index):
 		test_suite.status = bubble_up_test_case_errors(test_suite)
 		return test_suite
 	elif not lines:
@@ -261,6 +270,17 @@ def parse_tap_header(lines: List[str]) -> bool:
 	else:
 		return False
 
+TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')
+
+def parse_test_plan(lines: List[str]) -> int:
+	consume_non_diagnositic(lines)
+	match = TEST_PLAN.match(lines[0])
+	if match:
+		lines.pop(0)
+		return int(match.group(1))
+	else:
+		return None
+
 def bubble_up_suite_errors(test_suite_list: List[TestSuite]) -> TestStatus:
 	return bubble_up_errors(lambda x: x.status, test_suite_list)
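For reference, here is the new helper's behaviour in isolation: a rough standalone sketch that mirrors the hunk above (it drops the consume_non_diagnositic() call and widens the return annotation to Optional[int]), not the patched file itself:

import re
from typing import List, Optional

TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')

def parse_test_plan(lines: List[str]) -> Optional[int]:
    # Pop and return the expected suite count if the first line is a plan line.
    match = TEST_PLAN.match(lines[0])
    if match:
        lines.pop(0)
        return int(match.group(1))
    return None

lines = ['1..2', '# Subtest: sysctl_test']
print(parse_test_plan(lines))  # 2: two suites expected
print(lines)                   # ['# Subtest: sysctl_test']: the plan line was consumed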
@@ -268,20 +288,33 @@ def parse_test_result(lines: List[str]) -> TestResult:
 	consume_non_diagnositic(lines)
 	if not lines or not parse_tap_header(lines):
 		return TestResult(TestStatus.NO_TESTS, [], lines)
+	expected_test_suite_num = parse_test_plan(lines)
+	if not expected_test_suite_num:
+		return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines)
 	test_suites = []
-	test_suite = parse_test_suite(lines)
-	while test_suite:
-		test_suites.append(test_suite)
-		test_suite = parse_test_suite(lines)
-	return TestResult(bubble_up_suite_errors(test_suites), test_suites, lines)
+	for i in range(1, expected_test_suite_num + 1):
+		test_suite = parse_test_suite(lines, i)
+		if test_suite:
+			test_suites.append(test_suite)
+		else:
+			print_with_timestamp(
+				red('[ERROR] ') + ' expected ' +
+				str(expected_test_suite_num) +
+				' test suites, but got ' + str(i - 2))
+			break
+	test_suite = parse_test_suite(lines, -1)
+	if test_suite:
+		print_with_timestamp(red('[ERROR] ') +
+			'got unexpected test suite: ' + test_suite.name)
+	if test_suites:
+		return TestResult(bubble_up_suite_errors(test_suites), test_suites, lines)
+	else:
+		return TestResult(TestStatus.NO_TESTS, [], lines)
 
-def parse_run_tests(kernel_output) -> TestResult:
+def print_and_count_results(test_result: TestResult) -> None:
 	total_tests = 0
 	failed_tests = 0
 	crashed_tests = 0
-	test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
-	if test_result.status == TestStatus.NO_TESTS:
-		print_with_timestamp(red('[ERROR] ') + 'no kunit output detected')
 	for test_suite in test_result.suites:
 		if test_suite.status == TestStatus.SUCCESS:
 			print_suite_divider(green('[PASSED] ') + test_suite.name)
@@ -303,6 +336,21 @@ def parse_run_tests(kernel_output) -> TestResult:
 				print_with_timestamp(red('[FAILED] ') + test_case.name)
 				print_log(map(yellow, test_case.log))
 				print_with_timestamp('')
+	return total_tests, failed_tests, crashed_tests
+
+def parse_run_tests(kernel_output) -> TestResult:
+	total_tests = 0
+	failed_tests = 0
+	crashed_tests = 0
+	test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
+	if test_result.status == TestStatus.NO_TESTS:
+		print(red('[ERROR] ') + yellow('no tests run!'))
+	elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
+		print(red('[ERROR] ') + yellow('could not parse test results!'))
+	else:
+		(total_tests,
+		 failed_tests,
+		 crashed_tests) = print_and_count_results(test_result)
 	print_with_timestamp(DIVIDER)
 	fmt = green if test_result.status == TestStatus.SUCCESS else red
 	print_with_timestamp(
@@ -1,4 +1,5 @@
 TAP version 14
+1..2
 	# Subtest: sysctl_test
 	1..8
 	# sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
@@ -1,6 +1,7 @@
 printk: console [tty0] enabled
 printk: console [mc-1] enabled
 TAP version 14
+1..2
 	# Subtest: sysctl_test
 	1..8
 	# sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
@@ -1,4 +1,5 @@
 TAP version 14
+1..2
 	# Subtest: sysctl_test
 	1..8
 	# sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed