linux-stable/tools/testing/kunit/kunit.py

#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
#
# A thin wrapper on top of the KUnit Kernel
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
import argparse
import os
import re
import shlex
import sys
import time

assert sys.version_info >= (3, 7), "Python version is too old"
from dataclasses import dataclass
from enum import Enum, auto
from typing import Iterable, List, Optional, Sequence, Tuple

import kunit_json
import kunit_kernel
import kunit_parser
from kunit_printer import stdout
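
# Overall outcome of a kunit.py step (config, build, or exec).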
class KunitStatus(Enum):
    SUCCESS = auto()
    CONFIG_FAILURE = auto()
    BUILD_FAILURE = auto()
    TEST_FAILURE = auto()

@dataclass
class KunitResult:
    status: KunitStatus
    elapsed_time: float

@dataclass
class KunitConfigRequest:
    build_dir: str
    make_options: Optional[List[str]]

@dataclass
class KunitBuildRequest(KunitConfigRequest):
    jobs: int

@dataclass
class KunitParseRequest:
    raw_output: Optional[str]
    json: Optional[str]

@dataclass
class KunitExecRequest(KunitParseRequest):
    build_dir: str
    timeout: int
    filter_glob: str
    filter: str
    filter_action: Optional[str]
    kernel_args: Optional[List[str]]
    run_isolated: Optional[str]
    list_tests: bool
    list_tests_attr: bool
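
# Everything `kunit.py run` needs: the config, build, exec, and parse options combined.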
@dataclass
class KunitRequest(KunitExecRequest, KunitBuildRequest):
    pass
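
# Returns the root of the kernel source tree, derived from this script's own
# path (it lives at tools/testing/kunit/kunit.py inside the tree); exits if
# the script is not inside a kernel tree.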
def get_kernel_root_path() -> str:
    path = sys.argv[0] if not __file__ else __file__
    parts = os.path.realpath(path).split('tools/testing/kunit')
    if len(parts) != 2:
        sys.exit(1)
    return parts[0]

def config_tests(linux: kunit_kernel.LinuxSourceTree,
                 request: KunitConfigRequest) -> KunitResult:
    stdout.print_with_timestamp('Configuring KUnit Kernel ...')

    config_start = time.time()
    success = linux.build_reconfig(request.build_dir, request.make_options)
    config_end = time.time()
    status = KunitStatus.SUCCESS if success else KunitStatus.CONFIG_FAILURE
    return KunitResult(status, config_end - config_start)

def build_tests(linux: kunit_kernel.LinuxSourceTree,
                request: KunitBuildRequest) -> KunitResult:
    stdout.print_with_timestamp('Building KUnit Kernel ...')

    build_start = time.time()
    success = linux.build_kernel(request.jobs,
                                 request.build_dir,
                                 request.make_options)
    build_end = time.time()
    status = KunitStatus.SUCCESS if success else KunitStatus.BUILD_FAILURE
    return KunitResult(status, build_end - build_start)

def config_and_build_tests(linux: kunit_kernel.LinuxSourceTree,
                           request: KunitBuildRequest) -> KunitResult:
    config_result = config_tests(linux, request)
    if config_result.status != KunitStatus.SUCCESS:
        return config_result

    return build_tests(linux, request)
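
# Boots the kernel with kunit.action=list, which makes the KUnit executor print
# the names of the tests matching the filters instead of running them.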
def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> List[str]:
    args = ['kunit.action=list']
    if request.kernel_args:
        args.extend(request.kernel_args)

    output = linux.run_kernel(args=args,
                              timeout=request.timeout,
                              filter_glob=request.filter_glob,
                              filter=request.filter,
                              filter_action=request.filter_action,
                              build_dir=request.build_dir)
    lines = kunit_parser.extract_tap_lines(output)
    # Hack! Drop the dummy TAP version header that the executor prints out.
    lines.pop()

    # Filter out any extraneous non-test output that might have gotten mixed in.
    return [l for l in lines if re.match(r'^[^\s.]+\.[^\s.]+$', l)]
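
# Same as _list_tests(), but passes kunit.action=list_attr so the listing also
# includes test attribute lines (e.g. '# example.test_2.speed: slow').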
def _list_tests_attr(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> Iterable[str]:
    args = ['kunit.action=list_attr']
    if request.kernel_args:
        args.extend(request.kernel_args)

    output = linux.run_kernel(args=args,
                              timeout=request.timeout,
                              filter_glob=request.filter_glob,
                              filter=request.filter,
                              filter_action=request.filter_action,
                              build_dir=request.build_dir)
    lines = kunit_parser.extract_tap_lines(output)
    # Hack! Drop the dummy TAP version header that the executor prints out.
    lines.pop()

    # Filter out any extraneous non-test output that might have gotten mixed in.
    return lines

def _suites_from_test_list(tests: List[str]) -> List[str]:
    """Extracts all the suites from an ordered list of tests."""
    suites = []  # type: List[str]
    for t in tests:
        parts = t.split('.', maxsplit=2)
        if len(parts) != 2:
            raise ValueError(f'internal KUnit error, test name should be of the form "<suite>.<test>", got "{t}"')
        suite, _ = parts
        if not suites or suites[-1] != suite:
            suites.append(suite)
    return suites
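
# For example, _suites_from_test_list(['example.test_1', 'example.test_2',
# 'kunit_status.test_a']) returns ['example', 'kunit_status'].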

def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> KunitResult:
    filter_globs = [request.filter_glob]
    if request.list_tests:
        output = _list_tests(linux, request)
        for line in output:
            print(line.rstrip())
        return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0)
    if request.list_tests_attr:
        attr_output = _list_tests_attr(linux, request)
        for line in attr_output:
            print(line.rstrip())
        return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0)
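
    # With --run_isolated, boot the kernel once per suite or test so that state
    # leaked by one test cannot affect the others; each glob gets its own boot.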
    if request.run_isolated:
        tests = _list_tests(linux, request)
        if request.run_isolated == 'test':
            filter_globs = tests
        elif request.run_isolated == 'suite':
            filter_globs = _suites_from_test_list(tests)
            # Apply the test-part of the user's glob, if present.
            if '.' in request.filter_glob:
                test_glob = request.filter_glob.split('.', maxsplit=2)[1]
                filter_globs = [g + '.' + test_glob for g in filter_globs]
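
    # Metadata describing this run; it is embedded in the JSON report when
    # --json is requested.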
    metadata = kunit_json.Metadata(arch=linux.arch(), build_dir=request.build_dir, def_config='kunit_defconfig')

    test_counts = kunit_parser.TestCounts()
    exec_time = 0.0
    for i, filter_glob in enumerate(filter_globs):
        stdout.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs)))
		test_start = time.time()
		run_result = linux.run_kernel(
			args=request.kernel_args,
			timeout=request.timeout,
			filter_glob=filter_glob,
			filter=request.filter,
			filter_action=request.filter_action,
			build_dir=request.build_dir)
		_, test_result = parse_tests(request, metadata, run_result)
		# run_kernel() doesn't block on the kernel exiting.
		# That only happens after we get the last line of output from `run_result`.
		# So exec_time here actually contains parsing + execution time, which is fine.
		test_end = time.time()
		exec_time += test_end - test_start

		test_counts.add_subtest_counts(test_result.counts)
	if len(filter_globs) == 1 and test_counts.crashed > 0:
		bd = request.build_dir
		print('The kernel seems to have crashed; you can decode the stack traces with:')
		print('$ scripts/decode_stacktrace.sh {}/vmlinux {} < {} | tee {}/decoded.log | {} parse'.format(
				bd, bd, kunit_kernel.get_outfile_path(bd), bd, sys.argv[0]))
	kunit_status = _map_to_overall_status(test_counts.get_status())
	return KunitResult(status=kunit_status, elapsed_time=exec_time)
def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus:
	if test_status in (kunit_parser.TestStatus.SUCCESS, kunit_parser.TestStatus.SKIPPED):
		return KunitStatus.SUCCESS
	return KunitStatus.TEST_FAILURE

def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]:
	parse_start = time.time()

	if request.raw_output:
		# Treat unparsed results as one passing test.
		fake_test = kunit_parser.Test()
		fake_test.status = kunit_parser.TestStatus.SUCCESS
		fake_test.counts.passed = 1
		output: Iterable[str] = input_data
		if request.raw_output == 'all':
			pass
		elif request.raw_output == 'kunit':
			output = kunit_parser.extract_tap_lines(output)
		for line in output:
			print(line.rstrip())
		parse_time = time.time() - parse_start
		return KunitResult(KunitStatus.SUCCESS, parse_time), fake_test

	# Actually parse the test results.
	test = kunit_parser.parse_run_tests(input_data)
	parse_time = time.time() - parse_start

	if request.json:
		json_str = kunit_json.get_json_result(test=test, metadata=metadata)
		if request.json == 'stdout':
			print(json_str)
		else:
			with open(request.json, 'w') as f:
				f.write(json_str)
			stdout.print_with_timestamp("Test results stored in %s" %
					os.path.abspath(request.json))

	if test.status != kunit_parser.TestStatus.SUCCESS:
		return KunitResult(KunitStatus.TEST_FAILURE, parse_time), test
	return KunitResult(KunitStatus.SUCCESS, parse_time), test
def run_tests(linux: kunit_kernel.LinuxSourceTree,
		request: KunitRequest) -> KunitResult:
	run_start = time.time()

	config_result = config_tests(linux, request)
	if config_result.status != KunitStatus.SUCCESS:
		return config_result

	build_result = build_tests(linux, request)
	if build_result.status != KunitStatus.SUCCESS:
		return build_result

	exec_result = exec_tests(linux, request)

	run_end = time.time()
	stdout.print_with_timestamp((
			'Elapsed time: %.3fs total, %.3fs configuring, %.3fs ' +
			'building, %.3fs running\n') % (
			run_end - run_start,
			config_result.elapsed_time,
			build_result.elapsed_time,
			exec_result.elapsed_time))
	return exec_result
# Problem:
# $ kunit.py run --json
# works as one would expect and prints the parsed test results as JSON.
# $ kunit.py run --json suite_name
# would *not* pass suite_name as the filter_glob and print as json.
# argparse will consider it to be another way of writing
# $ kunit.py run --json=suite_name
# i.e. it would run all tests, and dump the json to a `suite_name` file.
# So we hackily automatically rewrite --json => --json=stdout
pseudo_bool_flag_defaults = {
	'--json': 'stdout',
	'--raw_output': 'kunit',
}
def massage_argv(argv: Sequence[str]) -> Sequence[str]:
	def massage_arg(arg: str) -> str:
		if arg not in pseudo_bool_flag_defaults:
			return arg
		return f'{arg}={pseudo_bool_flag_defaults[arg]}'
	return list(map(massage_arg, argv))
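# Illustrative behavior, derived from the table above ('mysuite' is a
# made-up suite name): bare pseudo-bool flags get their default appended,
# while explicit values and positional args pass through untouched.
#	massage_argv(['run', '--json', 'mysuite'])
#	  -> ['run', '--json=stdout', 'mysuite']
#	massage_argv(['run', '--json=out.json', 'mysuite'])
#	  -> ['run', '--json=out.json', 'mysuite']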
def get_default_jobs() -> int:
	return len(os.sched_getaffinity(0))
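# Note: os.sched_getaffinity(0) counts only the CPUs this process may run on
# (e.g. under `taskset -c 0-3` it returns a 4-element set), unlike
# os.cpu_count(), which reports every CPU in the machine.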
def add_common_opts(parser: argparse.ArgumentParser) -> None:
	parser.add_argument('--build_dir',
			help='As in the make command, it specifies the build '
			'directory.',
			type=str, default='.kunit', metavar='DIR')
	parser.add_argument('--make_options',
			help='X=Y make option, can be repeated.',
			action='append', metavar='X=Y')
	parser.add_argument('--alltests',
			help='Run all KUnit tests via tools/testing/kunit/configs/all_tests.config',
			action='store_true')
	parser.add_argument('--kunitconfig',
			help='Path to Kconfig fragment that enables KUnit tests.'
			' If given a directory (e.g. lib/kunit), "/.kunitconfig" '
			'will get automatically appended. If repeated, the files are '
			'blindly concatenated, which might not work in all cases.',
			action='append', metavar='PATHS')
	parser.add_argument('--kconfig_add',
			help='Additional Kconfig options to append to the '
			'.kunitconfig, e.g. CONFIG_KASAN=y. Can be repeated.',
			action='append', metavar='CONFIG_X=Y')
	parser.add_argument('--arch',
			help=('Specifies the architecture to run tests under. '
			'The architecture specified here must match the '
			'string passed to the ARCH make param, '
			'e.g. i386, x86_64, arm, um, etc. Non-UML '
			'architectures run on QEMU.'),
			type=str, default='um', metavar='ARCH')
	parser.add_argument('--cross_compile',
			help=('Sets make\'s CROSS_COMPILE variable; it should '
			'be set to a toolchain path prefix (the prefix '
			'of gcc and other tools in your toolchain, for '
			'example `sparc64-linux-gnu-` if you have the '
			'sparc toolchain installed on your system, or '
			'`$HOME/toolchains/microblaze/gcc-9.2.0-nolibc/microblaze-linux/bin/microblaze-linux-` '
			'if you have downloaded the microblaze toolchain '
			'from the 0-day website to a directory in your '
			'home directory called `toolchains`).'),
			metavar='PREFIX')
	parser.add_argument('--qemu_config',
			help=('Takes a path to a file containing '
			'a QemuArchParams object.'),
			type=str, metavar='FILE')
	parser.add_argument('--qemu_args',
			help='Additional QEMU arguments, e.g. "-smp 8"',
			action='append', metavar='')
def add_build_opts(parser: argparse.ArgumentParser) -> None:
	parser.add_argument('--jobs',
			help='As in the make command, "Specifies the number of '
			'jobs (commands) to run simultaneously."',
			type=int, default=get_default_jobs(), metavar='N')
def add_exec_opts(parser: argparse.ArgumentParser) -> None:
	parser.add_argument('--timeout',
			help='Maximum number of seconds to allow for all tests '
			'to run. This does not include time taken to build the '
			'tests.',
			type=int,
			default=300,
			metavar='SECONDS')
	parser.add_argument('filter_glob',
			help='Filter which KUnit test suites/tests run at '
			'boot-time, e.g. list* or list*.*del_test',
			type=str,
			nargs='?',
			default='',
			metavar='filter_glob')
	parser.add_argument('--filter',
			help='Filter KUnit tests with attributes, '
			'e.g. module=example or speed>slow',
			type=str,
			default='')
	parser.add_argument('--filter_action',
			help='If set to skip, filtered tests will be skipped, '
			'e.g. --filter_action=skip. Otherwise they will not run.',
			type=str,
			choices=['skip'])
	parser.add_argument('--kernel_args',
			help='Kernel command-line parameters. May be repeated.',
			action='append', metavar='')
	parser.add_argument('--run_isolated',
			help='If set, boot the kernel for each individual '
			'suite/test. This can be useful for debugging '
			'a non-hermetic test, one that might pass/fail based on '
			'what ran before it.',
			type=str,
			choices=['suite', 'test'])
	parser.add_argument('--list_tests',
			help='If set, list all tests that will be run.',
			action='store_true')
	parser.add_argument('--list_tests_attr',
			help='If set, list all tests and test attributes.',
			action='store_true')
def add_parse_opts(parser: argparse.ArgumentParser) -> None:
	parser.add_argument('--raw_output',
			help='If set, don\'t parse output from the kernel. '
			'By default, filters to just KUnit output. Use '
			'--raw_output=all to show everything.',
			type=str, nargs='?', const='all', default=None, choices=['all', 'kunit'])
	parser.add_argument('--json',
			nargs='?',
			help='Prints parsed test results as JSON to stdout or a file if '
			'a filename is specified. Does nothing if --raw_output is set.',
			type=str, const='stdout', default=None, metavar='FILE')
def tree_from_args(cli_args: argparse.Namespace) -> kunit_kernel.LinuxSourceTree:
	"""Returns a LinuxSourceTree based on the user's arguments."""
	# Allow users to specify multiple arguments in one string, e.g. '-smp 8'
	qemu_args: List[str] = []
	if cli_args.qemu_args:
		for arg in cli_args.qemu_args:
			qemu_args.extend(shlex.split(arg))
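	# For illustration: shlex.split('-smp 8') == ['-smp', '8'], so repeated
	# --qemu_args values such as ['-smp 8', '-nographic'] flatten into
	# ['-smp', '8', '-nographic'].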
	kunitconfigs = cli_args.kunitconfig if cli_args.kunitconfig else []
	if cli_args.alltests:
		# Prepend so user-specified options take priority if we ever allow
		# --kunitconfig options to have differing options.
		kunitconfigs = [kunit_kernel.ALL_TESTS_CONFIG_PATH] + kunitconfigs
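		# e.g. `--alltests --kunitconfig=lib/kunit` yields
		# [ALL_TESTS_CONFIG_PATH, 'lib/kunit'], keeping the user's
		# fragment last.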
	return kunit_kernel.LinuxSourceTree(cli_args.build_dir,
			kunitconfig_paths=kunitconfigs,
			kconfig_add=cli_args.kconfig_add,
			arch=cli_args.arch,
			cross_compile=cli_args.cross_compile,
			qemu_config_path=cli_args.qemu_config,
			extra_qemu_args=qemu_args)
def run_handler(cli_args: argparse.Namespace) -> None:
	if not os.path.exists(cli_args.build_dir):
		os.mkdir(cli_args.build_dir)

	linux = tree_from_args(cli_args)
	request = KunitRequest(build_dir=cli_args.build_dir,
			make_options=cli_args.make_options,
			jobs=cli_args.jobs,
			raw_output=cli_args.raw_output,
			json=cli_args.json,
			timeout=cli_args.timeout,
			filter_glob=cli_args.filter_glob,
			filter=cli_args.filter,
			filter_action=cli_args.filter_action,
			kernel_args=cli_args.kernel_args,
			run_isolated=cli_args.run_isolated,
			list_tests=cli_args.list_tests,
			list_tests_attr=cli_args.list_tests_attr)
	result = run_tests(linux, request)
	if result.status != KunitStatus.SUCCESS:
		sys.exit(1)
def config_handler(cli_args: argparse.Namespace) -> None:
	if cli_args.build_dir and not os.path.exists(cli_args.build_dir):
		os.mkdir(cli_args.build_dir)

	linux = tree_from_args(cli_args)
	request = KunitConfigRequest(build_dir=cli_args.build_dir,
			make_options=cli_args.make_options)
	result = config_tests(linux, request)
	stdout.print_with_timestamp('Elapsed time: %.3fs\n' % result.elapsed_time)
	if result.status != KunitStatus.SUCCESS:
		sys.exit(1)

def build_handler(cli_args: argparse.Namespace) -> None:
	linux = tree_from_args(cli_args)
	request = KunitBuildRequest(build_dir=cli_args.build_dir,
			make_options=cli_args.make_options,
			jobs=cli_args.jobs)
	result = config_and_build_tests(linux, request)
	stdout.print_with_timestamp('Elapsed time: %.3fs\n' % result.elapsed_time)
	if result.status != KunitStatus.SUCCESS:
		sys.exit(1)
def exec_handler(cli_args: argparse.Namespace) -> None:
	linux = tree_from_args(cli_args)
	exec_request = KunitExecRequest(raw_output=cli_args.raw_output,
			build_dir=cli_args.build_dir,
			json=cli_args.json,
			timeout=cli_args.timeout,
			filter_glob=cli_args.filter_glob,
			filter=cli_args.filter,
			filter_action=cli_args.filter_action,
			kernel_args=cli_args.kernel_args,
			run_isolated=cli_args.run_isolated,
			list_tests=cli_args.list_tests,
			list_tests_attr=cli_args.list_tests_attr)
	result = exec_tests(linux, exec_request)
	stdout.print_with_timestamp('Elapsed time: %.3fs\n' % result.elapsed_time)
	if result.status != KunitStatus.SUCCESS:
		sys.exit(1)
def parse_handler(cli_args: argparse.Namespace) -> None:
	if cli_args.file is None:
		sys.stdin.reconfigure(errors='backslashreplace')  # type: ignore
		kunit_output = sys.stdin  # type: Iterable[str]
	else:
		with open(cli_args.file, 'r', errors='backslashreplace') as f:
			kunit_output = f.read().splitlines()
	# We know nothing about how the result was created!
	metadata = kunit_json.Metadata()
	request = KunitParseRequest(raw_output=cli_args.raw_output,
			json=cli_args.json)
	result, _ = parse_tests(request, metadata, kunit_output)
	if result.status != KunitStatus.SUCCESS:
		sys.exit(1)
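# Illustrative usage of the 'parse' subcommand: it reads results from a file,
# or from stdin when no file is given (paths below are examples only):
#   $ ./tools/testing/kunit/kunit.py parse /path/to/test.log
#   $ dmesg | ./tools/testing/kunit/kunit.py parse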
subcommand_handlers_map = {
	'run': run_handler,
	'config': config_handler,
	'build': build_handler,
	'exec': exec_handler,
	'parse': parse_handler,
}
def main(argv: Sequence[str]) -> None:
	parser = argparse.ArgumentParser(
			description='Helps writing and running KUnit tests.')
	subparser = parser.add_subparsers(dest='subcommand')

	# The 'run' command will config, build, exec, and parse in one go.
	run_parser = subparser.add_parser('run', help='Runs KUnit tests.')
	add_common_opts(run_parser)
	add_build_opts(run_parser)
	add_exec_opts(run_parser)
	add_parse_opts(run_parser)

	config_parser = subparser.add_parser('config',
			help='Ensures that .config contains all of '
			'the options in .kunitconfig')
	add_common_opts(config_parser)

	build_parser = subparser.add_parser('build', help='Builds a kernel with KUnit tests')
	add_common_opts(build_parser)
	add_build_opts(build_parser)

	exec_parser = subparser.add_parser('exec', help='Run a kernel with KUnit tests')
	add_common_opts(exec_parser)
	add_exec_opts(exec_parser)
	add_parse_opts(exec_parser)
	# The 'parse' option is special, as it doesn't need the kernel source
	# (therefore there is no need for a build_dir, hence no add_common_opts)
	# and the '--file' argument is not relevant to 'run', so isn't in
	# add_parse_opts()
	parse_parser = subparser.add_parser('parse',
			help='Parses KUnit results from a file, '
			'and prints the formatted results.')
	add_parse_opts(parse_parser)
	parse_parser.add_argument('file',
			help='Specifies the file to read results from.',
			type=str, nargs='?', metavar='input_file')
	cli_args = parser.parse_args(massage_argv(argv))

	if get_kernel_root_path():
		os.chdir(get_kernel_root_path())

	subcommand_handler = subcommand_handlers_map.get(cli_args.subcommand, None)
	if subcommand_handler is None:
		parser.print_help()
		return
	subcommand_handler(cli_args)
if __name__ == '__main__':
	main(sys.argv[1:])