IMPALA-5886 & IMPALA-4812 Update run-tests.py to handle exit code 5 for EE tests when no tests are collected. After this change, return_code will be either 0 if no tests are expected to be collected (dry-run) or 1 if tests are expected to be collected but are not collected due to some error
Testing: - Ran end-to-end shell, hs2 tests for IMPALA-5886 with debug statements to verify the exit_codes - Ran end-to-end shell tests with collect-only for IMPALA-4812 Change-Id: If82f974cc2d1e917464d4053563eaf4afc559150 Reviewed-on: http://gerrit.cloudera.org:8080/9494 Reviewed-by: David Knupp <[email protected]> Tested-by: Impala Public Jenkins Project: http://git-wip-us.apache.org/repos/asf/impala/repo Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/4b331b5b Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/4b331b5b Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/4b331b5b Branch: refs/heads/2.x Commit: 4b331b5bbdbd455534d5b4440d7e6b24ceb41931 Parents: 527396d Author: njanarthanan <[email protected]> Authored: Mon Mar 5 10:32:28 2018 -0800 Committer: Impala Public Jenkins <[email protected]> Committed: Wed Mar 28 00:10:20 2018 +0000 ---------------------------------------------------------------------- tests/run-tests.py | 91 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 69 insertions(+), 22 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/impala/blob/4b331b5b/tests/run-tests.py ---------------------------------------------------------------------- diff --git a/tests/run-tests.py b/tests/run-tests.py index 3a8bd2e..6967b17 100755 --- a/tests/run-tests.py +++ b/tests/run-tests.py @@ -28,7 +28,7 @@ import multiprocessing import os import pytest import sys - +from _pytest.main import EXIT_NOTESTSCOLLECTED from _pytest.config import FILE_OR_DIR # We whitelist valid test directories. If a new test directory is added, update this. 
@@ -60,22 +60,58 @@ NUM_STRESS_CLIENTS = min(multiprocessing.cpu_count() * 4, 64) if 'NUM_STRESS_CLIENTS' in os.environ: NUM_STRESS_CLIENTS = int(os.environ['NUM_STRESS_CLIENTS']) +class TestCounterPlugin(object): + """ Custom pytest plugin to count the number of tests + collected and executed over multiple pytest runs -class TestExecutor: + tests_collected is set of nodeids for collected tests + tests_executed is set of nodeids for executed tests + """ + def __init__(self): + self.tests_collected = set() + self.tests_executed = set() + + # pytest hook to handle test collection when xdist is used (parallel tests) + # https://github.com/pytest-dev/pytest-xdist/pull/35/commits (No official documentation available) + def pytest_xdist_node_collection_finished(self, node, ids): + self.tests_collected.update(set(ids)) + + # link to pytest_collection_modifyitems + # https://docs.pytest.org/en/2.9.2/writing_plugins.html#_pytest.hookspec.pytest_collection_modifyitems + def pytest_collection_modifyitems(self, items): + for item in items: + self.tests_collected.update({item.nodeid}) + + # link to pytest_runtest_logreport + # https://docs.pytest.org/en/2.9.2/_modules/_pytest/hookspec.html#pytest_runtest_logreport + def pytest_runtest_logreport(self, report): + if report.passed: + self.tests_executed.add(report.nodeid) + +class TestExecutor(object): def __init__(self, exit_on_error=True): self._exit_on_error = exit_on_error self.tests_failed = False + self.total_executed = 0 def run_tests(self, args): + testcounterplugin = TestCounterPlugin() + try: - exit_code = pytest.main(args) + pytest_exit_code = pytest.main(args, plugins=[testcounterplugin]) except: sys.stderr.write("Unexpected exception with pytest {0}".format(args)) raise - if exit_code != 0 and self._exit_on_error: - sys.exit(exit_code) - self.tests_failed = exit_code != 0 or self.tests_failed + if '--collect-only' in args: + for test in testcounterplugin.tests_collected: + print(test) + + self.total_executed += 
len(testcounterplugin.tests_executed) + + if 0 < pytest_exit_code < EXIT_NOTESTSCOLLECTED and self._exit_on_error: + sys.exit(pytest_exit_code) + self.tests_failed = 0 < pytest_exit_code < EXIT_NOTESTSCOLLECTED or self.tests_failed def build_test_args(base_name, valid_dirs=VALID_TEST_DIRS): """ @@ -115,7 +151,6 @@ def build_test_args(base_name, valid_dirs=VALID_TEST_DIRS): commandline_args = itertools.chain(*[arg.split('=') for arg in sys.argv[1:]]) ignored_dirs = build_ignore_dir_arg_list(valid_dirs=valid_dirs) - logging_args = [] for arg, log in LOGGING_ARGS.iteritems(): logging_args.extend([arg, os.path.join(RESULT_DIR, log.format(base_name))]) @@ -197,26 +232,38 @@ if __name__ == "__main__": if not os.path.exists(RESULT_DIR): os.makedirs(RESULT_DIR) - # First run query tests that need to be executed serially - base_args = ['-m', 'execute_serially'] - test_executor.run_tests(base_args + build_test_args('serial')) + # If you like to avoid verbose output the following + # adding -p no:terminal to --collect-only will suppress + # pytest warnings/messages and displays collected tests - # Run the stress tests tests - if '--collect-only' not in sys.argv: + if '--collect-only' in sys.argv: + test_executor.run_tests(sys.argv[1:]) + else: print_metrics('connections') - base_args = ['-m', 'stress', '-n', NUM_STRESS_CLIENTS] - test_executor.run_tests(base_args + build_test_args('stress')) - if '--collect-only' not in sys.argv: + # First run query tests that need to be executed serially + base_args = ['-m', 'execute_serially'] + test_executor.run_tests(base_args + build_test_args('serial')) print_metrics('connections') - # Run the remaining query tests in parallel - base_args = ['-m', 'not execute_serially and not stress', '-n', NUM_CONCURRENT_TESTS] - test_executor.run_tests(base_args + build_test_args('parallel')) + # Run the stress tests tests + base_args = ['-m', 'stress', '-n', NUM_STRESS_CLIENTS] + test_executor.run_tests(base_args + build_test_args('stress')) + 
print_metrics('connections') + + # Run the remaining query tests in parallel + base_args = ['-m', 'not execute_serially and not stress', '-n', NUM_CONCURRENT_TESTS] + test_executor.run_tests(base_args + build_test_args('parallel')) + + # The total number of tests executed at this point is expected to be >0 + # If it is < 0 then the script needs to exit with a non-zero + # error code indicating an error in test execution + if test_executor.total_executed == 0: + sys.exit(1) - # Finally, validate impalad/statestored metrics. - args = build_test_args(base_name='verify-metrics', valid_dirs=['verifiers']) - args.append('verifiers/test_verify_metrics.py') - test_executor.run_tests(args) + # Finally, validate impalad/statestored metrics. + args = build_test_args(base_name='verify-metrics', valid_dirs=['verifiers']) + args.append('verifiers/test_verify_metrics.py') + test_executor.run_tests(args) if test_executor.tests_failed: sys.exit(1)
