This is an automated email from the ASF dual-hosted git repository.

laszlog pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 26625e46b027041ad3d407536677039d1db9b16d
Author: Riza Suminto <riza.sumi...@cloudera.com>
AuthorDate: Tue Jan 21 16:57:01 2025 -0800

    IMPALA-13683: Add env var to skip infra tests and verifiers.
    
    bin/run-all-tests.sh provides a convenient way to repeat running the
    same test multiple times by setting NUM_TEST_ITERATIONS env var. This is
    especially useful to prove that a test is not flaky. However, it will
    still redundantly repeat run-workload.py and verifiers without any way
    to skip them.
    
    This patch adds env var SKIP_VERIFIERS to allow skipping verifiers. "Run
    test run-workload" is rewritten into its own test_run_workload.py.
    
    Testing:
    - Run and pass test_run_workload.py.
    - Manually run the script with SKIP_VERIFIERS set to true and confirm
      that verifiers are skipped.
    
    Change-Id: Ib483dcd48980655e4aa0c77f1cdc1f2a3c40a1de
    Reviewed-on: http://gerrit.cloudera.org:8080/22365
    Reviewed-by: Impala Public Jenkins <impala-public-jenk...@cloudera.com>
    Tested-by: Impala Public Jenkins <impala-public-jenk...@cloudera.com>
---
 bin/run-all-tests.sh             | 16 +++++------
 tests/infra/test_run_workload.py | 58 ++++++++++++++++++++++++++++++++++++++++
 tests/run-tests.py               | 12 +++++----
 3 files changed, 73 insertions(+), 13 deletions(-)

diff --git a/bin/run-all-tests.sh b/bin/run-all-tests.sh
index 9e53aef2c..62fbdff56 100755
--- a/bin/run-all-tests.sh
+++ b/bin/run-all-tests.sh
@@ -63,7 +63,8 @@ fi
 : ${CLUSTER_TEST_FILES:=}
 # Run JS tests
 : ${JS_TEST:=false}
-# Verifiers to run after all tests. Skipped if empty.
+# Verifiers to run after all tests. Skipped if true.
+: ${SKIP_VERIFIERS:=false}
 : ${TEST_SUITE_VERIFIERS:=verifiers/test_banned_log_messages.py}
 : ${TEST_SUITE_VERIFIERS_LOG_DIR:=${IMPALA_LOGS_DIR}/verifiers}
 # Extra arguments passed to start-impala-cluster for tests. These do not apply to custom
@@ -201,6 +202,12 @@ if [[ "${EXPLORATION_STRATEGY}" == "core" ]]; then
   RUN_TESTS_ARGS+=" --skip-stress"
 fi
 
+if [[ "$SKIP_VERIFIERS" == true ]]; then
+  # Skip verifiers.
+  TEST_SUITE_VERIFIERS=""
+  RUN_TESTS_ARGS+=" --skip-verifiers"
+fi
+
 if [[ "${TARGET_FILESYSTEM}" == "local" ]]; then
   # Only one impalad is supported when running against local filesystem.
   COMMON_PYTEST_ARGS+=" --impalad=localhost:21000"
@@ -270,13 +277,6 @@ do
     fi
   fi
 
-  # Run some queries using run-workload to verify run-workload has not been broken.
-  if ! run-step "Run test run-workload" test-run-workload.log \
-      "${IMPALA_HOME}/bin/run-workload.py" -w tpch --num_clients=2 --query_names=TPCH-Q1 \
-      --table_format=text/none --exec_options="disable_codegen:False" ${KERB_ARGS}; then
-    TEST_RET_CODE=1
-  fi
-
   if [[ "$FE_TEST" == true ]]; then
     # Run JUnit frontend tests
     # Requires a running impalad cluster because some tests (such as DataErrorTest and
diff --git a/tests/infra/test_run_workload.py b/tests/infra/test_run_workload.py
new file mode 100644
index 000000000..2b9dc979f
--- /dev/null
+++ b/tests/infra/test_run_workload.py
@@ -0,0 +1,58 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from __future__ import absolute_import
+import os
+from subprocess import check_output
+
+from tests.common.base_test_suite import BaseTestSuite
+
+
+class TestRunWorkload(BaseTestSuite):
+
+  def test_run_workload(self):
+    """Test that bin/run-workload.py still works."""
+    impala_home = os.getenv('IMPALA_HOME')
+    cmd = [
+      os.path.join(impala_home, 'bin/run-workload.py'), '-w', 'tpch', '--num_clients=2',
+      '--query_names=TPCH-Q1', '--table_format=text/none',
+      '--exec_options=disable_codegen:False']
+    kerberos_arg = os.getenv('KERB_ARGS')
+    if kerberos_arg is not None:
+      cmd.append(kerberos_arg)
+    output = check_output(cmd, universal_newlines=True)
+
+    """
+    Full stdout is like this:
+
+    Workload: TPCH, Scale Factor:
+
+    Table Format: text/none/none
+    +---------+---------------------+----------------+-----------+
+    | Query   | Start Time          | Time Taken (s) | Client ID |
+    +---------+---------------------+----------------+-----------+
+    | TPCH-Q1 | 2025-01-27 15:40:28 | 5.59           | 1         |
+    | TPCH-Q1 | 2025-01-27 15:40:28 | 5.65           | 2         |
+    +---------+---------------------+----------------+-----------+
+    """
+    assert "Workload: TPCH, Scale Factor:" in output
+    assert "Table Format: text/none/none" in output
+    assert "Query" in output
+    assert "Start Time" in output
+    assert "Time Taken (s)" in output
+    assert "Client ID" in output
+    assert "TPCH-Q1" in output
diff --git a/tests/run-tests.py b/tests/run-tests.py
index e0a1113d0..df85e4511 100755
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -257,6 +257,7 @@ if __name__ == "__main__":
   skip_serial = detect_and_remove_flag('--skip-serial')
   skip_stress = detect_and_remove_flag('--skip-stress')
   skip_parallel = detect_and_remove_flag('--skip-parallel')
+  skip_verifiers = detect_and_remove_flag('--skip-verifiers')
   test_executor = TestExecutor(exit_on_error=exit_on_error)
 
   # If the user is just asking for --help, just print the help test and then exit.
@@ -328,11 +329,12 @@ if __name__ == "__main__":
     if test_executor.total_executed == 0:
       sys.exit(1)
 
-    # Finally, validate impalad/statestored metrics.
-    args = build_test_args(base_name="verify-metrics{0}".format(shard_identifier),
-                           valid_dirs=['verifiers'])
-    args.append('verifiers/test_verify_metrics.py')
-    run(args)
+    if not skip_verifiers:
+      # Finally, validate impalad/statestored metrics.
+      args = build_test_args(base_name="verify-metrics{0}".format(shard_identifier),
+                             valid_dirs=['verifiers'])
+      args.append('verifiers/test_verify_metrics.py')
+      run(args)
 
   if test_executor.tests_failed:
     sys.exit(1)

Reply via email to