This is an automated email from the ASF dual-hosted git repository.

tarmstrong pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit dc0f8a1eee19f0bf9a0bbfdadad1d4231756a238
Author: Bikramjeet Vig <[email protected]>
AuthorDate: Wed May 22 16:24:45 2019 -0700

    IMPALA-8570: Fix flakiness in test_restart_statestore_query_resilience
    
    The test relies on scheduling decisions made on a 3-node HDFS
    minicluster without erasure coding. This patch adds a new
    SkipIfNotHdfsMinicluster.scheduling marker and uses it to ensure
    that the test is skipped when those conditions are not met.
    Existing tests that rely on the same conditions were also updated
    to use the marker.
    
    Change-Id: I0a54b6e149c42b696c954b5240d6de61453bf7f9
    Reviewed-on: http://gerrit.cloudera.org:8080/13406
    Reviewed-by: Bikramjeet Vig <[email protected]>
    Tested-by: Impala Public Jenkins <[email protected]>
---
 tests/common/skip.py                          |  5 ++++-
 tests/custom_cluster/test_data_cache.py       |  4 ++--
 tests/custom_cluster/test_restart_services.py | 11 ++++++++---
 3 files changed, 14 insertions(+), 6 deletions(-)
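
For context, the skip-marker pattern this change extends is a class-level
pytest.mark.skipif attribute evaluated once at import time. Below is a
minimal sketch, assuming IS_HDFS and IS_EC are boolean environment flags
like the ones skip.py already imports; the real marker also checks
pytest.config.option.testing_remote_cluster, omitted here for brevity:

    import pytest

    # Hypothetical stand-ins for the environment flags used by skip.py.
    IS_HDFS = True   # running against an HDFS minicluster
    IS_EC = False    # HDFS erasure coding is enabled

    class SkipIfNotHdfsMinicluster:
      # Any test decorated with this marker is skipped unless it runs on
      # a non-EC HDFS minicluster, where scheduling is deterministic.
      scheduling = pytest.mark.skipif(
          not IS_HDFS or IS_EC,
          reason="Test is tuned for scheduling decisions made on a "
                 "3-node HDFS minicluster with no EC")

Tests opt in declaratively with @SkipIfNotHdfsMinicluster.scheduling, as
the hunks below show.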

diff --git a/tests/common/skip.py b/tests/common/skip.py
index dba8c2d..31d4cd6 100644
--- a/tests/common/skip.py
+++ b/tests/common/skip.py
@@ -169,6 +169,10 @@ class SkipIfNotHdfsMinicluster:
   tuned_for_minicluster = pytest.mark.skipif(
       not IS_HDFS or IS_EC or pytest.config.option.testing_remote_cluster,
       reason="Test is tuned for 3-node HDFS minicluster with no EC")
+  scheduling = pytest.mark.skipif(
+      not IS_HDFS or IS_EC or pytest.config.option.testing_remote_cluster,
+      reason="Test is tuned for scheduling decisions made on a 3-node HDFS minicluster "
+             "with no EC")
 
 class SkipIfBuildType:
   not_dev_build = pytest.mark.skipif(not IMPALA_TEST_CLUSTER_PROPERTIES.is_dev(),
@@ -181,7 +185,6 @@ class SkipIfEC:
       "features relying on local read do not work.")
   oom = pytest.mark.skipif(IS_EC, reason="Probably broken by HDFS-13540.")
   fix_later = pytest.mark.skipif(IS_EC, reason="It should work but doesn't.")
-  scheduling = pytest.mark.skipif(IS_EC, reason="Scheduling is different on EC")
 
 
 class SkipIfDockerizedCluster:
diff --git a/tests/custom_cluster/test_data_cache.py b/tests/custom_cluster/test_data_cache.py
index c774189..f9056c5 100644
--- a/tests/custom_cluster/test_data_cache.py
+++ b/tests/custom_cluster/test_data_cache.py
@@ -18,12 +18,12 @@
 import pytest
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIf, SkipIfEC
+from tests.common.skip import SkipIf, SkipIfNotHdfsMinicluster
 
 
 @SkipIf.not_hdfs
 @SkipIf.is_buggy_el6_kernel
[email protected]
[email protected]
 class TestDataCache(CustomClusterTestSuite):
   """ This test enables the data cache and verfies that cache hit and miss 
counts
   in the runtime profile and metrics are as expected. Run on non-EC HDFS only 
as
diff --git a/tests/custom_cluster/test_restart_services.py b/tests/custom_cluster/test_restart_services.py
index 0591ef2..ad85671 100644
--- a/tests/custom_cluster/test_restart_services.py
+++ b/tests/custom_cluster/test_restart_services.py
@@ -32,7 +32,7 @@ from TCLIService import TCLIService
 from beeswaxd.BeeswaxService import QueryState
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
-from tests.common.skip import SkipIfEC
+from tests.common.skip import SkipIfNotHdfsMinicluster
 from tests.hs2.hs2_test_suite import HS2TestSuite, needs_session
 
 LOG = logging.getLogger(__name__)
@@ -93,6 +93,7 @@ class TestRestart(CustomClusterTestSuite):
   CANCELLATION_GRACE_PERIOD_S = 5
 
   @pytest.mark.execute_serially
+  @SkipIfNotHdfsMinicluster.scheduling
   @CustomClusterTestSuite.with_args(
     impalad_args="--statestore_subscriber_timeout_seconds={timeout_s} "
                  
"--failed_backends_query_cancellation_grace_period_ms={grace_period_ms}"
@@ -113,6 +114,8 @@ class TestRestart(CustomClusterTestSuite):
       handle = client.execute_async(slow_query)
       # Make sure query starts running.
       self.wait_for_state(handle, QueryState.RUNNING, 1000)
+      profile = client.get_runtime_profile(handle)
+      assert "NumBackends: 3" in profile, profile
       # Restart Statestore and wait till the grace period ends + some buffer.
       self.cluster.statestored.restart()
       self.cluster.statestored.service.wait_for_live_subscribers(4)
@@ -140,6 +143,8 @@ class TestRestart(CustomClusterTestSuite):
       impalad.service.wait_for_metric_value("catalog.curr-version", catalogd_version)
       handle = client.execute_async(slow_query)
       self.wait_for_state(handle, QueryState.RUNNING, 1000)
+      profile = client.get_runtime_profile(handle)
+      assert "NumBackends: 2" in profile, profile
       start_time = time.time()
       self.cluster.statestored.restart()
       # Make sure it has connected to the impalads before killing one.
@@ -245,7 +250,7 @@ class TestGracefulShutdown(CustomClusterTestSuite, HS2TestSuite):
   EXEC_SHUTDOWN_DEADLINE_S = 10
 
   @pytest.mark.execute_serially
-  @SkipIfEC.scheduling
+  @SkipIfNotHdfsMinicluster.scheduling
   @CustomClusterTestSuite.with_args(
       impalad_args="--shutdown_grace_period_s={grace_period} \
           --shutdown_deadline_s={deadline} \
@@ -255,7 +260,7 @@ class TestGracefulShutdown(CustomClusterTestSuite, HS2TestSuite):
     self.do_test_shutdown_executor(fetch_delay_s=0)
 
   @pytest.mark.execute_serially
-  @SkipIfEC.scheduling
+  @SkipIfNotHdfsMinicluster.scheduling
   @CustomClusterTestSuite.with_args(
       impalad_args="--shutdown_grace_period_s={grace_period} \
           --shutdown_deadline_s={deadline} \

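Putting the pieces together, here is a hedged sketch of how a test would
combine the new marker with the NumBackends assertions added above.
test_example_scheduling, client, and slow_query are hypothetical
stand-ins; execute_async, wait_for_state, and get_runtime_profile mirror
the calls visible in the hunks:

    @pytest.mark.execute_serially
    @SkipIfNotHdfsMinicluster.scheduling
    def test_example_scheduling(self, client, slow_query):
      # client and slow_query are assumed fixtures; the real tests build
      # them inline, as the diff above shows.
      handle = client.execute_async(slow_query)
      # Make sure the query is actually running before reading its profile.
      self.wait_for_state(handle, QueryState.RUNNING, 1000)
      # Assert the scheduler placed fragments on all 3 minicluster
      # backends; on failure, print the full profile for debugging.
      profile = client.get_runtime_profile(handle)
      assert "NumBackends: 3" in profile, profile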