This is an automated email from the ASF dual-hosted git repository.

dbecker pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 04f31aea712d605893066f657ec1eb1bb746aeb1
Author: Riza Suminto <[email protected]>
AuthorDate: Thu Nov 16 15:45:53 2023 -0800

    IMPALA-12567: Deflake test_75_percent_availability
    
    TestExecutorGroups.test_75_percent_availability can fail in certain
    build/test setups for not starting the last impalad within the 5s delay
    injection. This patch simplifies the test by launching fewer impalads
    in total (reduced from 8 to 5, excluding the coordinator) and increases
    the delay injection to ensure the test query runs on all five executors.
    The test is renamed to test_partial_availability accordingly.
    
    Testing:
    - Run and pass the test against HDFS and S3.
    
    Change-Id: I2e70f1dde10045c32c2bb4f6f78e8a707c9cd97d
    Reviewed-on: http://gerrit.cloudera.org:8080/20712
    Reviewed-by: Impala Public Jenkins <[email protected]>
    Tested-by: Impala Public Jenkins <[email protected]>
---
 tests/custom_cluster/test_executor_groups.py | 32 +++++++++++++++-------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/tests/custom_cluster/test_executor_groups.py 
b/tests/custom_cluster/test_executor_groups.py
index 0ba856597..6718e4572 100644
--- a/tests/custom_cluster/test_executor_groups.py
+++ b/tests/custom_cluster/test_executor_groups.py
@@ -1403,10 +1403,10 @@ class TestExecutorGroups(CustomClusterTestSuite):
     second_coord_client.close()
 
   @pytest.mark.execute_serially
-  def test_75_percent_availability(self):
-    """Test query planning and execution when only 75% of executor is up.
-    This test will run query over 8 node executor group at its healthy 
threshold (6) and
-    start the other 2 executor after query is planned.
+  def test_partial_availability(self):
+    """Test query planning and execution when only 80% of executor is up.
+    This test will run a query over 5 node executor group at its healthy 
threshold (4)
+    and start the last executor after query is planned.
     """
     coordinator_test_args = ''
     # The path to resources directory which contains the admission control 
config files.
@@ -1421,7 +1421,7 @@ class TestExecutorGroups(CustomClusterTestSuite):
     # extra args template to start coordinator
     extra_args_template = ("-vmodule admission-controller=3 "
         "-admission_control_slots=8 "
-        "-expected_executor_group_sets=root.large:8 "
+        "-expected_executor_group_sets=root.large:5 "
         "-fair_scheduler_allocation_path %s "
         "-llama_site_path %s "
         "%s ")
@@ -1433,27 +1433,29 @@ class TestExecutorGroups(CustomClusterTestSuite):
 
     # Create fresh client
     self.create_impala_clients()
-    # Start root.large exec group with 8 admission slots and 6 executors.
-    self._add_executor_group("group", 6, num_executors=6, 
admission_control_slots=8,
-                             resource_pool="root.large", 
extra_args="-mem_limit=2g")
+    # Start root.large exec group with 8 admission slots and 4 executors.
+    healthy_threshold = 4
+    self._add_executor_group("group", healthy_threshold, 
num_executors=healthy_threshold,
+                             admission_control_slots=8, 
resource_pool="root.large",
+                             extra_args="-mem_limit=2g")
     assert self._get_num_executor_groups(only_healthy=False) == 1
     assert self._get_num_executor_groups(only_healthy=False,
                                          exec_group_set_prefix="root.large") 
== 1
 
-    # Run query and let it compile, but delay admission for 5s
+    # Run query and let it compile, but delay admission for 10s
     handle = self.execute_query_async(CPU_TEST_QUERY, {
       "COMPUTE_PROCESSING_COST": "true",
-      "DEBUG_ACTION": "AC_BEFORE_ADMISSION:SLEEP@5000"})
+      "DEBUG_ACTION": "AC_BEFORE_ADMISSION:SLEEP@10000"})
 
-    # Start the next 2 executors.
-    self._add_executors("group", 6, num_executors=2, 
resource_pool="root.large",
-        extra_args="-mem_limit=2g", expected_num_impalads=9)
+    # Start the 5th executor.
+    self._add_executors("group", healthy_threshold, num_executors=1,
+        resource_pool="root.large", extra_args="-mem_limit=2g", 
expected_num_impalads=6)
 
     self.wait_for_state(handle, self.client.QUERY_STATES['FINISHED'], 60)
     profile = self.client.get_runtime_profile(handle)
-    assert "F00:PLAN FRAGMENT [RANDOM] hosts=6 instances=12" in profile, 
profile
+    assert "F00:PLAN FRAGMENT [RANDOM] hosts=4 instances=12" in profile, 
profile
     assert ("Scheduler Warning: Cluster membership might changed between 
planning and "
-        "scheduling, F00 scheduled instance count (16) is higher than its 
effective "
+        "scheduling, F00 scheduled instance count (15) is higher than its 
effective "
         "count (12)") in profile, profile
     self.client.close_query(handle)
 

Reply via email to