This is an automated email from the ASF dual-hosted git repository.

prozsa pushed a commit to branch branch-4.5.0
in repository https://gitbox.apache.org/repos/asf/impala.git

commit e753e94a798978f00f754718957e7f9a0f74b3e6
Author: Riza Suminto <riza.sumi...@cloudera.com>
AuthorDate: Wed Jan 22 20:43:11 2025 -0800

    IMPALA-13694: Add ImpalaTestSuite.__reset_impala_clients method
    
    This patch adds the __reset_impala_clients() method in ImpalaTestSuite.
    __reset_impala_clients() simply clears the client configuration. It is called
    on each setup_method() to ensure that each EE test uses clean test
    client. All subclasses of ImpalaTestSuite that declare setup() method
    are refactored to declare setup_method() instead, to match newer py.test
    convention. Also implement teardown_method() to complement
    setup_method(). See "Method and function level setup/teardown" at
    https://docs.pytest.org/en/stable/how-to/xunit_setup.html.
    
    CustomClusterTestSuite fully overrides setup_method() and
    teardown_method() because its subclasses can be destructive. Custom
    cluster test methods often restart the whole Impala cluster, rendering
    default impala clients initialized at setup_class() unusable. Each
    subclass of CustomClusterTestSuite is responsible for ensuring that the
    impala client it uses is in a good state.
    
    This patch improves BeeswaxConnection and ImpylaHS2Connection to only
    consider non-REMOVED options as their default options. They look up
    valid (not REMOVED) query options in their own appropriate way,
    memorizing the option names as lowercase strings and the values as
    strings. List values are wrapped in double quotes. The log message in
    ImpalaConnection.set_configuration_option() is differentiated from how
    a SET query looks.
    
    Note that ImpalaTestSuite.run_test_case() modifies and restores query
    options written in .test files by issuing SET queries, not by calling
    ImpalaConnection.set_configuration_option(). This behavior remains unchanged.
    
    Consistently lowercase query option names everywhere in the Impala test
    code infrastructure. Fixed several tests that had been unknowingly
    overriding the 'exec_option' vector dimension due to case-sensitivity
    mismatches. Also fixed some flake8 issues.
    
    Added convenience methods execute_query_using_vector() and
    create_impala_client_from_vector() to ImpalaTestSuite.
    
    Testing:
    - Pass core tests.
    
    Change-Id: Ieb47fec9f384cb58b19fdbd10ff7aa0850ad6277
    Reviewed-on: http://gerrit.cloudera.org:8080/22404
    Reviewed-by: Csaba Ringhofer <csringho...@cloudera.com>
    Reviewed-by: Jason Fehr <jf...@cloudera.com>
    Tested-by: Impala Public Jenkins <impala-public-jenk...@cloudera.com>
---
 tests/authorization/test_authorization.py          |   7 +-
 tests/beeswax/impala_beeswax.py                    |   4 +-
 tests/common/impala_connection.py                  |  46 ++++++++--
 tests/common/impala_test_suite.py                  |  58 +++++++++++-
 tests/common/test_vector.py                        |   8 +-
 tests/custom_cluster/test_admission_controller.py  |   6 +-
 tests/custom_cluster/test_hs2_fault_injection.py   |   6 +-
 tests/custom_cluster/test_kudu.py                  |   3 +
 tests/custom_cluster/test_mt_dop.py                |   7 +-
 .../test_runtime_filter_aggregation.py             |   3 +
 tests/hs2/hs2_test_suite.py                        |   6 ++
 tests/query_test/test_exprs.py                     | 102 +++++++++++----------
 tests/query_test/test_hdfs_caching.py              |  88 ++++++++++--------
 tests/query_test/test_iceberg.py                   |   6 ++
 tests/query_test/test_insert_behaviour.py          |  10 +-
 tests/query_test/test_kudu.py                      |   3 +
 tests/query_test/test_runtime_filters.py           |   3 +
 17 files changed, 253 insertions(+), 113 deletions(-)

diff --git a/tests/authorization/test_authorization.py 
b/tests/authorization/test_authorization.py
index 7dbb86b97..07ce8ad18 100644
--- a/tests/authorization/test_authorization.py
+++ b/tests/authorization/test_authorization.py
@@ -38,7 +38,9 @@ ADMIN = "admin"
 
 
 class TestAuthorization(CustomClusterTestSuite):
-  def setup(self):
+
+  def setup_method(self, method):
+    super(TestAuthorization, self).setup_method(method)
     host, port = (self.cluster.impalads[0].service.hostname,
                   self.cluster.impalads[0].service.hs2_port)
     self.socket = TSocket(host, port)
@@ -47,9 +49,10 @@ class TestAuthorization(CustomClusterTestSuite):
     self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
     self.hs2_client = ImpalaHiveServer2Service.Client(self.protocol)
 
-  def teardown(self):
+  def teardown_method(self, method):
     if self.socket:
       self.socket.close()
+    super(TestAuthorization, self).teardown_method(method)
 
   def __execute_hs2_stmt(self, statement, verify=True):
     """
diff --git a/tests/beeswax/impala_beeswax.py b/tests/beeswax/impala_beeswax.py
index c33dcb2b6..5823c00ec 100644
--- a/tests/beeswax/impala_beeswax.py
+++ b/tests/beeswax/impala_beeswax.py
@@ -136,7 +136,7 @@ class ImpalaBeeswaxClient(object):
     return self.__query_options
 
   def set_query_option(self, name, value):
-    self.__query_options[name.upper()] = value
+    self.__query_options[name.lower()] = value
 
   def set_query_options(self, query_option_dict):
     if query_option_dict is None:
@@ -146,7 +146,7 @@ class ImpalaBeeswaxClient(object):
       self.set_query_option(name, value)
 
   def get_query_option(self, name):
-    return self.__query_options.get(name.upper())
+    return self.__query_options.get(name.lower())
 
   def clear_query_options(self):
     self.__query_options.clear()
diff --git a/tests/common/impala_connection.py 
b/tests/common/impala_connection.py
index 262eaaa35..2049e3d83 100644
--- a/tests/common/impala_connection.py
+++ b/tests/common/impala_connection.py
@@ -67,6 +67,17 @@ def log_sql_stmt(sql_stmt):
     LOG.info("-- [...]")
 
 
+def collect_default_query_options(options, name, val, kind):
+  if kind == 'REMOVED':
+    return
+  name = name.lower()
+  val = str(val).strip('"')
+  if ',' in val:
+    # Value is a list. Wrap it with double quote.
+    val = '"{}"'.format(val)
+  options[name] = val
+
+
 # Common wrapper around the internal types of HS2/Beeswax operation/query 
handles.
 class OperationHandle(object):
   def __init__(self, handle, sql_stmt):
@@ -197,6 +208,9 @@ class BeeswaxConnection(ImpalaConnection):
     self.__beeswax_client = ImpalaBeeswaxClient(host_port, use_kerberos, 
user=user,
                                                 password=password, 
use_ssl=use_ssl)
     self.__host_port = host_port
+    # Default query option, obtained at first call to 
get_default_configuration().
+    # Query option names are in lower case for consistency.
+    self.__default_query_options = None
     self.QUERY_STATES = self.__beeswax_client.query_states
 
   def get_test_protocol(self):
@@ -207,15 +221,24 @@ class BeeswaxConnection(ImpalaConnection):
 
   def set_configuration_option(self, name, value):
     # Only set the option if it's not already set to the same value.
+    name = name.lower()
+    value = str(value)
     if self.__beeswax_client.get_query_option(name) != value:
-      LOG.info('SET %s=%s;' % (name, value))
+      LOG.info("set_option('{}', '{}')".format(name, value))
       self.__beeswax_client.set_query_option(name, value)
 
-  def get_default_configuration(self):
-    result = {}
+  def __collect_default_options(self):
+    options = {}
     for item in self.__beeswax_client.get_default_configuration():
-      result[item.key] = item.value
-    return result
+      collect_default_query_options(
+        options, item.key, item.value, str(item.level))
+    LOG.debug("Default query options: {0}".format(options))
+    return options
+
+  def get_default_configuration(self):
+    if self.__default_query_options is None:
+      self.__default_query_options = self.__collect_default_options()
+    return self.__default_query_options.copy()
 
   def clear_configuration(self):
     self.__beeswax_client.clear_query_options()
@@ -329,6 +352,9 @@ class ImpylaHS2Connection(ImpalaConnection):
     # at a time per connection, which is a limitation also imposed by the 
Beeswax API.
     self.__impyla_conn = None
     self.__cursor = None
+    # Default query option obtained from initial connect.
+    # Query option names are in lower case for consistency.
+    self.__default_query_options = {}
     # Query options to send along with each query.
     self.__query_options = {}
     self._is_hive = is_hive
@@ -346,7 +372,11 @@ class ImpylaHS2Connection(ImpalaConnection):
     return self.__host_port
 
   def set_configuration_option(self, name, value):
-    self.__query_options[name] = str(value)
+    name = name.lower()
+    value = str(value)
+    if self.__query_options.get(name) != value:
+      LOG.info("set_option('{}', '{}')".format(name, value))
+      self.__query_options[name] = value
 
   def get_default_configuration(self):
     return self.__default_query_options.copy()
@@ -371,8 +401,8 @@ class ImpylaHS2Connection(ImpalaConnection):
     self.__default_query_options = {}
     if not self._is_hive:
       self.__cursor.execute("set all")
-      for name, val, _ in self.__cursor:
-        self.__default_query_options[name] = val
+      for name, val, kind in self.__cursor:
+        collect_default_query_options(self.__default_query_options, name, val, 
kind)
       self.__cursor.close_operation()
       LOG.debug("Default query options: 
{0}".format(self.__default_query_options))
 
diff --git a/tests/common/impala_test_suite.py 
b/tests/common/impala_test_suite.py
index 46c2a550c..61eff6dc6 100644
--- a/tests/common/impala_test_suite.py
+++ b/tests/common/impala_test_suite.py
@@ -319,10 +319,26 @@ class ImpalaTestSuite(BaseTestSuite):
       cls.hive_transport.close()
     cls.close_impala_clients()
 
+  @classmethod
+  def setup_method(cls, test_method):
+    """Setup for all test method."""
+    cls.__reset_impala_clients()
+
+  @classmethod
+  def teardown_method(cls, test_method):
+    """Teardown for all test method.
+    Currently, it is only here as a placeholder for future use and complement
+    setup_method() declaration."""
+    pass
+
   @classmethod
   def create_impala_client(cls, host_port=None,
                            protocol=pytest.config.option.default_test_protocol,
                            is_hive=False):
+    """
+    Create a new ImpalaConnection client.
+    Make sure to always call this method using a with-as statement or manually 
close
+    the returned connection before discarding it."""
     if host_port is None:
       host_port = cls.__get_default_host_port(protocol)
     client = create_connection(host_port=host_port,
@@ -331,6 +347,18 @@ class ImpalaTestSuite(BaseTestSuite):
     client.connect()
     return client
 
+  @classmethod
+  def create_impala_client_from_vector(cls, vector):
+    """A shorthand for create_impala_client with test vector as input.
+    Vector must have 'protocol' and 'exec_option' dimension.
+    Return a client of specified 'protocol' and with cofiguration 
'exec_option' set.
+    Make sure to always call this method using a with-as statement or manually 
close
+    the returned connection before discarding it."""
+    client = cls.create_impala_client(
+      protocol=vector.get_value('protocol'))
+    client.set_configuration(vector.get_exec_option_dict())
+    return client
+
   @classmethod
   def get_impalad_cluster_size(cls):
     return 
len(cls.__get_cluster_host_ports(pytest.config.option.default_test_protocol))
@@ -371,6 +399,15 @@ class ImpalaTestSuite(BaseTestSuite):
       LOG.info("HS2 HTTP connection setup failed, continuing...: 
{0}".format(e))
     cls.client = 
cls.default_impala_client(pytest.config.option.default_test_protocol)
 
+  @classmethod
+  def __reset_impala_clients(cls):
+    if cls.beeswax_client:
+      cls.beeswax_client.clear_configuration()
+    if cls.hs2_client:
+      cls.hs2_client.clear_configuration()
+    if cls.hs2_http_client:
+      cls.hs2_http_client.clear_configuration()
+
   @classmethod
   def close_impala_clients(cls):
     """Closes Impala clients created by create_impala_clients()."""
@@ -463,10 +500,10 @@ class ImpalaTestSuite(BaseTestSuite):
     if not self.default_query_options:
       query_options = impalad_client.get_default_configuration()
       for key, value in query_options.items():
-        self.default_query_options[key.upper()] = value
+        self.default_query_options[key.lower()] = value
     # Restore all the changed query options.
     for query_option in query_options_changed:
-      query_option = query_option.upper()
+      query_option = query_option.lower()
       if query_option not in self.default_query_options:
         continue
       default_val = self.default_query_options[query_option]
@@ -715,7 +752,7 @@ class ImpalaTestSuite(BaseTestSuite):
         for query in query.split(';'):
           set_pattern_match = SET_PATTERN.match(query)
           if set_pattern_match:
-            option_name = set_pattern_match.groups()[0]
+            option_name = set_pattern_match.groups()[0].lower()
             query_options_changed.append(option_name)
             assert option_name not in vector.get_value(EXEC_OPTION_KEY), (
                 "{} cannot be set in  the '.test' file since it is in the test 
vector. "
@@ -1056,6 +1093,21 @@ class ImpalaTestSuite(BaseTestSuite):
   def close_query_using_client(self, client, query):
     return client.close_query(query)
 
+  def execute_query_using_vector(self, query, vector):
+    """Run 'query' with given test 'vector'.
+    'vector' must have 'protocol' and 'exec_option' dimension.
+    Default ImpalaTestSuite client will be used depending on value of 
'protocol'
+    dimension."""
+    client = self.default_impala_client(vector.get_value('protocol'))
+    result = self.execute_query_using_client(client, query, vector)
+    # Restore client configuration before returning.
+    modified_configs = vector.get_exec_option_dict().keys()
+    for name, val in client.get_default_configuration().items():
+      lower_name = name.lower()
+      if lower_name in modified_configs:
+        client.set_configuration_option(lower_name, val)
+    return result
+
   @execute_wrapper
   def execute_query_async(self, query, query_options=None):
     if query_options is not None: self.client.set_configuration(query_options)
diff --git a/tests/common/test_vector.py b/tests/common/test_vector.py
index bdab6cf5b..fc3234b44 100644
--- a/tests/common/test_vector.py
+++ b/tests/common/test_vector.py
@@ -99,13 +99,15 @@ class ImpalaTestVector(object):
     return self.get_value(EXEC_OPTION_KEY)
 
   def get_exec_option(self, option_name):
-    return self.get_value(EXEC_OPTION_KEY)[option_name]
+    value = self.get_value(EXEC_OPTION_KEY).get(option_name.lower())
+    assert value is not None
+    return value
 
   def set_exec_option(self, option_name, option_value):
-    self.get_value(EXEC_OPTION_KEY)[option_name] = option_value
+    self.get_value(EXEC_OPTION_KEY)[option_name.lower()] = str(option_value)
 
   def unset_exec_option(self, option_name):
-    del self.get_value(EXEC_OPTION_KEY)[option_name]
+    del self.get_value(EXEC_OPTION_KEY)[option_name.lower()]
 
   def __str__(self):
       return ' | '.join(['%s' % vector_value for vector_value in 
self.vector_values])
diff --git a/tests/custom_cluster/test_admission_controller.py 
b/tests/custom_cluster/test_admission_controller.py
index 83c6b09ed..fcd0ee8b3 100644
--- a/tests/custom_cluster/test_admission_controller.py
+++ b/tests/custom_cluster/test_admission_controller.py
@@ -2000,7 +2000,8 @@ class 
TestAdmissionControllerStress(TestAdmissionControllerBase):
       cls.ImpalaTestMatrix.add_constraint(
           lambda v: v.get_value('round_robin_submission'))
 
-  def setup(self):
+  def setup_method(self, method):
+    super(TestAdmissionControllerStress, self).setup_method(method)
     # All threads are stored in this list and it's used just to make sure we 
clean up
     # properly in teardown.
     self.all_threads = list()
@@ -2023,7 +2024,7 @@ class 
TestAdmissionControllerStress(TestAdmissionControllerBase):
     signal.signal(signal.SIGINT, quit)
     signal.signal(signal.SIGHUP, quit)
 
-  def teardown(self):
+  def teardown_method(self, method):
     # Set shutdown for all threads (cancel if needed)
     self.exit.set()
 
@@ -2032,6 +2033,7 @@ class 
TestAdmissionControllerStress(TestAdmissionControllerBase):
       thread.join(5)
       LOG.info("Join thread for query num %s %s", thread.query_num,
           "TIMED OUT" if thread.isAlive() else "")
+    super(TestAdmissionControllerStress, self).teardown_method(method)
 
   def should_run(self):
     return not self.exit.is_set()
diff --git a/tests/custom_cluster/test_hs2_fault_injection.py 
b/tests/custom_cluster/test_hs2_fault_injection.py
index 1d393001d..1b4ca8632 100644
--- a/tests/custom_cluster/test_hs2_fault_injection.py
+++ b/tests/custom_cluster/test_hs2_fault_injection.py
@@ -125,15 +125,17 @@ class FaultInjectingImpalaHS2Client(ImpalaHS2Client):
 class TestHS2FaultInjection(CustomClusterTestSuite):
   """Class for testing the http fault injection in various rpcs used by the
   impala-shell client"""
-  def setup(self):
+  def setup_method(self, method):
+    super(TestHS2FaultInjection, self).setup_method(method)
     impalad = IMPALAD_HS2_HTTP_HOST_PORT.split(":")
     self.custom_hs2_http_client = FaultInjectingImpalaHS2Client(impalad, 1024,
         kerberos_host_fqdn=None, use_http_base_transport=True, 
http_path='cliservice')
     self.transport = self.custom_hs2_http_client.transport
 
-  def teardown(self):
+  def teardown_method(self, method):
     self.transport.disable_fault()
     self.custom_hs2_http_client.close_connection()
+    super(TestHS2FaultInjection, self).teardown_method(method)
 
   def connect(self):
     self.custom_hs2_http_client.connect()
diff --git a/tests/custom_cluster/test_kudu.py 
b/tests/custom_cluster/test_kudu.py
index c8154c8af..10b4d6af5 100644
--- a/tests/custom_cluster/test_kudu.py
+++ b/tests/custom_cluster/test_kudu.py
@@ -73,6 +73,9 @@ class TestKuduOperations(CustomKuduTest):
   def test_local_tz_conversion_ops(self, vector, unique_database):
     """IMPALA-5539: Test Kudu timestamp reads/writes are correct with the
        use_local_tz_for_unix_timestamp_conversions flag."""
+    # Remove 'abort_on_error' option so we can set it at .test file.
+    # Revisit this if 'abort_on_error' dimension size increase.
+    vector.unset_exec_option('abort_on_error')
     # These tests provide enough coverage of queries with timestamps.
     self.run_test_case('QueryTest/kudu-scan-node', vector, 
use_db=unique_database)
     self.run_test_case('QueryTest/kudu_insert', vector, use_db=unique_database)
diff --git a/tests/custom_cluster/test_mt_dop.py 
b/tests/custom_cluster/test_mt_dop.py
index 38878c086..4b456a6d3 100644
--- a/tests/custom_cluster/test_mt_dop.py
+++ b/tests/custom_cluster/test_mt_dop.py
@@ -18,7 +18,6 @@
 from __future__ import absolute_import, division, print_function
 import os
 import pytest
-from copy import deepcopy
 
 from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
 from tests.common.environ import build_flavor_timeout
@@ -51,6 +50,9 @@ class TestMtDopFlags(CustomClusterTestSuite):
   def test_mt_dop_runtime_filters_one_node(self, vector):
     """Runtime filter tests, which assume 3 fragment instances, can also be 
run on a single
     node cluster to test multiple filter sources/destinations per backend."""
+    # Remove 'num_nodes' option so we can set it at .test file.
+    # Revisit this if 'num_nodes' dimension size increase.
+    vector.unset_exec_option('num_nodes')
     # Runtime filter test with RUNTIME_PROFILE seconds modified to reflect
     # the different filter aggregation pattern with mt_dop.
     vector.get_value('exec_option')['mt_dop'] = 3
@@ -67,7 +69,7 @@ class TestMtDopFlags(CustomClusterTestSuite):
         test_file_vars={'$RUNTIME_FILTER_WAIT_TIME_MS': str(WAIT_TIME_MS)})
 
   @CustomClusterTestSuite.with_args(cluster_size=1)
-  def test_mt_dop_union_empty_table(self, unique_database):
+  def test_mt_dop_union_empty_table(self):
     """ Regression test for IMPALA-11803: When used in DEBUG build,
     impalad crashed while running union on an empty table with MT_DOP>1.
     This test verifies the fix on the same."""
@@ -76,6 +78,7 @@ class TestMtDopFlags(CustomClusterTestSuite):
                         " functional.emptytable union all select id from"
                         " functional.alltypestiny) t")
 
+
 class TestMaxMtDop(CustomClusterTestSuite):
   @classmethod
   def get_workload(cls):
diff --git a/tests/custom_cluster/test_runtime_filter_aggregation.py 
b/tests/custom_cluster/test_runtime_filter_aggregation.py
index f8bee4785..6b908724b 100644
--- a/tests/custom_cluster/test_runtime_filter_aggregation.py
+++ b/tests/custom_cluster/test_runtime_filter_aggregation.py
@@ -56,6 +56,9 @@ class TestRuntimeFilterAggregation(CustomClusterTestSuite):
   @pytest.mark.execute_serially
   @CustomClusterTestSuite.with_args(cluster_size=6, 
num_exclusive_coordinators=1)
   def test_basic_filters(self, vector):
+    # Remove 'num_nodes' option so we can set it at .test file.
+    # Revisit this if 'num_nodes' dimension size increase.
+    vector.unset_exec_option('num_nodes')
     num_filters_per_host = 
vector.get_exec_option('max_num_filters_aggregated_per_host')
     num_backend = 5  # exclude coordinator
     num_updates = (num_backend if num_filters_per_host == 0
diff --git a/tests/hs2/hs2_test_suite.py b/tests/hs2/hs2_test_suite.py
index ec62a44d7..312a61060 100644
--- a/tests/hs2/hs2_test_suite.py
+++ b/tests/hs2/hs2_test_suite.py
@@ -120,9 +120,13 @@ class HS2TestSuite(ImpalaTestSuite):
                          'doubleVal', 'binaryVal']
 
   def setup(self):
+    # TODO: Rename this method to connect() and provide proper py.test 
setup_method().
+    # test_hs2.py is using this class as cursor.
     self.socket, self.hs2_client = self._open_hs2_connection()
 
   def teardown(self):
+    # TODO: Rename this method to close() and provide proper py.test 
teardown_method().
+    # test_hs2.py is using this class as cursor.
     if self.socket:
       self.socket.close()
 
@@ -177,6 +181,8 @@ class HS2TestSuite(ImpalaTestSuite):
                                 "profile or execution summary".format(user))
 
   def close(self, op_handle):
+    # TODO: Rename this method to close_query().
+    # test_hs2.py is using this class as cursor.
     close_op_req = TCLIService.TCloseOperationReq()
     close_op_req.operationHandle = op_handle
     close_op_resp = self.hs2_client.CloseOperation(close_op_req)
diff --git a/tests/query_test/test_exprs.py b/tests/query_test/test_exprs.py
index 51f976d04..0852ac67b 100644
--- a/tests/query_test/test_exprs.py
+++ b/tests/query_test/test_exprs.py
@@ -23,12 +23,15 @@ import re
 from random import randint
 
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.test_dimensions import create_exec_option_dimension
-from tests.common.test_dimensions import create_uncompressed_text_dimension
-from tests.common.test_vector import ImpalaTestDimension
+from tests.common.test_dimensions import (
+  add_exec_option_dimension,
+  create_exec_option_dimension,
+  create_uncompressed_text_dimension)
 from tests.util.test_file_parser import QueryTestSectionReader
 
 LOG = logging.getLogger('test_exprs')
+EXPR_REWRITE_OPTIONS = [0, 1]
+
 
 class TestExprs(ImpalaTestSuite):
   @classmethod
@@ -40,17 +43,17 @@ class TestExprs(ImpalaTestSuite):
     super(TestExprs, cls).add_test_dimensions()
     # Test with and without expr rewrites to cover regular expr evaluations
     # as well as constant folding, in particular, timestamp literals.
-    cls.ImpalaTestMatrix.add_dimension(
-        ImpalaTestDimension('enable_expr_rewrites', *[0,1]))
+    add_exec_option_dimension(cls, 'enable_expr_rewrites', 
EXPR_REWRITE_OPTIONS)
     if cls.exploration_strategy() == 'core':
       # Test with file format that supports codegen
       cls.ImpalaTestMatrix.add_constraint(lambda v:
-          v.get_value('table_format').file_format == 'parquet' and
-          v.get_value('table_format').compression_codec == 'none')
+          v.get_value('table_format').file_format == 'parquet'
+          and v.get_value('table_format').compression_codec == 'none')
 
   def test_exprs(self, vector):
-    vector.get_value('exec_option')['enable_expr_rewrites'] = \
-        vector.get_value('enable_expr_rewrites')
+    # Remove 'exec_single_node_rows_threshold' option so we can set it at 
.test file.
+    # Revisit this if 'exec_single_node_rows_threshold' dimension size 
increase.
+    vector.unset_exec_option('exec_single_node_rows_threshold')
     # TODO: Enable some of these tests for Avro if possible
     # Don't attempt to evaluate timestamp expressions with Avro tables (which 
don't
     # support a timestamp type)"
@@ -71,8 +74,6 @@ class TestExprs(ImpalaTestSuite):
 
   def test_special_strings(self, vector):
     """Test handling of expressions with "special" strings."""
-    vector.get_value('exec_option')['enable_expr_rewrites'] = \
-        vector.get_value('enable_expr_rewrites')
     self.run_test_case('QueryTest/special-strings', vector)
 
   def test_encryption_exprs(self, vector):
@@ -82,9 +83,6 @@ class TestExprs(ImpalaTestSuite):
     the mode used in them. For modes that may not be supported, we run a
     probing query first and only run the test file if it succeeds.
     """
-    vector.get_value('exec_option')['enable_expr_rewrites'] = \
-      vector.get_value('enable_expr_rewrites')
-
     # Run queries that are expected to fail, e.g. trying invalid operation 
modes etc.
     self.run_test_case('QueryTest/encryption_exprs_errors', vector)
 
@@ -92,17 +90,17 @@ class TestExprs(ImpalaTestSuite):
     self.run_test_case('QueryTest/encryption_exprs_aes_256_ecb', vector)
     self.run_test_case('QueryTest/encryption_exprs_aes_256_cfb', vector)
 
-    aes_256_gcm_ok = self._check_aes_mode_supported("aes_256_gcm")
+    aes_256_gcm_ok = self._check_aes_mode_supported("aes_256_gcm", vector)
     if aes_256_gcm_ok:
       self.run_test_case('QueryTest/encryption_exprs_aes_256_gcm', vector)
     self._log_whether_aes_tests_run("aes_256_gcm", aes_256_gcm_ok)
 
-    aes_128_gcm_ok = self._check_aes_mode_supported("aes_128_gcm")
+    aes_128_gcm_ok = self._check_aes_mode_supported("aes_128_gcm", vector)
     if aes_128_gcm_ok:
       self.run_test_case('QueryTest/encryption_exprs_aes_128_gcm', vector)
     self._log_whether_aes_tests_run("aes_128_gcm", aes_128_gcm_ok)
 
-    aes_256_ctr_ok = self._check_aes_mode_supported("aes_256_ctr")
+    aes_256_ctr_ok = self._check_aes_mode_supported("aes_256_ctr", vector)
     if aes_256_ctr_ok:
       self.run_test_case('QueryTest/encryption_exprs_aes_256_ctr', vector)
     self._log_whether_aes_tests_run("aes_256_ctr", aes_256_ctr_ok)
@@ -114,7 +112,7 @@ class TestExprs(ImpalaTestSuite):
             "supports" if running else "does not support")
     LOG.warning(msg)
 
-  def _check_aes_mode_supported(self, mode):
+  def _check_aes_mode_supported(self, mode, vector):
     """Checks whether the given AES mode is supported in the current
     environment (see "test_encryption_exprs()") by running a probing query."""
     assert "ECB" not in mode.upper()
@@ -129,7 +127,7 @@ class TestExprs(ImpalaTestSuite):
         expr=expr, key=key, mode=mode, iv=iv)
 
     try:
-      res = self.execute_query(query)
+      res = self.execute_query_using_vector(query, vector)
       assert res.success
       return True
     except Exception as e:
@@ -173,32 +171,32 @@ class TestExprLimits(ImpalaTestSuite):
       if (i + 1 != self.EXPR_CHILDREN_LIMIT - 1):
         in_query += ","
     in_query += ")"
-    self.__exec_query(in_query)
+    self.__exec_query(in_query, vector)
 
     # CASE expr
     case_query = "select case "
     for i in range(0, self.EXPR_CHILDREN_LIMIT // 2):
       case_query += " when true then 1"
     case_query += " end"
-    self.__exec_query(case_query)
+    self.__exec_query(case_query, vector)
 
   def test_expr_depth_limit(self, vector):
     # Compound predicates
     and_query = "select " + self.__gen_deep_infix_expr("true", " and false")
-    self.__exec_query(and_query)
+    self.__exec_query(and_query, vector)
     or_query = "select " + self.__gen_deep_infix_expr("true", " or false")
-    self.__exec_query(or_query)
+    self.__exec_query(or_query, vector)
 
     # Arithmetic expr
     arith_query = "select " + self.__gen_deep_infix_expr("1", " + 1")
-    self.__exec_query(arith_query)
+    self.__exec_query(arith_query, vector)
 
     func_query = "select " + self.__gen_deep_func_expr("lower(", "'abc'", ")")
-    self.__exec_query(func_query)
+    self.__exec_query(func_query, vector)
 
     # Casts.
     cast_query = "select " + self.__gen_deep_func_expr("cast(", "1", " as 
int)")
-    self.__exec_query(cast_query)
+    self.__exec_query(cast_query, vector)
 
   def test_under_statement_expression_limit(self):
     """Generate a huge case statement that barely fits within the statement 
expression
@@ -216,8 +214,9 @@ class TestExprLimits(ImpalaTestSuite):
     """Generate a huge case statement that exceeds the default 16MB limit and 
verify
        that it gets rejected."""
 
-    expected_err_tmpl = ("Statement length of {0} bytes exceeds the maximum "
-        "statement length \({1} bytes\)")
+    expected_err_tmpl = (
+      r"Statement length of {0} bytes exceeds the maximum "
+      r"statement length \({1} bytes\)")
     size_16mb = 16 * 1024 * 1024
 
     # Case 1: a valid SQL that would parse correctly
@@ -241,8 +240,9 @@ class TestExprLimits(ImpalaTestSuite):
     case = self.__gen_huge_case("int_col", 66, 2, "  ")
     query = "select {0} as huge_case from functional.alltypes".format(case)
     assert len(query) < 16 * 1024 * 1024
-    expected_err_re = ("Exceeded the statement expression limit \({0}\)\n"
-        "Statement has .* expressions.").format(250000)
+    expected_err_re = (
+      r"Exceeded the statement expression limit \({0}\)\n"
+      r"Statement has .* expressions.").format(250000)
     err = self.execute_query_expect_failure(self.client, query)
     assert re.search(expected_err_re, str(err))
 
@@ -282,13 +282,14 @@ class TestExprLimits(ImpalaTestSuite):
       expr += close_func
     return expr
 
-  def __exec_query(self, sql_str):
+  def __exec_query(self, sql_str, vector):
     try:
-      impala_ret = self.execute_query(sql_str)
+      impala_ret = self.execute_query_using_vector(sql_str, vector)
       assert impala_ret.success, "Failed to execute query %s" % (sql_str)
     except Exception as e:  # consider any exception a failure
       assert False, "Failed to execute query %s: %s" % (sql_str, e)
 
+
 class TestUtcTimestampFunctions(ImpalaTestSuite):
   """Tests for UTC timestamp functions, i.e. functions that do not depend on 
the behavior
      of the flag --use_local_tz_for_unix_timestamp_conversions. Tests added 
here should
@@ -300,21 +301,18 @@ class TestUtcTimestampFunctions(ImpalaTestSuite):
     super(TestUtcTimestampFunctions, cls).add_test_dimensions()
     # Test with and without expr rewrites to cover regular expr evaluations
     # as well as constant folding, in particular, timestamp literals.
-    cls.ImpalaTestMatrix.add_dimension(
-        ImpalaTestDimension('enable_expr_rewrites', *[0,1]))
+    add_exec_option_dimension(cls, 'enable_expr_rewrites', 
EXPR_REWRITE_OPTIONS)
     if cls.exploration_strategy() == 'core':
       # Test with file format that supports codegen
-      cls.ImpalaTestMatrix.add_constraint(lambda v:\
-          v.get_value('table_format').file_format == 'text' and\
-          v.get_value('table_format').compression_codec == 'none')
+      cls.ImpalaTestMatrix.add_constraint(lambda v:
+          v.get_value('table_format').file_format == 'text'
+          and v.get_value('table_format').compression_codec == 'none')
 
   @classmethod
   def get_workload(cls):
     return 'functional-query'
 
   def test_utc_functions(self, vector):
-    vector.get_value('exec_option')['enable_expr_rewrites'] = \
-        vector.get_value('enable_expr_rewrites')
     self.run_test_case('QueryTest/utc-timestamp-functions', vector)
 
 
@@ -330,8 +328,7 @@ class TestConstantFoldingNoTypeLoss(ImpalaTestSuite):
     super(TestConstantFoldingNoTypeLoss, cls).add_test_dimensions()
     # Test with and without expr rewrites to verify that constant folding does 
not change
     # the behaviour.
-    cls.ImpalaTestMatrix.add_dimension(
-        ImpalaTestDimension('enable_expr_rewrites', *[0,1]))
+    add_exec_option_dimension(cls, 'enable_expr_rewrites', 
EXPR_REWRITE_OPTIONS)
     # We don't actually use a table so one file format is enough.
     cls.ImpalaTestMatrix.add_constraint(lambda v:
         v.get_value('table_format').file_format in ['parquet'])
@@ -350,13 +347,13 @@ class TestConstantFoldingNoTypeLoss(ImpalaTestSuite):
     for (typename, width) in types_and_widths:
       shift_val = width - 2  # Valid and positive for signed types.
       expected_value = 1 << shift_val
-      result = self.execute_query_expect_success(self.client,
-          query_template.format(typename=typename, shift_val=shift_val))
+      result = self.execute_query_using_vector(
+        query_template.format(typename=typename, shift_val=shift_val), vector)
       assert result.data == [str(expected_value)]
 
   def test_addition(self, vector):
     query = "select typeof(cast(1 as bigint) + cast(rand() as tinyint))"
-    result = self.execute_query_expect_success(self.client, query)
+    result = self.execute_query_using_vector(query, vector)
     assert result.data == ["BIGINT"]
 
 
@@ -367,22 +364,29 @@ class TestNonConstPatternILike(ImpalaTestSuite):
   @classmethod
   def add_test_dimensions(cls):
     super(TestNonConstPatternILike, cls).add_test_dimensions()
+    # This test does not care about the file format of test table.
+    # Fix the table format to text.
+    cls.ImpalaTestMatrix.add_dimension(
+        create_uncompressed_text_dimension(cls.get_workload()))
 
   @classmethod
   def get_workload(cls):
     return 'functional-query'
 
   def test_non_const_pattern_ilike(self, vector, unique_database):
-    tbl_name = '`{0}`.`ilike_test`'.format(unique_database)
+    with self.create_impala_client_from_vector(vector) as client:
+      tbl_name = '`{0}`.`ilike_test`'.format(unique_database)
+      self.__run_non_const_pattern_ilike(client, tbl_name)
 
-    self.execute_query_expect_success(self.client,
+  def __run_non_const_pattern_ilike(self, client, tbl_name):
+    self.execute_query_expect_success(client,
         "CREATE TABLE {0} (pattern_str string)".format(tbl_name))
-    self.execute_query_expect_success(self.client,
+    self.execute_query_expect_success(client,
         "INSERT INTO TABLE {0} VALUES('%b%'), ('.*b.*')".format(tbl_name))
 
-    ilike_result = self.execute_query_expect_success(self.client,
+    ilike_result = self.execute_query_expect_success(client,
         "SELECT count(*) FROM {0} WHERE 'ABC' ILIKE 
pattern_str".format(tbl_name))
     assert int(ilike_result.get_data()) == 1
-    iregexp_result = self.execute_query_expect_success(self.client,
+    iregexp_result = self.execute_query_expect_success(client,
         "SELECT count(*) FROM {0} WHERE 'ABC' IREGEXP 
pattern_str".format(tbl_name))
     assert int(iregexp_result.get_data()) == 1
diff --git a/tests/query_test/test_hdfs_caching.py 
b/tests/query_test/test_hdfs_caching.py
index f90e83be2..c960e30bf 100644
--- a/tests/query_test/test_hdfs_caching.py
+++ b/tests/query_test/test_hdfs_caching.py
@@ -43,9 +43,9 @@ class TestHdfsCaching(ImpalaTestSuite):
   @classmethod
   def add_test_dimensions(cls):
     super(TestHdfsCaching, cls).add_test_dimensions()
-    cls.ImpalaTestMatrix.add_constraint(lambda v:\
+    cls.ImpalaTestMatrix.add_constraint(lambda v:
         v.get_value('exec_option')['batch_size'] == 0)
-    cls.ImpalaTestMatrix.add_constraint(lambda v:\
+    cls.ImpalaTestMatrix.add_constraint(lambda v:
         v.get_value('table_format').file_format == "text")
 
   # The tpch nation table is cached as part of data loading. We'll issue a 
query
@@ -63,7 +63,7 @@ class TestHdfsCaching(ImpalaTestSuite):
       
cached_bytes_before.append(impalad.service.get_metric_value(cached_read_metric))
 
     # Execute the query.
-    result = self.execute_query(query_string)
+    result = self.execute_query_using_vector(query_string, vector)
     assert(len(result.data) == 1)
     assert(result.data[0] == '25')
 
@@ -77,8 +77,8 @@ class TestHdfsCaching(ImpalaTestSuite):
     num_metrics_increased = 0
     assert(len(cached_bytes_before) == len(cached_bytes_after))
     for i in range(0, len(cached_bytes_before)):
-      assert(cached_bytes_before[i] == cached_bytes_after[i] or\
-             cached_bytes_before[i] + expected_bytes_delta == 
cached_bytes_after[i])
+      assert (cached_bytes_before[i] == cached_bytes_after[i]
+              or cached_bytes_before[i] + expected_bytes_delta == 
cached_bytes_after[i])
       if cached_bytes_after[i] > cached_bytes_before[i]:
         num_metrics_increased = num_metrics_increased + 1
 
@@ -105,7 +105,7 @@ class TestHdfsCaching(ImpalaTestSuite):
 
     # Run this query for some iterations since it is timing dependent.
     for x in range(1, num_iters):
-      result = self.execute_query(query_string)
+      result = self.execute_query_using_vector(query_string, vector)
       assert(len(result.data) == 2)
 
 
@@ -134,9 +134,9 @@ class TestHdfsCachingFallbackPath(ImpalaTestSuite):
     create_query_sql = "CREATE EXTERNAL TABLE %s.cached_nation like 
tpch.nation "\
         "LOCATION '%s'" % (unique_database, encrypted_table_dir)
     check_call(["hdfs", "dfs", "-mkdir", encrypted_table_dir], shell=False)
-    check_call(["hdfs", "crypto", "-createZone", "-keyName", "testKey1", 
"-path",\
+    check_call(["hdfs", "crypto", "-createZone", "-keyName", "testKey1", 
"-path",
         encrypted_table_dir], shell=False)
-    check_call(["hdfs", "dfs", "-cp", 
get_fs_path("/test-warehouse/tpch.nation/*.tbl"),\
+    check_call(["hdfs", "dfs", "-cp", 
get_fs_path("/test-warehouse/tpch.nation/*.tbl"),
         encrypted_table_dir], shell=False)
     # Reduce the scan range size to force the query to have multiple scan 
ranges.
     exec_options = vector.get_value('exec_option')
@@ -154,7 +154,7 @@ class TestHdfsCachingFallbackPath(ImpalaTestSuite):
           pytest.fail("Timed out caching path: " + encrypted_table_dir)
         time.sleep(2)
       self.execute_query_expect_success(self.client, "invalidate metadata "
-          "%s.cached_nation" % unique_database);
+          "%s.cached_nation" % unique_database)
       result = self.execute_query_expect_success(self.client, "select count(*) 
from "
           "%s.cached_nation" % unique_database, exec_options)
       assert(len(result.data) == 1)
@@ -162,8 +162,8 @@ class TestHdfsCachingFallbackPath(ImpalaTestSuite):
     except Exception as e:
       pytest.fail("Failure in test_hdfs_caching_fallback_path: " + str(e))
     finally:
-      check_call(["hdfs", "dfs", "-rm", "-r", "-f", "-skipTrash", 
encrypted_table_dir],\
-          shell=False)
+      check_call(["hdfs", "dfs", "-rm", "-r", "-f", "-skipTrash", 
encrypted_table_dir],
+                 shell=False)
 
 
 @SkipIfFS.hdfs_caching
@@ -177,16 +177,18 @@ class TestHdfsCachingDdl(ImpalaTestSuite):
     super(TestHdfsCachingDdl, cls).add_test_dimensions()
     cls.ImpalaTestMatrix.add_dimension(create_single_exec_option_dimension())
 
-    cls.ImpalaTestMatrix.add_constraint(lambda v:\
-        v.get_value('table_format').file_format == 'text' and \
-        v.get_value('table_format').compression_codec == 'none')
+    cls.ImpalaTestMatrix.add_constraint(lambda v:
+        v.get_value('table_format').file_format == 'text'
+        and v.get_value('table_format').compression_codec == 'none')
 
   def setup_method(self, method):
+    super(TestHdfsCachingDdl, self).setup_method(method)
     self.cleanup_db("cachedb")
     self.client.execute("create database cachedb")
 
   def teardown_method(self, method):
     self.cleanup_db("cachedb")
+    super(TestHdfsCachingDdl, self).teardown_method(method)
 
   @pytest.mark.execute_serially
   @SkipIfDockerizedCluster.accesses_host_filesystem
@@ -200,10 +202,10 @@ class TestHdfsCachingDdl(ImpalaTestSuite):
     # for the table with partitions on both HDFS and local file system.
     assert num_entries_pre == get_num_cache_requests() - 10
 
-    self.client.execute("drop table cachedb.cached_tbl_part")
-    self.client.execute("drop table cachedb.cached_tbl_nopart")
-    self.client.execute("drop table cachedb.cached_tbl_local")
-    self.client.execute("drop table cachedb.cached_tbl_ttl")
+    self.execute_query_using_vector("drop table cachedb.cached_tbl_part", 
vector)
+    self.execute_query_using_vector("drop table cachedb.cached_tbl_nopart", 
vector)
+    self.execute_query_using_vector("drop table cachedb.cached_tbl_local", 
vector)
+    self.execute_query_using_vector("drop table cachedb.cached_tbl_ttl", 
vector)
 
     # Dropping the tables should cleanup cache entries leaving us with the same
     # total number of entries.
@@ -213,18 +215,22 @@ class TestHdfsCachingDdl(ImpalaTestSuite):
   def test_caching_ddl_drop_database(self, vector):
     """IMPALA-2518: DROP DATABASE CASCADE should properly drop all impacted 
cache
         directives"""
+    with self.create_impala_client_from_vector(vector) as client:
+      self.__run_caching_ddl_drop_database(client)
+
+  def __run_caching_ddl_drop_database(self, client):
     num_entries_pre = get_num_cache_requests()
     # Populates the `cachedb` database with some cached tables and partitions
-    self.client.execute("use cachedb")
-    self.client.execute("create table cached_tbl_nopart (i int) cached in 
'testPool'")
-    self.client.execute("insert into cached_tbl_nopart select 1")
-    self.client.execute("create table cached_tbl_part (i int) partitioned by 
(j int) \
-                         cached in 'testPool'")
-    self.client.execute("insert into cached_tbl_part (i,j) select 1, 2")
+    client.execute("use cachedb")
+    client.execute("create table cached_tbl_nopart (i int) cached in 
'testPool'")
+    client.execute("insert into cached_tbl_nopart select 1")
+    client.execute("create table cached_tbl_part (i int) partitioned by (j 
int) "
+                   "cached in 'testPool'")
+    client.execute("insert into cached_tbl_part (i,j) select 1, 2")
     # We expect the number of cached entities to grow
     assert num_entries_pre < get_num_cache_requests()
-    self.client.execute("use default")
-    self.client.execute("drop database cachedb cascade")
+    client.execute("use default")
+    client.execute("drop database cachedb cascade")
     # We want to see the number of cached entities return to the original count
     assert num_entries_pre == get_num_cache_requests()
 
@@ -236,18 +242,21 @@ class TestHdfsCachingDdl(ImpalaTestSuite):
     num_entries_pre = get_num_cache_requests()
     create_table = ("create table cachedb.cached_tbl_reload "
         "(id int) cached in 'testPool' with replication = 8")
-    self.client.execute(create_table)
+    self.execute_query_using_vector(create_table, vector)
 
     # Access the table once to load the metadata
-    self.client.execute("select count(*) from cachedb.cached_tbl_reload")
+    self.execute_query_using_vector(
+      "select count(*) from cachedb.cached_tbl_reload", vector)
 
     create_table = ("create table cachedb.cached_tbl_reload_part (i int) "
         "partitioned by (j int) cached in 'testPool' with replication = 8")
-    self.client.execute(create_table)
+    self.execute_query_using_vector(create_table, vector)
 
     # Add two partitions
-    self.client.execute("alter table cachedb.cached_tbl_reload_part add 
partition (j=1)")
-    self.client.execute("alter table cachedb.cached_tbl_reload_part add 
partition (j=2)")
+    self.execute_query_using_vector(
+      "alter table cachedb.cached_tbl_reload_part add partition (j=1)", vector)
+    self.execute_query_using_vector(
+      "alter table cachedb.cached_tbl_reload_part add partition (j=2)", vector)
 
     assert num_entries_pre + 4 == get_num_cache_requests(), \
       "Adding the tables should be reflected by the number of cache 
directives."
@@ -262,9 +271,11 @@ class TestHdfsCachingDdl(ImpalaTestSuite):
         "/test-warehouse/cachedb.db/cached_tbl_reload_part/j=2", 3)
 
     # Create a bogus cached table abusing an existing cache directive ID, 
IMPALA-1750
-    dirid = 
get_cache_directive_for_path("/test-warehouse/cachedb.db/cached_tbl_reload_part/j=2")
-    self.client.execute(("create table cachedb.no_replication_factor (id int) 
" \
-                         "tblproperties(\"cache_directive_id\"=\"%s\")" % 
dirid))
+    dirid = get_cache_directive_for_path(
+      "/test-warehouse/cachedb.db/cached_tbl_reload_part/j=2")
+    self.execute_query_using_vector(
+      ("create table cachedb.no_replication_factor (id int) "
+       "tblproperties(\"cache_directive_id\"=\"{}\")").format(dirid), vector)
     self.run_test_case('QueryTest/hdfs-caching-validation', vector)
     # Temp fix for IMPALA-2510. Due to IMPALA-2518, when the test database is 
dropped,
     # the cache directives are not removed for table 'cached_tbl_reload_part'.
@@ -290,15 +301,18 @@ class TestHdfsCachingDdl(ImpalaTestSuite):
     # We want to see the number of cached entities return to the original 
count.
     assert num_entries_pre == get_num_cache_requests()
 
+
 def drop_cache_directives_for_path(path):
   """Drop the cache directive for a given path"""
   rc, stdout, stderr = exec_process("hdfs cacheadmin -removeDirectives -path 
%s" % path)
   assert rc == 0, \
       "Error removing cache directive for path %s (%s, %s)" % (path, stdout, 
stderr)
 
+
 def is_path_fully_cached(path):
   """Returns true if all the bytes of the path are cached, false otherwise"""
-  rc, stdout, stderr = exec_process("hdfs cacheadmin -listDirectives -stats 
-path %s" % path)
+  rc, stdout, stderr = exec_process(
+    "hdfs cacheadmin -listDirectives -stats -path %s" % path)
   assert rc == 0
   caching_stats = stdout.strip("\n").split("\n")[-1].split()
   # Compare BYTES_NEEDED and BYTES_CACHED, the output format is as follows
@@ -309,9 +323,10 @@ def is_path_fully_cached(path):
 def get_cache_directive_for_path(path):
   rc, stdout, stderr = exec_process("hdfs cacheadmin -listDirectives -path %s" 
% path)
   assert rc == 0
-  dirid = re.search('^\s+?(\d+)\s+?testPool\s+?.*?$', stdout, 
re.MULTILINE).group(1)
+  dirid = re.search(r'^\s+?(\d+)\s+?testPool\s+?.*?$', stdout, 
re.MULTILINE).group(1)
   return dirid
 
+
 def change_cache_directive_repl_for_path(path, repl):
   """Drop the cache directive for a given path"""
   dirid = get_cache_directive_for_path(path)
@@ -320,6 +335,7 @@ def change_cache_directive_repl_for_path(path, repl):
   assert rc == 0, \
       "Error modifying cache directive for path %s (%s, %s)" % (path, stdout, 
stderr)
 
+
 def get_num_cache_requests():
   """Returns the number of outstanding cache requests. Due to race conditions 
in the
     way cache requests are added/dropped/reported (see IMPALA-3040), this 
function tries
diff --git a/tests/query_test/test_iceberg.py b/tests/query_test/test_iceberg.py
index ab51bab13..0ba8214ba 100644
--- a/tests/query_test/test_iceberg.py
+++ b/tests/query_test/test_iceberg.py
@@ -1526,6 +1526,9 @@ class TestIcebergV2Table(IcebergTestSuite):
   @SkipIfDockerizedCluster.internal_hostname
   @SkipIf.hardcoded_uris
   def test_read_position_deletes(self, vector):
+    # Remove the 'batch_size' option so we can set it in the .test file.
+    # Revisit this if the 'batch_size' dimension size increases.
+    vector.unset_exec_option('batch_size')
     self.run_test_case('QueryTest/iceberg-v2-read-position-deletes', vector)
 
   @SkipIfDockerizedCluster.internal_hostname
@@ -1586,6 +1589,9 @@ class TestIcebergV2Table(IcebergTestSuite):
 
   @SkipIf.hardcoded_uris
   def test_metadata_tables(self, vector, unique_database):
+    # Remove the 'batch_size' option so we can set it in the .test file.
+    # Revisit this if the 'batch_size' dimension size increases.
+    vector.unset_exec_option('batch_size')
     with self.create_impala_client() as impalad_client:
       overwrite_snapshot_id = impalad_client.execute("select snapshot_id from "
                              
"functional_parquet.iceberg_query_metadata.snapshots "
diff --git a/tests/query_test/test_insert_behaviour.py 
b/tests/query_test/test_insert_behaviour.py
index 9f55a9377..3864bca3f 100644
--- a/tests/query_test/test_insert_behaviour.py
+++ b/tests/query_test/test_insert_behaviour.py
@@ -41,6 +41,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
   TEST_DB_NAME = "insert_empty_result_db"
 
   def setup_method(self, method):
+    super(TestInsertBehaviour, self).setup_method(method)
     # cleanup and create a fresh test database
     if method.__name__ == "test_insert_select_with_empty_resultset":
       self.cleanup_db(self.TEST_DB_NAME)
@@ -50,6 +51,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
   def teardown_method(self, method):
     if method.__name__ == "test_insert_select_with_empty_resultset":
       self.cleanup_db(self.TEST_DB_NAME)
+    super(TestInsertBehaviour, self).teardown_method(method)
 
   @SkipIfFS.eventually_consistent
   @pytest.mark.execute_serially
@@ -77,7 +79,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     # We do this here because the above 'make_dir' call doesn't make a 
directory for S3.
     for dir_ in dir_locations:
       self.filesystem_client.create_file(
-          table_dir + dir_ + '/' + hidden_file_locations[0] , '', 
overwrite=True)
+        table_dir + dir_ + '/' + hidden_file_locations[0], '', overwrite=True)
 
     for file_ in hidden_file_locations:
       self.filesystem_client.create_file(table_dir + file_, '', overwrite=True)
@@ -406,8 +408,8 @@ class TestInsertBehaviour(ImpalaTestSuite):
     self.execute_query_expect_failure(self.client, insert_query)
 
     # Check that the mask correctly applies to the named user ACL
-    self.hdfs_client.setacl(table_path, "user::r-x,user:" + user +
-                            ":rwx,group::r-x,other::rwx,mask::r--")
+    self.hdfs_client.setacl(table_path, "user::r-x,user:" + user
+                            + ":rwx,group::r-x,other::rwx,mask::r--")
     self.execute_query_expect_success(self.client, refresh_query)
     # Should be unwritable because mask applies to named user and disables 
writing
     self.execute_query_expect_failure(self.client, insert_query)
@@ -624,7 +626,7 @@ class TestInsertBehaviour(ImpalaTestSuite):
     # We expect exactly one partition per year and month, since subsequent row 
batches of
     # a partition will be written into the same file.
     expected_partitions = \
-        ["year=%s/month=%s" % (y, m) for y in [2009, 2010] for m in 
range(1,13)]
+        ["year=%s/month=%s" % (y, m) for y in [2009, 2010] for m in range(1, 
13)]
 
     for partition in expected_partitions:
       partition_path = "{0}/{1}".format(table_path, partition)
diff --git a/tests/query_test/test_kudu.py b/tests/query_test/test_kudu.py
index 21f8a0971..bd1be1233 100644
--- a/tests/query_test/test_kudu.py
+++ b/tests/query_test/test_kudu.py
@@ -75,6 +75,9 @@ class TestKuduBasicDML(KuduTestSuite):
 
   @SkipIfKudu.no_hybrid_clock()
   def test_kudu_insert(self, vector, unique_database):
+    # Remove the 'abort_on_error' option so we can set it in the .test file.
+    # Revisit this if the 'abort_on_error' dimension size increases.
+    vector.unset_exec_option('abort_on_error')
     self.run_test_case('QueryTest/kudu_insert', vector, use_db=unique_database)
 
   @SkipIfKudu.no_hybrid_clock()
diff --git a/tests/query_test/test_runtime_filters.py 
b/tests/query_test/test_runtime_filters.py
index db2921f7b..83a0031b7 100644
--- a/tests/query_test/test_runtime_filters.py
+++ b/tests/query_test/test_runtime_filters.py
@@ -87,6 +87,9 @@ class TestRuntimeFilters(ImpalaTestSuite):
       add_mandatory_exec_option(cls, "async_codegen", 1)
 
   def test_basic_filters(self, vector):
+    # Remove the 'num_nodes' option so we can set it in the .test file.
+    # Revisit this if the 'num_nodes' dimension size increases.
+    vector.unset_exec_option('num_nodes')
     num_filters_per_host = 
vector.get_exec_option('max_num_filters_aggregated_per_host')
     num_backend = 2  # exclude coordinator
     num_updates = (num_backend + 1 if num_filters_per_host == 0

Reply via email to