This is an automated email from the ASF dual-hosted git repository.

yanxinyi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
     new bf0cef7  PHOENIX-5940 Pre-4.15 client cannot connect to 4.15+ server after SYSTEM.CATALOG region has split
bf0cef7 is described below

commit bf0cef74f63f6d63bff889562e6bdb7a4d4236cb
Author: Xinyi Yan <x...@salesforce.com>
AuthorDate: Sat Nov 7 14:07:24 2020 -0800

    PHOENIX-5940 Pre-4.15 client cannot connect to 4.15+ server after SYSTEM.CATALOG region has split
    
    Signed-off-by: Xinyi Yan <yanxi...@apache.org>
---
 .../BackwardCompatibilityForSplitableSyscatIT.java |  95 ++++++
 .../phoenix/end2end/BackwardCompatibilityIT.java   | 375 +++------------------
 .../end2end/BackwardCompatibilityTestUtil.java     | 315 +++++++++++++++++
 .../it/resources/gold_files/gold_query_delete.txt  |  22 ++
 .../gold_query_select_and_drop_table.txt           |  22 ++
 .../it/resources/sql_files/create_tmp_table.sql    |  21 ++
 .../src/it/resources/sql_files/query_delete.sql    |  24 ++
 .../sql_files/query_select_and_drop_table.sql      |  21 ++
 .../phoenix/coprocessor/MetaDataEndpointImpl.java  |  27 +-
 9 files changed, 596 insertions(+), 326 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityForSplitableSyscatIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityForSplitableSyscatIT.java
new file mode 100644
index 0000000..c100930
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityForSplitableSyscatIT.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.util.Collection;
+import java.util.Map;
+
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.CREATE_TMP_TABLE;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.QUERY_DELETE;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.QUERY_SELECT_AND_DROP_TABLE;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.assertExpectedOutput;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.checkForPreConditions;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.computeClientVersions;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.executeQueriesWithCurrentVersion;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.executeQueryWithClientVersion;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.UpgradeProps.NONE;
+
+/**
+ * This class is meant specifically for testing the splittable SYSTEM.CATALOG (syscat) with
+ * all compatible client versions.
+ */
+
+@RunWith(Parameterized.class)
+@Category(NeedsOwnMiniClusterTest.class)
+public class BackwardCompatibilityForSplitableSyscatIT extends SplitSystemCatalogIT {
+    private final String compatibleClientVersion;
+    private String zkQuorum;
+    private String url;
+
+    @Parameterized.Parameters(name = "BackwardCompatibilityForSplitableSyscatIT_compatibleClientVersion={0}")
+    public static synchronized Collection<String> data() throws Exception {
+        return computeClientVersions();
+    }
+
+    public BackwardCompatibilityForSplitableSyscatIT(String compatibleClientVersion) {
+        this.compatibleClientVersion = compatibleClientVersion;
+    }
+
+    @Before
+    public synchronized void setup() throws Exception {
+        Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(1);
+        doSetup(serverProps);
+        zkQuorum = "localhost:" + getZKClientPort(config);
+        url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
+        checkForPreConditions(compatibleClientVersion, config);
+    }
+
+    @Test
+    public void testSplittableSyscatWithOldClientForAddingDataAndDelete() throws Exception {
+        executeQueryWithClientVersion(compatibleClientVersion, QUERY_DELETE, zkQuorum);
+        assertExpectedOutput(QUERY_DELETE);
+    }
+
+    @Test
+    public void testSplittableSyscatWithNewClientForAddingDataAndDelete() throws Exception {
+        executeQueriesWithCurrentVersion(QUERY_DELETE, url, NONE);
+        assertExpectedOutput(QUERY_DELETE);
+    }
+
+    @Test
+    public void testSplittableSyscatWithOldClientLoadDataAndNewClientQueryAndDelete() throws Exception {
+        executeQueryWithClientVersion(compatibleClientVersion, CREATE_TMP_TABLE, zkQuorum);
+        executeQueriesWithCurrentVersion(QUERY_SELECT_AND_DROP_TABLE, url, NONE);
+        assertExpectedOutput(QUERY_SELECT_AND_DROP_TABLE);
+    }
+
+    @Test
+    public void testSplittableSyscatWithNewClientLoadDataAndOldClientQueryAndDelete() throws Exception {
+        executeQueriesWithCurrentVersion(CREATE_TMP_TABLE, url, NONE);
+        executeQueryWithClientVersion(compatibleClientVersion, QUERY_SELECT_AND_DROP_TABLE, zkQuorum);
+        assertExpectedOutput(QUERY_SELECT_AND_DROP_TABLE);
+    }
+}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
index 8ca3db5..7e4f287 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityIT.java
@@ -17,55 +17,39 @@
  */
 package org.apache.phoenix.end2end;
 
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.ADD_DATA;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.ADD_DELETE;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.CREATE_ADD;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.CREATE_DIVERGED_VIEW;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.QUERY_ADD_DATA;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.QUERY_ADD_DELETE;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.QUERY_CREATE_ADD;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.QUERY_CREATE_DIVERGED_VIEW;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.assertExpectedOutput;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.checkForPreConditions;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.computeClientVersions;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.executeQueriesWithCurrentVersion;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.executeQueryWithClientVersion;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.UpgradeProps.NONE;
+import static org.apache.phoenix.end2end.BackwardCompatibilityTestUtil.UpgradeProps.SET_MAX_LOOK_BACK_AGE;
 import static org.apache.phoenix.query.BaseTest.setUpConfigForMiniCluster;
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assume.assumeFalse;
 
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.net.URL;
 import java.sql.Connection;
 import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.ResultSetMetaData;
 import java.util.Collection;
-import java.util.List;
-import java.util.Properties;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import com.fasterxml.jackson.core.JsonParser;
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.util.VersionInfo;
-import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
-import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixDriver;
-import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.SystemTaskSplitPolicy;
 import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -74,9 +58,6 @@ import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
-import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
-
-
 /**
  * This class is meant for testing all compatible client versions 
  * against the current server version. It runs SQL queries with given 
@@ -87,28 +68,6 @@ import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
 @Category(NeedsOwnMiniClusterTest.class)
 public class BackwardCompatibilityIT {
 
-    private static final String SQL_DIR = "sql_files/";
-    private static final String RESULTS_AND_GOLD_FILES_DIR = "gold_files/";
-    private static final String COMPATIBLE_CLIENTS_JSON =
-            "compatible_client_versions.json";
-    private static final String BASH = "/bin/bash";
-    private static final String EXECUTE_QUERY_SH = "scripts/execute_query.sh";
-    private static final String QUERY_PREFIX = "query_";
-    private static final String RESULT_PREFIX = "result_";
-    private static final String GOLD_PREFIX = "gold_";
-    private static final String SQL_EXTENSION = ".sql";
-    private static final String TEXT_EXTENSION = ".txt";
-    private static final String CREATE_ADD = "create_add";
-    private static final String CREATE_DIVERGED_VIEW = "create_diverged_view";
-    private static final String ADD_DATA = "add_data";
-    private static final String ADD_DELETE = "add_delete";
-    private static final String QUERY_CREATE_ADD = QUERY_PREFIX + CREATE_ADD;
-    private static final String QUERY_ADD_DATA = QUERY_PREFIX + ADD_DATA;
-    private static final String QUERY_ADD_DELETE = QUERY_PREFIX + ADD_DELETE;
-    private static final String QUERY_CREATE_DIVERGED_VIEW = QUERY_PREFIX + CREATE_DIVERGED_VIEW;
-    private static final String MVN_HOME = "maven.home";
-    private static final String JAVA_TMP_DIR = "java.io.tmpdir";
-
     private final String compatibleClientVersion;
     private static Configuration conf;
     private static HBaseTestingUtility hbaseTestUtil;
@@ -116,11 +75,6 @@ public class BackwardCompatibilityIT {
     private static String url;
     private String tmpDir;
 
-    private enum UpgradeProps {
-        NONE,
-        SET_MAX_LOOK_BACK_AGE
-    }
-
     public BackwardCompatibilityIT(String compatibleClientVersion) {
         this.compatibleClientVersion = compatibleClientVersion;
     }
@@ -141,7 +95,7 @@ public class BackwardCompatibilityIT {
         zkQuorum = "localhost:" + hbaseTestUtil.getZkCluster().getClientPort();
         url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
         DriverManager.registerDriver(PhoenixDriver.INSTANCE);
-        checkForPreConditions();
+        checkForPreConditions(compatibleClientVersion, conf);
     }
     
     @After
@@ -153,29 +107,6 @@ public class BackwardCompatibilityIT {
         }
         System.setProperty("java.io.tmpdir", tmpDir);
     }
-    
-    private static List<String> computeClientVersions() throws Exception {
-        String hbaseVersion = VersionInfo.getVersion();
-        Pattern p = Pattern.compile("\\d+\\.\\d+");
-        Matcher m = p.matcher(hbaseVersion);
-        String hbaseProfile = null;
-        if (m.find()) {
-            hbaseProfile = m.group();
-        }
-        List<String> clientVersions = Lists.newArrayList();
-        ObjectMapper mapper = new ObjectMapper();
-        mapper.configure(JsonParser.Feature.ALLOW_COMMENTS, true);
-        try (InputStream inputStream = BackwardCompatibilityIT.class
-                .getClassLoader().getResourceAsStream(COMPATIBLE_CLIENTS_JSON)) {
-            assertNotNull(inputStream);
-            JsonNode jsonNode = mapper.readTree(inputStream);
-            JsonNode HBaseProfile = jsonNode.get(hbaseProfile);
-            for (final JsonNode clientVersion : HBaseProfile) {
-                clientVersions.add(clientVersion.textValue() + "-HBase-" + hbaseProfile);
-            }
-        }
-        return clientVersions;
-    }
 
     /**
      * Scenario: 
@@ -188,16 +119,16 @@ public class BackwardCompatibilityIT {
     @Test
     public void testUpsertWithOldClient() throws Exception {
         // Insert data with old client and read with new client
-        executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
-        executeQueriesWithCurrentVersion(QUERY_CREATE_ADD, UpgradeProps.NONE);
+        executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD, zkQuorum);
+        executeQueriesWithCurrentVersion(QUERY_CREATE_ADD, url, NONE);
         assertExpectedOutput(QUERY_CREATE_ADD);
     }
 
     @Test
     public void testCreateDivergedViewWithOldClientReadFromNewClient() throws Exception {
         // Create a base table, view and make it diverge from an old client
-        executeQueryWithClientVersion(compatibleClientVersion, CREATE_DIVERGED_VIEW);
-        executeQueriesWithCurrentVersion(QUERY_CREATE_DIVERGED_VIEW, UpgradeProps.NONE);
+        executeQueryWithClientVersion(compatibleClientVersion, CREATE_DIVERGED_VIEW, zkQuorum);
+        executeQueriesWithCurrentVersion(QUERY_CREATE_DIVERGED_VIEW, url, NONE);
         assertExpectedOutput(QUERY_CREATE_DIVERGED_VIEW);
     }
 
@@ -205,17 +136,16 @@ public class BackwardCompatibilityIT {
     public void testCreateDivergedViewWithOldClientReadWithMaxLookBackAge()
             throws Exception {
         // Create a base table, view and make it diverge from an old client
-        executeQueryWithClientVersion(compatibleClientVersion, CREATE_DIVERGED_VIEW);
-        executeQueriesWithCurrentVersion(QUERY_CREATE_DIVERGED_VIEW,
-            UpgradeProps.SET_MAX_LOOK_BACK_AGE);
+        executeQueryWithClientVersion(compatibleClientVersion, CREATE_DIVERGED_VIEW, zkQuorum);
+        executeQueriesWithCurrentVersion(QUERY_CREATE_DIVERGED_VIEW, url, SET_MAX_LOOK_BACK_AGE);
         assertExpectedOutput(QUERY_CREATE_DIVERGED_VIEW);
     }
 
     @Test
     public void testCreateDivergedViewWithOldClientReadFromOldClient() throws Exception {
         // Create a base table, view and make it diverge from an old client
-        executeQueryWithClientVersion(compatibleClientVersion, CREATE_DIVERGED_VIEW);
-        executeQueryWithClientVersion(compatibleClientVersion, QUERY_CREATE_DIVERGED_VIEW);
+        executeQueryWithClientVersion(compatibleClientVersion, CREATE_DIVERGED_VIEW, zkQuorum);
+        executeQueryWithClientVersion(compatibleClientVersion, QUERY_CREATE_DIVERGED_VIEW, zkQuorum);
         assertExpectedOutput(QUERY_CREATE_DIVERGED_VIEW);
     }
 
@@ -223,26 +153,26 @@ public class BackwardCompatibilityIT {
     public void testCreateDivergedViewWithOldClientReadFromOldClientAfterUpgrade()
             throws Exception {
         // Create a base table, view and make it diverge from an old client
-        executeQueryWithClientVersion(compatibleClientVersion, CREATE_DIVERGED_VIEW);
+        executeQueryWithClientVersion(compatibleClientVersion, CREATE_DIVERGED_VIEW, zkQuorum);
         try (Connection conn = DriverManager.getConnection(url)) {
             // Just connect with a new client to cause a metadata upgrade
         }
         // Query with an old client again
-        executeQueryWithClientVersion(compatibleClientVersion, QUERY_CREATE_DIVERGED_VIEW);
+        executeQueryWithClientVersion(compatibleClientVersion, QUERY_CREATE_DIVERGED_VIEW, zkQuorum);
         assertExpectedOutput(QUERY_CREATE_DIVERGED_VIEW);
     }
 
     @Test
     public void testCreateDivergedViewWithNewClientReadFromOldClient() throws Exception {
-        executeQueriesWithCurrentVersion(CREATE_DIVERGED_VIEW, UpgradeProps.NONE);
-        executeQueryWithClientVersion(compatibleClientVersion, QUERY_CREATE_DIVERGED_VIEW);
+        executeQueriesWithCurrentVersion(CREATE_DIVERGED_VIEW, url, NONE);
+        executeQueryWithClientVersion(compatibleClientVersion, QUERY_CREATE_DIVERGED_VIEW, zkQuorum);
         assertExpectedOutput(QUERY_CREATE_DIVERGED_VIEW);
     }
 
     @Test
     public void testCreateDivergedViewWithNewClientReadFromNewClient() throws Exception {
-        executeQueriesWithCurrentVersion(CREATE_DIVERGED_VIEW, UpgradeProps.NONE);
-        executeQueriesWithCurrentVersion(QUERY_CREATE_DIVERGED_VIEW, UpgradeProps.NONE);
+        executeQueriesWithCurrentVersion(CREATE_DIVERGED_VIEW, url, NONE);
+        executeQueriesWithCurrentVersion(QUERY_CREATE_DIVERGED_VIEW, url, NONE);
         assertExpectedOutput(QUERY_CREATE_DIVERGED_VIEW);
     }
 
@@ -257,8 +187,8 @@ public class BackwardCompatibilityIT {
     @Test
     public void testSelectWithOldClient() throws Exception {
         // Insert data with new client and read with old client
-        executeQueriesWithCurrentVersion(CREATE_ADD, UpgradeProps.NONE);
-        executeQueryWithClientVersion(compatibleClientVersion, QUERY_CREATE_ADD);
+        executeQueriesWithCurrentVersion(CREATE_ADD, url, NONE);
+        executeQueryWithClientVersion(compatibleClientVersion, QUERY_CREATE_ADD, zkQuorum);
         assertExpectedOutput(QUERY_CREATE_ADD);
     }
 
@@ -277,13 +207,13 @@ public class BackwardCompatibilityIT {
     @Test
     public void testSelectUpsertWithNewClientWithMaxLookBackAge() throws Exception {
         // Insert data with old client and read with new client
-        executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
-        executeQueriesWithCurrentVersion(QUERY_CREATE_ADD, UpgradeProps.SET_MAX_LOOK_BACK_AGE);
+        executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD, zkQuorum);
+        executeQueriesWithCurrentVersion(QUERY_CREATE_ADD, url, SET_MAX_LOOK_BACK_AGE);
         assertExpectedOutput(QUERY_CREATE_ADD);
 
         // Insert more data with new client and read with old client
-        executeQueriesWithCurrentVersion(ADD_DATA, UpgradeProps.SET_MAX_LOOK_BACK_AGE);
-        executeQueryWithClientVersion(compatibleClientVersion, QUERY_ADD_DATA);
+        executeQueriesWithCurrentVersion(ADD_DATA, url, SET_MAX_LOOK_BACK_AGE);
+        executeQueryWithClientVersion(compatibleClientVersion, QUERY_ADD_DATA, zkQuorum);
         assertExpectedOutput(QUERY_ADD_DATA);
     }
 
@@ -300,13 +230,13 @@ public class BackwardCompatibilityIT {
     @Test
     public void testSelectUpsertWithNewClient() throws Exception {
         // Insert data with old client and read with new client
-        executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
-        executeQueriesWithCurrentVersion(QUERY_CREATE_ADD, UpgradeProps.NONE);
+        executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD, zkQuorum);
+        executeQueriesWithCurrentVersion(QUERY_CREATE_ADD, url, NONE);
         assertExpectedOutput(QUERY_CREATE_ADD);
 
         // Insert more data with new client and read with old client
-        executeQueriesWithCurrentVersion(ADD_DATA, UpgradeProps.NONE);
-        executeQueryWithClientVersion(compatibleClientVersion, QUERY_ADD_DATA);
+        executeQueriesWithCurrentVersion(ADD_DATA, url, NONE);
+        executeQueryWithClientVersion(compatibleClientVersion, QUERY_ADD_DATA, zkQuorum);
         assertExpectedOutput(QUERY_ADD_DATA);
     }
 
@@ -323,13 +253,13 @@ public class BackwardCompatibilityIT {
     @Test
     public void testSelectUpsertWithOldClient() throws Exception {
         // Insert data with new client and read with old client
-        executeQueriesWithCurrentVersion(CREATE_ADD, UpgradeProps.NONE);
-        executeQueryWithClientVersion(compatibleClientVersion, QUERY_CREATE_ADD);
+        executeQueriesWithCurrentVersion(CREATE_ADD, url, NONE);
+        executeQueryWithClientVersion(compatibleClientVersion, QUERY_CREATE_ADD, zkQuorum);
         assertExpectedOutput(QUERY_CREATE_ADD);
 
         // Insert more data with old client and read with new client
-        executeQueryWithClientVersion(compatibleClientVersion, ADD_DATA);
-        executeQueriesWithCurrentVersion(QUERY_ADD_DATA, UpgradeProps.NONE);
+        executeQueryWithClientVersion(compatibleClientVersion, ADD_DATA, zkQuorum);
+        executeQueriesWithCurrentVersion(QUERY_ADD_DATA, url, NONE);
         assertExpectedOutput(QUERY_ADD_DATA);
     }
 
@@ -345,13 +275,13 @@ public class BackwardCompatibilityIT {
     @Test
     public void testUpsertDeleteWithOldClient() throws Exception {
         // Insert data with old client and read with new client
-        executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD);
-        executeQueriesWithCurrentVersion(QUERY_CREATE_ADD, UpgradeProps.NONE);
+        executeQueryWithClientVersion(compatibleClientVersion, CREATE_ADD, zkQuorum);
+        executeQueriesWithCurrentVersion(QUERY_CREATE_ADD, url, NONE);
         assertExpectedOutput(QUERY_CREATE_ADD);
 
         // Deletes with the old client
-        executeQueryWithClientVersion(compatibleClientVersion, ADD_DELETE);
-        executeQueryWithClientVersion(compatibleClientVersion, QUERY_ADD_DELETE);
+        executeQueryWithClientVersion(compatibleClientVersion, ADD_DELETE, zkQuorum);
+        executeQueryWithClientVersion(compatibleClientVersion, QUERY_ADD_DELETE, zkQuorum);
         assertExpectedOutput(QUERY_ADD_DELETE);
     }
 
@@ -367,22 +297,20 @@ public class BackwardCompatibilityIT {
     @Test
     public void testUpsertDeleteWithNewClient() throws Exception {
         // Insert data with old client and read with new client
-        executeQueriesWithCurrentVersion(CREATE_ADD, UpgradeProps.NONE);
-        executeQueryWithClientVersion(compatibleClientVersion, QUERY_CREATE_ADD);
+        executeQueriesWithCurrentVersion(CREATE_ADD, url, NONE);
+        executeQueryWithClientVersion(compatibleClientVersion, QUERY_CREATE_ADD, zkQuorum);
         assertExpectedOutput(QUERY_CREATE_ADD);
 
         // Deletes with the new client
-        executeQueriesWithCurrentVersion(ADD_DELETE, UpgradeProps.NONE);
-        executeQueriesWithCurrentVersion(QUERY_ADD_DELETE, UpgradeProps.NONE);
+        executeQueriesWithCurrentVersion(ADD_DELETE, url, NONE);
+        executeQueriesWithCurrentVersion(QUERY_ADD_DELETE, url, NONE);
         assertExpectedOutput(QUERY_ADD_DELETE);
     }
 
     @Test
     public void testUpdatedSplitPolicyForSysTask() throws Exception {
-        executeQueryWithClientVersion(compatibleClientVersion,
-            CREATE_DIVERGED_VIEW);
-        executeQueriesWithCurrentVersion(QUERY_CREATE_DIVERGED_VIEW,
-            UpgradeProps.NONE);
+        executeQueryWithClientVersion(compatibleClientVersion, CREATE_DIVERGED_VIEW, zkQuorum);
+        executeQueriesWithCurrentVersion(QUERY_CREATE_DIVERGED_VIEW, url, NONE);
         try (org.apache.hadoop.hbase.client.Connection conn =
                 hbaseTestUtil.getConnection(); Admin admin = conn.getAdmin()) {
             TableDescriptor tableDescriptor = admin.getDescriptor(
@@ -394,199 +322,4 @@ public class BackwardCompatibilityIT {
         }
         assertExpectedOutput(QUERY_CREATE_DIVERGED_VIEW);
     }
-
-    private void checkForPreConditions() throws Exception {
-        // For the first code cut of any major version, there wouldn't be any 
backward compatible
-        // clients. Hence the test wouldn't run and just return true when the 
client
-        // version to be tested is same as current version
-        
assumeFalse(compatibleClientVersion.contains(MetaDataProtocol.CURRENT_CLIENT_VERSION));
-        // Make sure that cluster is clean before test execution with no 
system tables
-        try (org.apache.hadoop.hbase.client.Connection conn = 
-                ConnectionFactory.createConnection(conf);
-                Admin admin = conn.getAdmin()) {
-            
assertFalse(admin.tableExists(TableName.valueOf(QueryConstants.SYSTEM_SCHEMA_NAME,
-                    PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE)));
-        }
-    }
-
-    // Executes the queries listed in the operation file with a given client 
version
-    private void executeQueryWithClientVersion(String clientVersion, String 
operation)
-            throws Exception {
-        List<String> cmdParams = Lists.newArrayList();
-        cmdParams.add(BASH);
-        // Note that auto-commit is true for queries executed via SQLline
-        URL fileUrl = 
BackwardCompatibilityIT.class.getClassLoader().getResource(EXECUTE_QUERY_SH);
-        assertNotNull(fileUrl);
-        cmdParams.add(new File(fileUrl.getFile()).getAbsolutePath());
-        cmdParams.add(zkQuorum);
-        cmdParams.add(clientVersion);
-
-        fileUrl = BackwardCompatibilityIT.class.getClassLoader()
-                .getResource(SQL_DIR + operation + SQL_EXTENSION);
-        assertNotNull(fileUrl);
-        cmdParams.add(new File(fileUrl.getFile()).getAbsolutePath());
-        fileUrl = BackwardCompatibilityIT.class.getClassLoader().getResource(
-                RESULTS_AND_GOLD_FILES_DIR);
-        assertNotNull(fileUrl);
-        String resultFilePath = new File(fileUrl.getFile()).getAbsolutePath() 
+ "/" +
-                RESULT_PREFIX + operation + TEXT_EXTENSION;
-        cmdParams.add(resultFilePath);
-        cmdParams.add(System.getProperty(JAVA_TMP_DIR));
-
-        if (System.getProperty(MVN_HOME) != null) {
-            cmdParams.add(System.getProperty(MVN_HOME));
-        }
-
-        ProcessBuilder pb = new ProcessBuilder(cmdParams);
-        final Process p = pb.start();
-        final StringBuffer sb = new StringBuffer();
-        //Capture the output stream if any from the execution of the script
-        Thread outputStreamThread = new Thread() {
-            @Override
-            public void run() {
-                try (BufferedReader reader = new BufferedReader(
-                            new InputStreamReader(p.getInputStream()))) {
-                    String line;
-                    while ((line = reader.readLine()) != null) {
-                        sb.append(line);
-                    }
-                } catch (final Exception e) {
-                    sb.append(e.getMessage());
-                }
-            }
-        };
-        outputStreamThread.start();
-        //Capture the error stream if any from the execution of the script
-        Thread errorStreamThread = new Thread() {
-            @Override
-            public void run() {
-                try (BufferedReader reader = new BufferedReader(
-                            new InputStreamReader(p.getErrorStream()))) {
-                    String line;
-                    while ((line = reader.readLine()) != null) {
-                        sb.append(line);
-                    }
-                } catch (final Exception e) {
-                    sb.append(e.getMessage());
-                }
-            }
-        };
-        errorStreamThread.start();
-        p.waitFor();
-        assertEquals(String.format("Executing the query failed%s. Check the 
result file: %s",
-                sb.length() > 0 ? sb.append(" with : ").toString() : "", 
resultFilePath),
-                0, p.exitValue());
-    }
-
-    // Executes the SQL commands listed in the given operation file from the 
sql_files directory
-    private void executeQueriesWithCurrentVersion(String operation,
-            UpgradeProps upgradeProps) throws Exception {
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        if (UpgradeProps.SET_MAX_LOOK_BACK_AGE.equals(upgradeProps)) {
-            // any value < 31 is enough to test relaxing the MaxLookBack age
-            // checks during an upgrade because during upgrade, SCN for the
-            // connection is set to be the phoenix version timestamp
-            // (31 as of now: MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 / 
MIN_SYSTEM_TABLE_TIMESTAMP_5_1_0)
-            // Hence, keeping value: 15
-            
props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
-                Integer.toString(15));
-        }
-
-        try (Connection conn = DriverManager.getConnection(url, props)) {
-            StringBuilder sb = new StringBuilder();
-            try (BufferedReader reader =
-                    getBufferedReaderForResource(SQL_DIR + operation + 
SQL_EXTENSION)) {
-                String sqlCommand;
-                while ((sqlCommand = reader.readLine()) != null) {
-                    sqlCommand = sqlCommand.trim();
-                    if (sqlCommand.length() == 0 || sqlCommand.startsWith("/")
-                            || sqlCommand.startsWith("*")) {
-                        continue;
-                    }
-                    sb.append(sqlCommand);
-                }
-            }
-            ResultSet rs;
-            String[] sqlCommands = sb.toString().split(";");
-
-            URL fileUrl = 
BackwardCompatibilityIT.class.getClassLoader().getResource(
-                    RESULTS_AND_GOLD_FILES_DIR);
-            assertNotNull(fileUrl);
-            final String resultFile = new 
File(fileUrl.getFile()).getAbsolutePath() + "/" +
-                    RESULT_PREFIX + operation + TEXT_EXTENSION;
-            try (BufferedWriter br = new BufferedWriter(new 
FileWriter(resultFile))) {
-                for (String command : sqlCommands) {
-                    try (PreparedStatement stmt = 
conn.prepareStatement(command)) {
-                        stmt.execute();
-                        rs = stmt.getResultSet();
-                        if (rs != null) {
-                            saveResultSet(rs, br);
-                        }
-                    }
-                    conn.commit();
-                }
-            }
-        }
-    }
-
-    // Saves the result set to a text file to be compared against the gold 
file for difference
-    private void saveResultSet(ResultSet rs, BufferedWriter br) throws 
Exception {
-        ResultSetMetaData rsm = rs.getMetaData();
-        int columnCount = rsm.getColumnCount();
-        StringBuilder row = new 
StringBuilder(formatStringWithQuotes(rsm.getColumnName(1)));
-        for (int i = 2; i <= columnCount; i++) {
-            
row.append(",").append(formatStringWithQuotes(rsm.getColumnName(i)));
-        }
-        br.write(row.toString());
-        br.write("\n");
-        while (rs.next()) {
-            row = new StringBuilder(formatStringWithQuotes(rs.getString(1)));
-            for (int i = 2; i <= columnCount; i++) {
-                
row.append(",").append(formatStringWithQuotes(rs.getString(i)));
-            }
-            br.write(row.toString());
-            br.write("\n");
-        }
-    }
-
-    private String formatStringWithQuotes(String str) {
-        return (str != null) ? String.format("\'%s\'", str) : "\'\'";
-    }
-
-    private BufferedReader getBufferedReaderForResource(String relativePath)
-            throws FileNotFoundException {
-        URL fileUrl = getClass().getClassLoader().getResource(relativePath);
-        assertNotNull(fileUrl);
-        return new BufferedReader(new FileReader(new File(fileUrl.getFile())));
-    }
-
-    // Compares the result file against the gold file to match for the 
expected output
-    // for the given operation
-    private void assertExpectedOutput(String result) throws Exception {
-        List<String> resultFile = Lists.newArrayList();
-        List<String> goldFile = Lists.newArrayList();
-        String line;
-        try (BufferedReader resultFileReader = getBufferedReaderForResource(
-                RESULTS_AND_GOLD_FILES_DIR + RESULT_PREFIX + result + 
TEXT_EXTENSION)) {
-            while ((line = resultFileReader.readLine()) != null) {
-                resultFile.add(line.trim());
-            }
-        }
-        try (BufferedReader goldFileReader = getBufferedReaderForResource(
-                RESULTS_AND_GOLD_FILES_DIR + GOLD_PREFIX + result + 
TEXT_EXTENSION)) {
-            while ((line = goldFileReader.readLine()) != null) {
-                line = line.trim();
-                if ( !(line.isEmpty() || line.startsWith("*") || 
line.startsWith("/"))) {
-                    goldFile.add(line);
-                }
-            }
-        }
-
-        // We take the first line in gold file and match against the result 
file to exclude any
-        // other WARNING messages that comes as a result of the query execution
-        int index = resultFile.indexOf(goldFile.get(0));
-        assertNotEquals("Mismatch found between gold file and result file", 
-1, index);
-        resultFile = resultFile.subList(index, resultFile.size());
-        assertEquals(goldFile, resultFile);
-    }
 }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityTestUtil.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityTestUtil.java
new file mode 100644
index 0000000..7689b30
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BackwardCompatibilityTestUtil.java
@@ -0,0 +1,315 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.phoenix.compat.hbase.coprocessor.CompatBaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.util.List;
+import java.util.Properties;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assume.assumeFalse;
+
+public final class BackwardCompatibilityTestUtil {
+    public static final String SQL_DIR = "sql_files/";
+    public static final String RESULTS_AND_GOLD_FILES_DIR = "gold_files/";
+    public static final String COMPATIBLE_CLIENTS_JSON =
+            "compatible_client_versions.json";
+    public static final String BASH = "/bin/bash";
+    public static final String EXECUTE_QUERY_SH = "scripts/execute_query.sh";
+    public static final String QUERY_PREFIX = "query_";
+    public static final String RESULT_PREFIX = "result_";
+    public static final String GOLD_PREFIX = "gold_";
+    public static final String SQL_EXTENSION = ".sql";
+    public static final String TEXT_EXTENSION = ".txt";
+    public static final String CREATE_ADD = "create_add";
+    public static final String CREATE_TMP_TABLE = "create_tmp_table";
+    public static final String CREATE_DIVERGED_VIEW = "create_diverged_view";
+    public static final String ADD_DATA = "add_data";
+    public static final String ADD_DELETE = "add_delete";
+    public static final String DELETE = "delete";
+    public static final String SELECT_AND_DROP_TABLE = "select_and_drop_table";
+    public static final String QUERY_CREATE_ADD = QUERY_PREFIX + CREATE_ADD;
+    public static final String QUERY_ADD_DATA = QUERY_PREFIX + ADD_DATA;
+    public static final String QUERY_ADD_DELETE = QUERY_PREFIX + ADD_DELETE;
+    public static final String QUERY_DELETE = QUERY_PREFIX + DELETE;
+    public static final String QUERY_SELECT_AND_DROP_TABLE = QUERY_PREFIX + SELECT_AND_DROP_TABLE;
+    public static final String QUERY_CREATE_DIVERGED_VIEW = QUERY_PREFIX + CREATE_DIVERGED_VIEW;
+    public static final String MVN_HOME = "maven.home";
+    public static final String JAVA_TMP_DIR = "java.io.tmpdir";
+
+    public enum UpgradeProps {
+        NONE,
+        SET_MAX_LOOK_BACK_AGE
+    }
+
+    private BackwardCompatibilityTestUtil() {
+    }
+
+    public static List<String> computeClientVersions() throws Exception {
+        String hbaseVersion = VersionInfo.getVersion();
+        Pattern p = Pattern.compile("\\d+\\.\\d+");
+        Matcher m = p.matcher(hbaseVersion);
+        String hbaseProfile = null;
+        if (m.find()) {
+            hbaseProfile = m.group();
+        }
+        List<String> clientVersions = Lists.newArrayList();
+        ObjectMapper mapper = new ObjectMapper();
+        mapper.configure(JsonParser.Feature.ALLOW_COMMENTS, true);
+        try (InputStream inputStream = BackwardCompatibilityIT.class
+                .getClassLoader().getResourceAsStream(COMPATIBLE_CLIENTS_JSON)) {
+            assertNotNull(inputStream);
+            JsonNode jsonNode = mapper.readTree(inputStream);
+            JsonNode HBaseProfile = jsonNode.get(hbaseProfile);
+            for (final JsonNode clientVersion : HBaseProfile) {
+                clientVersions.add(clientVersion.textValue() + "-HBase-" + hbaseProfile);
+            }
+        }
+        return clientVersions;
+    }
+
+    // Executes the queries listed in the operation file with a given client version
+    public static void executeQueryWithClientVersion(String clientVersion, String operation,
+                                                     String zkQuorum) throws Exception {
+        List<String> cmdParams = Lists.newArrayList();
+        cmdParams.add(BASH);
+        // Note that auto-commit is true for queries executed via SQLline
+        URL fileUrl = BackwardCompatibilityIT.class.getClassLoader().getResource(EXECUTE_QUERY_SH);
+        assertNotNull(fileUrl);
+        cmdParams.add(new File(fileUrl.getFile()).getAbsolutePath());
+        cmdParams.add(zkQuorum);
+        cmdParams.add(clientVersion);
+
+        fileUrl = BackwardCompatibilityIT.class.getClassLoader()
+                .getResource(SQL_DIR + operation + SQL_EXTENSION);
+        assertNotNull(fileUrl);
+        cmdParams.add(new File(fileUrl.getFile()).getAbsolutePath());
+        fileUrl = BackwardCompatibilityIT.class.getClassLoader().getResource(
+                RESULTS_AND_GOLD_FILES_DIR);
+        assertNotNull(fileUrl);
+        String resultFilePath = new File(fileUrl.getFile()).getAbsolutePath() + "/" +
+                RESULT_PREFIX + operation + TEXT_EXTENSION;
+        cmdParams.add(resultFilePath);
+        cmdParams.add(System.getProperty(JAVA_TMP_DIR));
+
+        if (System.getProperty(MVN_HOME) != null) {
+            cmdParams.add(System.getProperty(MVN_HOME));
+        }
+
+        ProcessBuilder pb = new ProcessBuilder(cmdParams);
+        final Process p = pb.start();
+        final StringBuffer sb = new StringBuffer();
+        //Capture the output stream if any from the execution of the script
+        Thread outputStreamThread = new Thread() {
+            @Override
+            public void run() {
+                try (BufferedReader reader = new BufferedReader(
+                        new InputStreamReader(p.getInputStream()))) {
+                    String line;
+                    while ((line = reader.readLine()) != null) {
+                        sb.append(line);
+                    }
+                } catch (final Exception e) {
+                    sb.append(e.getMessage());
+                }
+            }
+        };
+        outputStreamThread.start();
+        //Capture the error stream if any from the execution of the script
+        Thread errorStreamThread = new Thread() {
+            @Override
+            public void run() {
+                try (BufferedReader reader = new BufferedReader(
+                        new InputStreamReader(p.getErrorStream()))) {
+                    String line;
+                    while ((line = reader.readLine()) != null) {
+                        sb.append(line);
+                    }
+                } catch (final Exception e) {
+                    sb.append(e.getMessage());
+                }
+            }
+        };
+        errorStreamThread.start();
+        p.waitFor();
+        assertEquals(String.format("Executing the query failed%s. Check the result file: %s",
+                sb.length() > 0 ? sb.append(" with : ").toString() : "", resultFilePath),
+                0, p.exitValue());
+    }
+
+
+    public static void checkForPreConditions(String compatibleClientVersion, Configuration conf) throws Exception {
+        // For the first code cut of any major version, there wouldn't be any backward compatible
+        // clients. Hence the test wouldn't run and just return true when the client
+        // version to be tested is same as current version
+        assumeFalse(compatibleClientVersion.contains(MetaDataProtocol.CURRENT_CLIENT_VERSION));
+        // Make sure that cluster is clean before test execution with no system tables
+        try (org.apache.hadoop.hbase.client.Connection conn =
+                     ConnectionFactory.createConnection(conf);
+             Admin admin = conn.getAdmin()) {
+            assertFalse(admin.tableExists(TableName.valueOf(QueryConstants.SYSTEM_SCHEMA_NAME,
+                    PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE)));
+        }
+    }
+
+    // Saves the result set to a text file to be compared against the gold file for difference
+    private static void saveResultSet(ResultSet rs, BufferedWriter br) throws Exception {
+        ResultSetMetaData rsm = rs.getMetaData();
+        int columnCount = rsm.getColumnCount();
+        StringBuilder row = new StringBuilder(formatStringWithQuotes(rsm.getColumnName(1)));
+        for (int i = 2; i <= columnCount; i++) {
+            row.append(",").append(formatStringWithQuotes(rsm.getColumnName(i)));
+        }
+        br.write(row.toString());
+        br.write("\n");
+        while (rs.next()) {
+            row = new StringBuilder(formatStringWithQuotes(rs.getString(1)));
+            for (int i = 2; i <= columnCount; i++) {
+                row.append(",").append(formatStringWithQuotes(rs.getString(i)));
+            }
+            br.write(row.toString());
+            br.write("\n");
+        }
+    }
+
+    private static String formatStringWithQuotes(String str) {
+        return (str != null) ? String.format("\'%s\'", str) : "\'\'";
+    }
+
+    private static BufferedReader getBufferedReaderForResource(String relativePath)
+            throws FileNotFoundException {
+        URL fileUrl = BackwardCompatibilityTestUtil.class.getClassLoader().getResource(relativePath);
+        assertNotNull(fileUrl);
+        return new BufferedReader(new FileReader(new File(fileUrl.getFile())));
+    }
+
+    // Compares the result file against the gold file to match for the expected output
+    // for the given operation
+    public static void assertExpectedOutput(String result) throws Exception {
+        List<String> resultFile = Lists.newArrayList();
+        List<String> goldFile = Lists.newArrayList();
+        String line;
+        try (BufferedReader resultFileReader = getBufferedReaderForResource(
+                RESULTS_AND_GOLD_FILES_DIR + RESULT_PREFIX + result + TEXT_EXTENSION)) {
+            while ((line = resultFileReader.readLine()) != null) {
+                resultFile.add(line.trim());
+            }
+        }
+        try (BufferedReader goldFileReader = getBufferedReaderForResource(
+                RESULTS_AND_GOLD_FILES_DIR + GOLD_PREFIX + result + TEXT_EXTENSION)) {
+            while ((line = goldFileReader.readLine()) != null) {
+                line = line.trim();
+                if ( !(line.isEmpty() || line.startsWith("*") || line.startsWith("/"))) {
+                    goldFile.add(line);
+                }
+            }
+        }
+
+        // We take the first line in gold file and match against the result file to exclude any
+        // other WARNING messages that comes as a result of the query execution
+        int index = resultFile.indexOf(goldFile.get(0));
+        assertNotEquals("Mismatch found between gold file and result file", -1, index);
+        resultFile = resultFile.subList(index, resultFile.size());
+        assertEquals(goldFile, resultFile);
+    }
+
+    // Executes the SQL commands listed in the given operation file from the sql_files directory
+    public static void executeQueriesWithCurrentVersion(String operation, String url,
+                                                        UpgradeProps upgradeProps) throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        if (UpgradeProps.SET_MAX_LOOK_BACK_AGE.equals(upgradeProps)) {
+            // any value < 31 is enough to test relaxing the MaxLookBack age
+            // checks during an upgrade because during upgrade, SCN for the
+            // connection is set to be the phoenix version timestamp
+            // (31 as of now: MIN_SYSTEM_TABLE_TIMESTAMP_4_16_0 / MIN_SYSTEM_TABLE_TIMESTAMP_5_1_0)
+            // Hence, keeping value: 15
+            props.put(CompatBaseScannerRegionObserver.PHOENIX_MAX_LOOKBACK_AGE_CONF_KEY,
+                    Integer.toString(15));
+        }
+
+        try (Connection conn = DriverManager.getConnection(url, props)) {
+            StringBuilder sb = new StringBuilder();
+            try (BufferedReader reader =
+                         getBufferedReaderForResource(SQL_DIR + operation + SQL_EXTENSION)) {
+                String sqlCommand;
+                while ((sqlCommand = reader.readLine()) != null) {
+                    sqlCommand = sqlCommand.trim();
+                    if (sqlCommand.length() == 0 || sqlCommand.startsWith("/")
+                            || sqlCommand.startsWith("*")) {
+                        continue;
+                    }
+                    sb.append(sqlCommand);
+                }
+            }
+            ResultSet rs;
+            String[] sqlCommands = sb.toString().split(";");
+
+            URL fileUrl = BackwardCompatibilityIT.class.getClassLoader().getResource(
+                    RESULTS_AND_GOLD_FILES_DIR);
+            assertNotNull(fileUrl);
+            final String resultFile = new File(fileUrl.getFile()).getAbsolutePath() + "/" +
+                    RESULT_PREFIX + operation + TEXT_EXTENSION;
+            try (BufferedWriter br = new BufferedWriter(new FileWriter(resultFile))) {
+                for (String command : sqlCommands) {
+                    try (PreparedStatement stmt = conn.prepareStatement(command)) {
+                        stmt.execute();
+                        rs = stmt.getResultSet();
+                        if (rs != null) {
+                            saveResultSet(rs, br);
+                        }
+                    }
+                    conn.commit();
+                }
+            }
+        }
+    }
+}
diff --git a/phoenix-core/src/it/resources/gold_files/gold_query_delete.txt b/phoenix-core/src/it/resources/gold_files/gold_query_delete.txt
new file mode 100644
index 0000000..3607c2b
--- /dev/null
+++ b/phoenix-core/src/it/resources/gold_files/gold_query_delete.txt
@@ -0,0 +1,22 @@
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ 'COUNT(1)'
+'1'
+'ID','NAME'
+'a','a_name'
\ No newline at end of file
diff --git a/phoenix-core/src/it/resources/gold_files/gold_query_select_and_drop_table.txt b/phoenix-core/src/it/resources/gold_files/gold_query_select_and_drop_table.txt
new file mode 100644
index 0000000..36da41a
--- /dev/null
+++ b/phoenix-core/src/it/resources/gold_files/gold_query_select_and_drop_table.txt
@@ -0,0 +1,22 @@
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ 'COUNT(1)'
+'1'
+'ID','NAME'
+'a','a_name'
diff --git a/phoenix-core/src/it/resources/sql_files/create_tmp_table.sql b/phoenix-core/src/it/resources/sql_files/create_tmp_table.sql
new file mode 100644
index 0000000..8723487
--- /dev/null
+++ b/phoenix-core/src/it/resources/sql_files/create_tmp_table.sql
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE if not exists my_schema.my_table_delete2
+             (id VARCHAR not null primary key, name VARCHAR) SALT_BUCKETS=4;
+UPSERT INTO my_schema.my_table_delete2 values ('a','a_name');
\ No newline at end of file
diff --git a/phoenix-core/src/it/resources/sql_files/query_delete.sql b/phoenix-core/src/it/resources/sql_files/query_delete.sql
new file mode 100644
index 0000000..d963c8e
--- /dev/null
+++ b/phoenix-core/src/it/resources/sql_files/query_delete.sql
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE if not exists my_schema.my_table_delete
+             (id VARCHAR not null primary key, name VARCHAR) SALT_BUCKETS=4;
+UPSERT INTO my_schema.my_table_delete values ('a','a_name');
+
+SELECT COUNT(*) from my_schema.my_table_delete;
+SELECT * from my_schema.my_table_delete;
diff --git a/phoenix-core/src/it/resources/sql_files/query_select_and_drop_table.sql b/phoenix-core/src/it/resources/sql_files/query_select_and_drop_table.sql
new file mode 100644
index 0000000..8de2410
--- /dev/null
+++ b/phoenix-core/src/it/resources/sql_files/query_select_and_drop_table.sql
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+SELECT COUNT(*) from my_schema.my_table_delete2;
+SELECT * from my_schema.my_table_delete2;
+DROP TABLE IF EXISTS my_schema.my_table_delete2;
\ No newline at end of file
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 3f24f6b..fb77387 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -3370,11 +3370,28 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements RegionCopr
                             PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES, HConstants.LATEST_TIMESTAMP, null,
                             request.getClientVersion());
         } catch (Throwable t) {
-            LOGGER.error("loading system catalog table inside getVersion failed", t);
-            ProtobufUtil.setControllerException(controller,
-                    ServerUtil.createIOException(
-                            SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,
-                                    isTablesMappingEnabled).toString(), t));
+            boolean isErrorSwallowed = false;
+            if (t instanceof SQLException &&
+                    ((SQLException) t).getErrorCode() == SQLExceptionCode.GET_TABLE_ERROR.getErrorCode()) {
+                Region region = env.getRegion();
+                final byte[] key = SchemaUtil.getTableKey(
+                        ByteUtil.EMPTY_BYTE_ARRAY,
+                        PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA_BYTES,
+                        PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE_BYTES);
+                if (!region.getRegionInfo().containsRow(key) &&
+                        request.getClientVersion() < MIN_SPLITTABLE_SYSTEM_CATALOG) {
+                    LOGGER.debug("The pre-4.15 client is trying to get SYSTEM.CATALOG " +
+                            "region that contains head row");
+                    isErrorSwallowed = true;
+                }
+            }
+            if (!isErrorSwallowed) {
+                LOGGER.error("loading system catalog table inside getVersion failed", t);
+                ProtobufUtil.setControllerException(controller,
+                        ServerUtil.createIOException(
+                                SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES,
+                                        isTablesMappingEnabled).toString(), t));
+            }
         }
         // In case this is the first connection, system catalog does not exist, and so we don't
         // set the optional system catalog timestamp.
