PHOENIX-3249 Make changes in LocalIndexIT for method level parallelization in BaseHBaseManagedTimeTableReuseIT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b0f90048
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b0f90048
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b0f90048

Branch: refs/heads/master
Commit: b0f900488dec92d0eaac8e2576991281e78984f5
Parents: 1a3bd43
Author: James Taylor <jamestay...@apache.org>
Authored: Thu Sep 15 17:01:27 2016 -0700
Committer: James Taylor <jamestay...@apache.org>
Committed: Thu Sep 15 17:01:27 2016 -0700

----------------------------------------------------------------------
 .../end2end/index/AsyncImmutableIndexIT.java    | 114 ----------
 .../end2end/index/IndexOnOwnClusterIT.java      | 114 ++++++++++
 .../phoenix/end2end/index/LocalIndexIT.java     | 215 ++-----------------
 3 files changed, 129 insertions(+), 314 deletions(-)
----------------------------------------------------------------------
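For context, the LocalIndexIT changes below drop the class-level @BeforeClass/@Shadower cluster setup and instead isolate state per test method, which is what lets the class run under the shared BaseHBaseManagedTimeTableReuseIT mini-cluster with method-level parallelization. The sketch below is illustrative only and not part of this commit; it assumes the BaseTest.generateRandomString() helper and the getUrl() plumbing visible in the diff, and the class and table names are hypothetical.

    package org.apache.phoenix.end2end.index;

    import java.sql.Connection;
    import java.sql.DriverManager;

    import org.apache.phoenix.end2end.BaseHBaseManagedTimeTableReuseIT;
    import org.apache.phoenix.query.BaseTest;
    import org.junit.Before;
    import org.junit.Test;

    // Hypothetical example of the per-method isolation pattern used by LocalIndexIT.
    public class ExampleTableReuseIT extends BaseHBaseManagedTimeTableReuseIT {
        private String schemaName;

        @Before
        public void setup() {
            // A fresh schema name per test method avoids collisions when many
            // test methods share one mini-cluster and run in parallel.
            schemaName = BaseTest.generateRandomString();
        }

        @Test
        public void testSomething() throws Exception {
            // Table names are randomized too, so no per-class cluster restart
            // or table cleanup is required between methods.
            String tableName = schemaName + "." + BaseTest.generateRandomString();
            try (Connection conn = DriverManager.getConnection(getUrl())) {
                conn.createStatement().execute(
                    "CREATE TABLE " + tableName + " (k VARCHAR PRIMARY KEY, v VARCHAR)");
            }
        }
    }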


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b0f90048/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AsyncImmutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AsyncImmutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AsyncImmutableIndexIT.java
deleted file mode 100644
index 8c90b6e..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AsyncImmutableIndexIT.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end.index;
-
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
-import org.apache.phoenix.end2end.IndexToolIT;
-import org.apache.phoenix.mapreduce.index.IndexTool;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.Maps;
-
-public class AsyncImmutableIndexIT extends BaseOwnClusterHBaseManagedTimeIT {
-    
-    @BeforeClass
-    public static void doSetup() throws Exception {
-        Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(1);
-        serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
-            QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
-        setUpRealDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
-            ReadOnlyProps.EMPTY_PROPS);
-    }
-    
-    @Test
-    public void testDeleteFromImmutable() throws Exception {
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
-            conn.createStatement().execute("CREATE TABLE TEST_TABLE (\n" + 
-                    "        pk1 VARCHAR NOT NULL,\n" + 
-                    "        pk2 VARCHAR NOT NULL,\n" + 
-                    "        pk3 VARCHAR\n" + 
-                    "        CONSTRAINT PK PRIMARY KEY \n" + 
-                    "        (\n" + 
-                    "        pk1,\n" + 
-                    "        pk2,\n" + 
-                    "        pk3\n" + 
-                    "        )\n" + 
-                    "        ) IMMUTABLE_ROWS=true");
-            conn.createStatement().execute("upsert into TEST_TABLE (pk1, pk2, 
pk3) values ('a', '1', '1')");
-            conn.createStatement().execute("upsert into TEST_TABLE (pk1, pk2, 
pk3) values ('b', '2', '2')");
-            conn.commit();
-            conn.createStatement().execute("CREATE INDEX TEST_INDEX ON 
TEST_TABLE (pk3, pk2) ASYNC");
-            
-            // this delete will be issued at a timestamp later than the above timestamp of the index table
-            conn.createStatement().execute("delete from TEST_TABLE where pk1 = 'a'");
-            conn.commit();
-
-            // run the index MR job
-            final IndexTool indexingTool = new IndexTool();
-            indexingTool.setConf(new Configuration(getUtility().getConfiguration()));
-            final String[] cmdArgs =
-                    IndexToolIT.getArgValues(null, "TEST_TABLE", "TEST_INDEX", true);
-            int status = indexingTool.run(cmdArgs);
-            assertEquals(0, status);
-
-            // upsert two more rows
-            conn.createStatement().execute(
-                "upsert into TEST_TABLE (pk1, pk2, pk3) values ('a', '3', '3')");
-            conn.createStatement().execute(
-                "upsert into TEST_TABLE (pk1, pk2, pk3) values ('b', '4', '4')");
-            conn.commit();
-
-            // validate that delete markers were issued correctly and only ('a', '1', 'value1') was
-            // deleted
-            String query = "SELECT pk3 from TEST_TABLE ORDER BY pk3";
-            ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-            String expectedPlan =
-                    "CLIENT 1-CHUNK PARALLEL 1-WAY FULL SCAN OVER TEST_INDEX\n"
-                            + "    SERVER FILTER BY FIRST KEY ONLY";
-            assertEquals("Wrong plan ", expectedPlan, 
QueryUtil.getExplainPlan(rs));
-            rs = conn.createStatement().executeQuery(query);
-            assertTrue(rs.next());
-            assertEquals("2", rs.getString(1));
-            assertTrue(rs.next());
-            assertEquals("3", rs.getString(1));
-            assertTrue(rs.next());
-            assertEquals("4", rs.getString(1));
-            assertFalse(rs.next());
-        }
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b0f90048/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexOnOwnClusterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexOnOwnClusterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexOnOwnClusterIT.java
new file mode 100644
index 0000000..8c90b6e
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexOnOwnClusterIT.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.phoenix.end2end.BaseOwnClusterHBaseManagedTimeIT;
+import org.apache.phoenix.end2end.IndexToolIT;
+import org.apache.phoenix.mapreduce.index.IndexTool;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+
+public class AsyncImmutableIndexIT extends BaseOwnClusterHBaseManagedTimeIT {
+    
+    @BeforeClass
+    public static void doSetup() throws Exception {
+        Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(1);
+        serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
+            QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
+        setUpRealDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
+            ReadOnlyProps.EMPTY_PROPS);
+    }
+    
+    @Test
+    public void testDeleteFromImmutable() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+            conn.createStatement().execute("CREATE TABLE TEST_TABLE (\n" + 
+                    "        pk1 VARCHAR NOT NULL,\n" + 
+                    "        pk2 VARCHAR NOT NULL,\n" + 
+                    "        pk3 VARCHAR\n" + 
+                    "        CONSTRAINT PK PRIMARY KEY \n" + 
+                    "        (\n" + 
+                    "        pk1,\n" + 
+                    "        pk2,\n" + 
+                    "        pk3\n" + 
+                    "        )\n" + 
+                    "        ) IMMUTABLE_ROWS=true");
+            conn.createStatement().execute("upsert into TEST_TABLE (pk1, pk2, 
pk3) values ('a', '1', '1')");
+            conn.createStatement().execute("upsert into TEST_TABLE (pk1, pk2, 
pk3) values ('b', '2', '2')");
+            conn.commit();
+            conn.createStatement().execute("CREATE INDEX TEST_INDEX ON 
TEST_TABLE (pk3, pk2) ASYNC");
+            
+            // this delete will be issued at a timestamp later than the above timestamp of the index table
+            conn.createStatement().execute("delete from TEST_TABLE where pk1 = 'a'");
+            conn.commit();
+
+            // run the index MR job
+            final IndexTool indexingTool = new IndexTool();
+            indexingTool.setConf(new Configuration(getUtility().getConfiguration()));
+            final String[] cmdArgs =
+                    IndexToolIT.getArgValues(null, "TEST_TABLE", "TEST_INDEX", true);
+            int status = indexingTool.run(cmdArgs);
+            assertEquals(0, status);
+
+            // upsert two more rows
+            conn.createStatement().execute(
+                "upsert into TEST_TABLE (pk1, pk2, pk3) values ('a', '3', '3')");
+            conn.createStatement().execute(
+                "upsert into TEST_TABLE (pk1, pk2, pk3) values ('b', '4', '4')");
+            conn.commit();
+
+            // validate that delete markers were issued correctly and only ('a', '1', 'value1') was
+            // deleted
+            String query = "SELECT pk3 from TEST_TABLE ORDER BY pk3";
+            ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+            String expectedPlan =
+                    "CLIENT 1-CHUNK PARALLEL 1-WAY FULL SCAN OVER TEST_INDEX\n"
+                            + "    SERVER FILTER BY FIRST KEY ONLY";
+            assertEquals("Wrong plan ", expectedPlan, 
QueryUtil.getExplainPlan(rs));
+            rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals("2", rs.getString(1));
+            assertTrue(rs.next());
+            assertEquals("3", rs.getString(1));
+            assertTrue(rs.next());
+            assertEquals("4", rs.getString(1));
+            assertFalse(rs.next());
+        }
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b0f90048/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index f254c49..d3d775b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -33,14 +33,10 @@ import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.List;
-import java.util.Map;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -52,47 +48,41 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.end2end.BaseHBaseManagedTimeTableReuseIT;
-import org.apache.phoenix.end2end.Shadower;
 import org.apache.phoenix.hbase.index.IndexRegionSplitPolicy;
 import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.schema.*;
+import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
-import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
-import com.google.common.collect.Maps;
-
 @RunWith(Parameterized.class)
 public class LocalIndexIT extends BaseHBaseManagedTimeTableReuseIT {
     private boolean isNamespaceMapped;
-    String schemaName="TEST";
+    private String schemaName;
 
     public LocalIndexIT(boolean isNamespaceMapped) {
         this.isNamespaceMapped = isNamespaceMapped;
     }
     
-    @BeforeClass 
-    @Shadower(classBeingShadowed = BaseHBaseManagedTimeTableReuseIT.class)
-    public static void doSetup() throws Exception {
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
-        // Drop the HBase table metadata for this test
-        props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
-        // Must update config before starting server
-        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
+    @Before
+    public void setup() {
+        schemaName = BaseTest.generateRandomString();
     }
-
+    
     private void createBaseTable(String tableName, Integer saltBuckets, String splits) throws SQLException {
         Connection conn = getConnection();
         if (isNamespaceMapped) {
@@ -110,7 +100,7 @@ public class LocalIndexIT extends BaseHBaseManagedTimeTableReuseIT {
         conn.close();
     }
 
-    @Parameters(name = "isNamespaceMapped = {0}")
+    @Parameters(name = "LocalIndexIT_isNamespaceMapped={0}") // name is used by failsafe as file name in reports
     public static Collection<Boolean> data() {
         return Arrays.asList(true, false);
     }
@@ -202,14 +192,12 @@ public class LocalIndexIT extends BaseHBaseManagedTimeTableReuseIT {
         }
     }
     
-    public Connection getConnection() throws SQLException{
-        Properties props = new Properties();
-        props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
+    private Connection getConnection() throws SQLException{
+        Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
+        props.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(isNamespaceMapped));
         return DriverManager.getConnection(getUrl(),props);
     }
 
-
     @Test
     public void testDropLocalIndexTable() throws Exception {
         String tableName = schemaName + "." + generateRandomString();
@@ -805,103 +793,6 @@ public class LocalIndexIT extends BaseHBaseManagedTimeTableReuseIT {
     }
 
     @Test
-    public void testLocalIndexScanAfterRegionSplit() throws Exception {
-        String tableName = schemaName + "." + generateRandomString();
-        String indexName = "IDX_" + generateRandomString();
-        TableName physicalTableName = SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped);
-        String indexPhysicalTableName = physicalTableName.getNameAsString();
-
-        if (isNamespaceMapped) { return; }
-        createBaseTable(tableName, null, "('e','j','o')");
-        Connection conn1 = getConnection();
-        try{
-            String[] strings = {"a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"};
-            for (int i = 0; i < 26; i++) {
-                conn1.createStatement().execute(
-                    "UPSERT INTO " + tableName + " values('"+strings[i]+"'," + i + ","
-                            + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
-            }
-            conn1.commit();
-            conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName 
+ " ON " + tableName + "(v1)");
-            conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName 
+ "_2 ON " + tableName + "(k3)");
-
-            ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + tableName);
-            assertTrue(rs.next());
-            
-            HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
-            for (int i = 1; i < 5; i++) {
-                admin.split(physicalTableName, ByteUtil.concat(Bytes.toBytes(strings[3*i])));
-                List<HRegionInfo> regionsOfUserTable =
-                        MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(),
-                                physicalTableName, false);
-
-                while (regionsOfUserTable.size() != (4+i)) {
-                    Thread.sleep(100);
-                    regionsOfUserTable = MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
-                            admin.getConnection(), physicalTableName, false);
-                }
-                assertEquals(4+i, regionsOfUserTable.size());
-                String[] tIdColumnValues = new String[26]; 
-                String[] v1ColumnValues = new String[26];
-                int[] k1ColumnValue = new int[26];
-                String query = "SELECT t_id,k1,v1 FROM " + tableName;
-                rs = conn1.createStatement().executeQuery(query);
-                Thread.sleep(1000);
-                for (int j = 0; j < 26; j++) {
-                    assertTrue(rs.next());
-                    tIdColumnValues[j] = rs.getString("t_id");
-                    k1ColumnValue[j] = rs.getInt("k1");
-                    v1ColumnValues[j] = rs.getString("V1");
-                }
-                Arrays.sort(tIdColumnValues);
-                Arrays.sort(v1ColumnValues);
-                Arrays.sort(k1ColumnValue);
-                assertTrue(Arrays.equals(strings, tIdColumnValues));
-                assertTrue(Arrays.equals(strings, v1ColumnValues));
-                for(int m=0;m<26;m++) {
-                    assertEquals(m, k1ColumnValue[m]);
-                }
-
-                rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
-                assertEquals(
-                        "CLIENT PARALLEL " + (4 + i) + "-WAY RANGE SCAN OVER "
-                                + indexPhysicalTableName + " [1]\n"
-                                        + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                                + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
-                
-                query = "SELECT t_id,k1,k3 FROM " + tableName;
-                rs = conn1.createStatement().executeQuery("EXPLAIN "+query);
-                assertEquals(
-                    "CLIENT PARALLEL "
-                            + ((strings[3 * i].compareTo("j") < 0) ? (4 + i) : (4 + i - 1))
-                            + "-WAY RANGE SCAN OVER "
-                            + indexPhysicalTableName + " [2]\n"
-                                    + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                            + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
-                rs = conn1.createStatement().executeQuery(query);
-                Thread.sleep(1000);
-                int[] k3ColumnValue = new int[26];
-                for (int j = 0; j < 26; j++) {
-                    assertTrue(rs.next());
-                    tIdColumnValues[j] = rs.getString("t_id");
-                    k1ColumnValue[j] = rs.getInt("k1");
-                    k3ColumnValue[j] = rs.getInt("k3");
-                }
-                Arrays.sort(tIdColumnValues);
-                Arrays.sort(k1ColumnValue);
-                Arrays.sort(k3ColumnValue);
-                assertTrue(Arrays.equals(strings, tIdColumnValues));
-                for(int m=0;m<26;m++) {
-                    assertEquals(m, k1ColumnValue[m]);
-                    assertEquals(m+2, k3ColumnValue[m]);
-                }
-            }
-       } finally {
-            conn1.close();
-        }
-    }
-
-    @Test
     public void testLocalIndexScanWithSmallChunks() throws Exception {
         String tableName = schemaName + "." + generateRandomString();
         String indexName = "IDX_" + generateRandomString();
@@ -946,80 +837,4 @@ public class LocalIndexIT extends BaseHBaseManagedTimeTableReuseIT {
             conn1.close();
         }
     }
-
-    @Test
-    public void testLocalIndexScanAfterRegionsMerge() throws Exception {
-        String tableName = schemaName + "." + generateRandomString();
-        String indexName = "IDX_" + generateRandomString();
-        TableName physicalTableName = SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped);
-        String indexPhysicalTableName = physicalTableName.getNameAsString();
-
-        if (isNamespaceMapped) { return; }
-        createBaseTable(tableName, null, "('e','j','o')");
-        Connection conn1 = getConnection();
-        try{
-            String[] strings = {"a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"};
-            for (int i = 0; i < 26; i++) {
-                conn1.createStatement().execute(
-                    "UPSERT INTO " + tableName + " values('"+strings[i]+"'," + i + ","
-                            + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
-            }
-            conn1.commit();
-            conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName 
+ " ON " + tableName + "(v1)");
-            conn1.createStatement().execute("CREATE LOCAL INDEX " + indexName 
+ "_2 ON " + tableName + "(k3)");
-
-            ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + tableName);
-            assertTrue(rs.next());
-
-            HBaseAdmin admin = conn1.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
-            List<HRegionInfo> regionsOfUserTable =
-                    MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(),
-                        physicalTableName, false);
-            admin.mergeRegions(regionsOfUserTable.get(0).getEncodedNameAsBytes(),
-                regionsOfUserTable.get(1).getEncodedNameAsBytes(), false);
-            regionsOfUserTable =
-                    MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(), admin.getConnection(),
-                            physicalTableName, false);
-
-            while (regionsOfUserTable.size() != 3) {
-                Thread.sleep(100);
-                regionsOfUserTable = MetaTableAccessor.getTableRegions(getUtility().getZooKeeperWatcher(),
-                        admin.getConnection(), physicalTableName, false);
-            }
-            String query = "SELECT t_id,k1,v1 FROM " + tableName;
-            rs = conn1.createStatement().executeQuery(query);
-            Thread.sleep(1000);
-            for (int j = 0; j < 26; j++) {
-                assertTrue(rs.next());
-                assertEquals(strings[25 - j], rs.getString("t_id"));
-                assertEquals(25 - j, rs.getInt("k1"));
-                assertEquals(strings[j], rs.getString("V1"));
-            }
-            rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
-            assertEquals(
-                "CLIENT PARALLEL " + 3 + "-WAY RANGE SCAN OVER "
-                        + indexPhysicalTableName
-                        + " [1]\n" + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                        + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
-
-            query = "SELECT t_id,k1,k3 FROM " + tableName;
-            rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
-            assertEquals(
-                "CLIENT PARALLEL " + 3 + "-WAY RANGE SCAN OVER "
-                        + indexPhysicalTableName
-                        + " [2]\n" + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                        + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
-
-            rs = conn1.createStatement().executeQuery(query);
-            Thread.sleep(1000);
-            for (int j = 0; j < 26; j++) {
-                assertTrue(rs.next());
-                assertEquals(strings[j], rs.getString("t_id"));
-                assertEquals(j, rs.getInt("k1"));
-                assertEquals(j + 2, rs.getInt("k3"));
-            }
-       } finally {
-            conn1.close();
-        }
-    }
 }
