http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java
index 5e36784..6f190b6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryWithOffsetIT.java
@@ -38,6 +38,7 @@ import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -47,23 +48,17 @@ import org.junit.runners.Parameterized.Parameters;
 import com.google.common.collect.Maps;
 
 @RunWith(Parameterized.class)
-public class QueryWithOffsetIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class QueryWithOffsetIT extends ParallelStatsDisabledIT {
     
-    private String tableName = "T";
-    private final String[] strings = { "a", "b", "c", "d", "e", "f", "g", "h", 
"i", "j", "k", "l", "m", "n", "o", "p",
-            "q", "r", "s", "t", "u", "v", "w", "x", "y", "z" };
-    private final String ddl;
+    private static final String[] STRINGS = { "a", "b", "c", "d", "e", "f", 
"g", "h", "i", "j", "k", "l", "m", "n",
+            "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z" };
     private final boolean isSalted;
-
-    public QueryWithOffsetIT(String preSplit) {
-        this.tableName=tableName + "_" + preSplit.charAt(2);
-        this.ddl = "CREATE TABLE " + tableName + " (t_id VARCHAR NOT NULL,\n" 
+ "k1 INTEGER NOT NULL,\n"
-                + "k2 INTEGER NOT NULL,\n" + "C3.k3 INTEGER,\n" + "C2.v1 
VARCHAR,\n"
-                + "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2)) " + preSplit;
-        this.isSalted = preSplit.startsWith(" SALT_BUCKETS");
-    }
+    private final String preSplit;
+    private String ddl;
+    private String tableName;
 
     @BeforeClass
+    @Shadower(classBeingShadowed = ParallelStatsDisabledIT.class)
     public static void doSetup() throws Exception {
         Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
         props.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, 
Boolean.toString(true));
@@ -71,6 +66,19 @@ public class QueryWithOffsetIT extends 
BaseOwnClusterHBaseManagedTimeIT {
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
     }
 
+    public QueryWithOffsetIT(String preSplit) {
+        this.isSalted = preSplit.startsWith(" SALT_BUCKETS");
+        this.preSplit = preSplit;
+    }
+
+    @Before
+    public void initTest() {
+        tableName = "T_" + generateRandomString();
+        ddl = "CREATE TABLE " + tableName + " (t_id VARCHAR NOT NULL,\n" + "k1 
INTEGER NOT NULL,\n"
+                + "k2 INTEGER NOT NULL,\n" + "C3.k3 INTEGER,\n" + "C2.v1 
VARCHAR,\n"
+                + "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2)) " + preSplit;
+    }
+
     @Parameters(name="preSplit = {0}")
     public static Collection<String> data() {
         return Arrays.asList(new String[] { " SPLIT ON ('e','i','o')", " 
SALT_BUCKETS=10" });
@@ -92,7 +100,7 @@ public class QueryWithOffsetIT extends 
BaseOwnClusterHBaseManagedTimeIT {
         int i = 0;
         while (i < limit) {
             assertTrue(rs.next());
-            assertEquals("Expected string didn't match for i = " + i, 
strings[offset + i], rs.getString(1));
+            assertEquals("Expected string didn't match for i = " + i, 
STRINGS[offset + i], rs.getString(1));
             i++;
         }
 
@@ -100,14 +108,14 @@ public class QueryWithOffsetIT extends 
BaseOwnClusterHBaseManagedTimeIT {
         rs = conn.createStatement().executeQuery("SELECT t_id from " + 
tableName + " union all SELECT t_id from "
                 + tableName + " offset " + offset + " FETCH FIRST " + limit + 
" rows only");
         i = 0;
-        while (i++ < strings.length - offset) {
+        while (i++ < STRINGS.length - offset) {
             assertTrue(rs.next());
-            assertEquals(strings[offset + i - 1], rs.getString(1));
+            assertEquals(STRINGS[offset + i - 1], rs.getString(1));
         }
         i = 0;
-        while (i++ < limit - strings.length - offset) {
+        while (i++ < limit - STRINGS.length - offset) {
             assertTrue(rs.next());
-            assertEquals(strings[i - 1], rs.getString(1));
+            assertEquals(STRINGS[i - 1], rs.getString(1));
         }
         conn.close();
     }
@@ -124,25 +132,27 @@ public class QueryWithOffsetIT extends 
BaseOwnClusterHBaseManagedTimeIT {
         String query = "SELECT t_id from " + tableName + " offset " + offset;
         ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + query);
         if(!isSalted){
-            assertEquals("CLIENT SERIAL 1-WAY FULL SCAN OVER T_P\n" + "    
SERVER FILTER BY FIRST KEY ONLY\n"
+            assertEquals("CLIENT SERIAL 1-WAY FULL SCAN OVER " + tableName + 
"\n"
+                    + "    SERVER FILTER BY FIRST KEY ONLY\n"
                     + "    SERVER OFFSET " + offset, 
QueryUtil.getExplainPlan(rs));
         }else{
-            assertEquals("CLIENT PARALLEL 10-WAY FULL SCAN OVER T_A\n" + "    
SERVER FILTER BY FIRST KEY ONLY\n"
+            assertEquals("CLIENT PARALLEL 10-WAY FULL SCAN OVER " + tableName 
+ "\n"
+                    + "    SERVER FILTER BY FIRST KEY ONLY\n"
                     + "CLIENT MERGE SORT\n" + "CLIENT OFFSET " + offset, 
QueryUtil.getExplainPlan(rs));
         }
         rs = conn.createStatement().executeQuery(query);
         int i = 0;
-        while (i++ < strings.length - offset) {
+        while (i++ < STRINGS.length - offset) {
             assertTrue(rs.next());
-            assertEquals(strings[offset + i - 1], rs.getString(1));
+            assertEquals(STRINGS[offset + i - 1], rs.getString(1));
         }
         query = "SELECT t_id from " + tableName + " ORDER BY v1 offset " + 
offset;
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
         if (!isSalted) {
-            assertEquals("CLIENT PARALLEL 5-WAY FULL SCAN OVER T_P\n" + "    
SERVER SORTED BY [C2.V1]\n"
+            assertEquals("CLIENT PARALLEL 5-WAY FULL SCAN OVER " + tableName + 
"\n" + "    SERVER SORTED BY [C2.V1]\n"
                     + "CLIENT MERGE SORT\n" + "CLIENT OFFSET " + offset, 
QueryUtil.getExplainPlan(rs));
         } else {
-            assertEquals("CLIENT PARALLEL 10-WAY FULL SCAN OVER T_A\n" + "    
SERVER SORTED BY [C2.V1]\n"
+            assertEquals("CLIENT PARALLEL 10-WAY FULL SCAN OVER " + tableName 
+ "\n" + "    SERVER SORTED BY [C2.V1]\n"
                     + "CLIENT MERGE SORT\n" + "CLIENT OFFSET " + offset, 
QueryUtil.getExplainPlan(rs));
         }
         conn.close();
@@ -161,31 +171,31 @@ public class QueryWithOffsetIT extends 
BaseOwnClusterHBaseManagedTimeIT {
         rs = conn.createStatement()
                 .executeQuery("SELECT t_id from " + tableName + " order by 
t_id offset " + offset + " row");
         int i = 0;
-        while (i++ < strings.length - offset) {
+        while (i++ < STRINGS.length - offset) {
             assertTrue(rs.next());
-            assertEquals(strings[offset + i - 1], rs.getString(1));
+            assertEquals(STRINGS[offset + i - 1], rs.getString(1));
         }
 
         rs = conn.createStatement().executeQuery(
                 "SELECT k3, count(*) from " + tableName + " group by k3 order 
by k3 desc offset " + offset + " row");
 
         i = 0;
-        while (i++ < strings.length - offset) {
+        while (i++ < STRINGS.length - offset) {
             assertTrue(rs.next());
-            assertEquals(strings.length - offset - i + 2, rs.getInt(1));
+            assertEquals(STRINGS.length - offset - i + 2, rs.getInt(1));
         }
 
         rs = conn.createStatement().executeQuery("SELECT t_id from " + 
tableName + " union all SELECT t_id from "
                 + tableName + " offset " + offset + " rows");
         i = 0;
-        while (i++ < strings.length - offset) {
+        while (i++ < STRINGS.length - offset) {
             assertTrue(rs.next());
-            assertEquals(strings[offset + i - 1], rs.getString(1));
+            assertEquals(STRINGS[offset + i - 1], rs.getString(1));
         }
         i = 0;
-        while (i++ < strings.length) {
+        while (i++ < STRINGS.length) {
             assertTrue(rs.next());
-            assertEquals(strings[i - 1], rs.getString(1));
+            assertEquals(STRINGS[i - 1], rs.getString(1));
         }
         conn.close();
     }
@@ -210,8 +220,8 @@ public class QueryWithOffsetIT extends 
BaseOwnClusterHBaseManagedTimeIT {
     
     private void initTableValues(Connection conn) throws SQLException {
         for (int i = 0; i < 26; i++) {
-            conn.createStatement().execute("UPSERT INTO " + tableName + " 
values('" + strings[i] + "'," + i + ","
-                    + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
+            conn.createStatement().execute("UPSERT INTO " + tableName + " 
values('" + STRINGS[i] + "'," + i + ","
+                    + (i + 1) + "," + (i + 2) + ",'" + STRINGS[25 - i] + "')");
         }
         conn.commit();
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java
index fa0bc8e..aba4ddb 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RenewLeaseIT.java
@@ -42,7 +42,7 @@ import org.junit.Test;
 import com.google.common.collect.Maps;
 
 
-public class RenewLeaseIT extends BaseOwnClusterHBaseManagedTimeIT {
+public class RenewLeaseIT extends BaseOwnClusterIT {
     private static final long RPC_TIMEOUT = 2000;
     private static volatile boolean SLEEP_NOW = false;
     private static final String TABLE_NAME = "FOO_BAR";

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java
index 22bf8ce..a11f808 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SpillableGroupByIT.java
@@ -34,8 +34,8 @@ import java.util.Map;
 import java.util.Properties;
 
 import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -48,7 +48,7 @@ import com.google.common.collect.Maps;
  * cluster.
  */
 
-public class SpillableGroupByIT extends BaseOwnClusterClientManagedTimeIT {
+public class SpillableGroupByIT extends BaseOwnClusterIT {
 
     private static final int NUM_ROWS_INSERTED = 1000;
     
@@ -61,7 +61,7 @@ public class SpillableGroupByIT extends 
BaseOwnClusterClientManagedTimeIT {
 
     @BeforeClass
     public static void doSetup() throws Exception {
-        Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
+        Map<String, String> props = Maps.newHashMapWithExpectedSize(11);
         // Set a very small cache size to force plenty of spilling
         props.put(QueryServices.GROUPBY_MAX_CACHE_SIZE_ATTRIB,
                 Integer.toString(1));
@@ -70,6 +70,13 @@ public class SpillableGroupByIT extends 
BaseOwnClusterClientManagedTimeIT {
                 Integer.toString(1));
         // Large enough to not run out of memory, but small enough to spill
         props.put(QueryServices.MAX_MEMORY_SIZE_ATTRIB, 
Integer.toString(40000));
+        
+        // Set guidepost width, but disable stats
+        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 
Long.toString(20));
+        props.put(QueryServices.STATS_ENABLED_ATTRIB, Boolean.toString(false));
+        props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, 
Boolean.TRUE.toString());
+        props.put(QueryServices.EXPLAIN_ROW_COUNT_ATTRIB, 
Boolean.TRUE.toString());
+        // Must update config before starting server
         setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
     }
 
@@ -77,10 +84,7 @@ public class SpillableGroupByIT extends 
BaseOwnClusterClientManagedTimeIT {
         createGroupByTestTable(conn, tableName);
     }
 
-    private void loadData(long ts) throws SQLException {
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts));
-        Connection conn = DriverManager.getConnection(getUrl(), props);
+    private void loadData(Connection conn) throws SQLException {
         int groupFactor = NUM_ROWS_INSERTED / 2;
         for (int i = 0; i < NUM_ROWS_INSERTED; i++) {
             insertRow(conn, Integer.toString(i % (groupFactor)), 10);
@@ -90,7 +94,6 @@ public class SpillableGroupByIT extends 
BaseOwnClusterClientManagedTimeIT {
             }
         }
         conn.commit();
-        conn.close();
     }
 
     private void insertRow(Connection conn, String uri, int appcpu)
@@ -107,72 +110,66 @@ public class SpillableGroupByIT extends 
BaseOwnClusterClientManagedTimeIT {
     
     @Test
     public void testScanUri() throws Exception {
-        long ts = nextTimestamp();
         SpillableGroupByIT spGpByT = new SpillableGroupByIT();
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
-                Long.toString(ts));
         Connection conn = DriverManager.getConnection(getUrl(), props);
         createTable(conn, GROUPBYTEST_NAME);
-        ts += 2;
-        spGpByT.loadData(ts);
+        spGpByT.loadData(conn);
         props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
-                Long.toString(ts + 10));
-        conn = DriverManager.getConnection(getUrl(), props);
-        try {
-            Statement stmt = conn.createStatement();
-            ResultSet rs = stmt.executeQuery(GROUPBY1);
-
-            int count = 0;
-            while (rs.next()) {
-                String uri = rs.getString(5);
-                assertEquals(2, rs.getInt(1));
-                assertEquals(1, rs.getInt(2));
-                assertEquals(20, rs.getInt(3));
-                assertEquals(10, rs.getInt(4));
-                int a = Integer.valueOf(rs.getString(6)).intValue();
-                int b = Integer.valueOf(rs.getString(7)).intValue();
-                assertEquals(Integer.valueOf(uri).intValue(), Math.min(a, b));
-                assertEquals(NUM_ROWS_INSERTED / 2 + Integer.valueOf(uri), 
Math.max(a, b));
-                count++;
-            }
-            assertEquals(NUM_ROWS_INSERTED / 2, count);
-            
-        } finally {
-            conn.close();
+        Statement stmt = conn.createStatement();
+        ResultSet rs = stmt.executeQuery(GROUPBY1);
+
+        int count = 0;
+        while (rs.next()) {
+            String uri = rs.getString(5);
+            assertEquals(2, rs.getInt(1));
+            assertEquals(1, rs.getInt(2));
+            assertEquals(20, rs.getInt(3));
+            assertEquals(10, rs.getInt(4));
+            int a = Integer.valueOf(rs.getString(6)).intValue();
+            int b = Integer.valueOf(rs.getString(7)).intValue();
+            assertEquals(Integer.valueOf(uri).intValue(), Math.min(a, b));
+            assertEquals(NUM_ROWS_INSERTED / 2 + Integer.valueOf(uri), 
Math.max(a, b));
+            count++;
         }
+        assertEquals(NUM_ROWS_INSERTED / 2, count);
         
-        // Test group by with limit that will exit after first row
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
-                Long.toString(ts + 10));
-        conn = DriverManager.getConnection(getUrl(), props);
-        try {
-            Statement stmt = conn.createStatement();
-            ResultSet rs = stmt.executeQuery("SELECT appcpu FROM " + 
GROUPBYTEST_NAME + " group by appcpu limit 1");
-
-            assertTrue(rs.next());
-            assertEquals(10,rs.getInt(1));
-            assertFalse(rs.next());
-        } finally {
-            conn.close();
-        }
+        stmt = conn.createStatement();
+        rs = stmt.executeQuery("SELECT appcpu FROM " + GROUPBYTEST_NAME + " 
group by appcpu limit 1");
+
+        assertTrue(rs.next());
+        assertEquals(10,rs.getInt(1));
+        assertFalse(rs.next());
         
-        // Test group by with limit that will do spilling before exiting
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
-                Long.toString(ts + 10));
-        conn = DriverManager.getConnection(getUrl(), props);
-        try {
-            Statement stmt = conn.createStatement();
-            ResultSet rs = stmt.executeQuery("SELECT to_number(uri) FROM " + 
GROUPBYTEST_NAME + " group by to_number(uri) limit 100");
-            int count = 0;
-            while (rs.next()) {
-                count++;
-            }
-            assertEquals(100, count);
-        } finally {
-            conn.close();
+        stmt = conn.createStatement();
+        rs = stmt.executeQuery("SELECT to_number(uri) FROM " + 
GROUPBYTEST_NAME + " group by to_number(uri) limit 100");
+        count = 0;
+        while (rs.next()) {
+            count++;
         }
+        assertEquals(100, count);
     }
 
+    @Test
+    public void testStatisticsAreNotWritten() throws SQLException {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        Statement stmt = conn.createStatement();
+        stmt.execute("CREATE TABLE T1 (ID INTEGER NOT NULL PRIMARY KEY, NAME 
VARCHAR)");
+        stmt.execute("UPSERT INTO T1 VALUES (1, 'NAME1')");
+        stmt.execute("UPSERT INTO T1 VALUES (2, 'NAME2')");
+        stmt.execute("UPSERT INTO T1 VALUES (3, 'NAME3')");
+        conn.commit();
+        stmt.execute("UPDATE STATISTICS T1");
+        ResultSet rs = stmt.executeQuery("SELECT * FROM SYSTEM.STATS");
+        assertFalse(rs.next());
+        rs.close();
+        stmt.close();
+        rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM T1");
+        String explainPlan = QueryUtil.getExplainPlan(rs);
+        assertEquals(
+                "CLIENT 1-CHUNK PARALLEL 1-WAY FULL SCAN OVER T1",
+                explainPlan);
+        conn.close();
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectionDisabledIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectionDisabledIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectionDisabledIT.java
deleted file mode 100644
index 54ffa7c..0000000
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectionDisabledIT.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.Maps;
-
-/**
- * Verifies that statistics are not collected if they are disabled via a 
setting
- */
-public class StatsCollectionDisabledIT extends StatsCollectorAbstractIT {
-
-    @BeforeClass
-    public static void doSetup() throws Exception {
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(5);
-        // Must update config before starting server
-        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 
Long.toString(20));
-        props.put(QueryServices.STATS_ENABLED_ATTRIB, Boolean.toString(false));
-        props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, 
Boolean.TRUE.toString());
-        props.put(QueryServices.EXPLAIN_ROW_COUNT_ATTRIB, 
Boolean.TRUE.toString());
-        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
-    }
-
-    @Test
-    public void testStatisticsAreNotWritten() throws SQLException {
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        Statement stmt = conn.createStatement();
-        stmt.execute("CREATE TABLE T1 (ID INTEGER NOT NULL PRIMARY KEY, NAME 
VARCHAR)");
-        stmt.execute("UPSERT INTO T1 VALUES (1, 'NAME1')");
-        stmt.execute("UPSERT INTO T1 VALUES (2, 'NAME2')");
-        stmt.execute("UPSERT INTO T1 VALUES (3, 'NAME3')");
-        conn.commit();
-        stmt.execute("UPDATE STATISTICS T1");
-        ResultSet rs = stmt.executeQuery("SELECT * FROM SYSTEM.STATS");
-        assertFalse(rs.next());
-        rs.close();
-        stmt.close();
-        rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM T1");
-        String explainPlan = QueryUtil.getExplainPlan(rs);
-        assertEquals(
-                "CLIENT 1-CHUNK PARALLEL 1-WAY FULL SCAN OVER T1",
-                explainPlan);
-       conn.close();
-    }
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorAbstractIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorAbstractIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorAbstractIT.java
deleted file mode 100644
index ab337d6..0000000
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorAbstractIT.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.sql.Connection;
-import java.sql.SQLException;
-import java.util.Map;
-
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.ConnectionQueryServices;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.BeforeClass;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.collect.Maps;
-@Category(NeedsOwnMiniClusterTest.class)
-public abstract class StatsCollectorAbstractIT extends 
BaseOwnClusterHBaseManagedTimeIT {
-    protected static final String STATS_TEST_TABLE_NAME = "S";
-    protected static final String STATS_TEST_TABLE_NAME_NEW = "S_NEW";
-    protected static final byte[] STATS_TEST_TABLE_BYTES = 
Bytes.toBytes(STATS_TEST_TABLE_NAME);
-    protected static final byte[] STATS_TEST_TABLE_BYTES_NEW = 
Bytes.toBytes(STATS_TEST_TABLE_NAME_NEW);
-
-    @BeforeClass
-    public static void doSetup() throws Exception {
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
-        // Must update config before starting server
-        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 
Long.toString(20));
-        props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, 
Boolean.TRUE.toString());
-        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
-    }
-    
-    protected void splitTable(Connection conn, byte[] splitPoint, byte[] 
tabName) throws IOException, InterruptedException, SQLException {
-        ConnectionQueryServices services = 
conn.unwrap(PhoenixConnection.class).getQueryServices();
-        int nRegionsNow = services.getAllTableRegions(tabName).size();
-        HBaseAdmin admin = services.getAdmin();
-        try {
-            admin.split(tabName, splitPoint);
-            int nTries = 0;
-            int nRegions;
-            do {
-                Thread.sleep(2000);
-                services.clearTableRegionCache(tabName);
-                nRegions = services.getAllTableRegions(tabName).size();
-                nTries++;
-            } while (nRegions == nRegionsNow && nTries < 10);
-            if (nRegions == nRegionsNow) {
-                fail();
-            }
-            // FIXME: I see the commit of the stats finishing before this with 
a lower timestamp that the scan timestamp,
-            // yet without this sleep, the query finds the old data. Seems 
like an HBase bug and a potentially serious one.
-            Thread.sleep(8000);
-        } finally {
-            admin.close();
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index f0fe346..dd7741a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -20,6 +20,7 @@ package org.apache.phoenix.end2end;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.apache.phoenix.util.TestUtil.getAllSplits;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -35,9 +36,12 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.Random;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.ConnectionQueryServices;
@@ -49,6 +53,7 @@ import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -58,14 +63,14 @@ import org.junit.runners.Parameterized.Parameters;
 import com.google.common.collect.Maps;
 
 @RunWith(Parameterized.class)
-public class StatsCollectorIT extends StatsCollectorAbstractIT {
-    private static final String STATS_TEST_TABLE_NAME = "S";
-    
+public class StatsCollectorIT extends ParallelStatsEnabledIT {
     private final String tableDDLOptions;
-    private final String tableName;
-    private final String fullTableName;
+    private String tableName;
+    private String schemaName;
+    private String fullTableName;
         
     @BeforeClass
+    @Shadower(classBeingShadowed = ParallelStatsEnabledIT.class)
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(10);
         // Must update config before starting server
@@ -79,10 +84,15 @@ public class StatsCollectorIT extends 
StatsCollectorAbstractIT {
     
     public StatsCollectorIT( boolean transactional) {
         this.tableDDLOptions= transactional ? " TRANSACTIONAL=true" : "";
-        this.tableName = TestUtil.DEFAULT_DATA_TABLE_NAME + ( transactional ?  
"_TXN" : "");
-        this.fullTableName = 
SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
     }
     
+    @Before
+    public void generateTableNames() {
+        schemaName = TestUtil.DEFAULT_SCHEMA_NAME;
+        tableName = "T_" + generateRandomString();
+        fullTableName = SchemaUtil.getTableName(schemaName, tableName);
+    }
+
     @Parameters(name="transactional = {0}")
     public static Collection<Boolean> data() {
         return Arrays.asList(false,true);
@@ -110,8 +120,6 @@ public class StatsCollectorIT extends 
StatsCollectorAbstractIT {
     public void testSomeUpdateEmptyStats() throws Exception {
         Connection conn;
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        String fullTableName = this.fullTableName + "_SALTED";
-        // props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts + 10));
         conn = DriverManager.getConnection(getUrl(), props);
         conn.setAutoCommit(true);
         conn.createStatement().execute(
@@ -150,7 +158,6 @@ public class StatsCollectorIT extends 
StatsCollectorAbstractIT {
         PreparedStatement stmt;
         ResultSet rs;
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        // props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts + 10));
         conn = DriverManager.getConnection(getUrl(), props);
         conn.createStatement().execute(
                 "CREATE TABLE " + fullTableName +" ( k VARCHAR, a_string_array 
VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
@@ -217,12 +224,11 @@ public class StatsCollectorIT extends 
StatsCollectorAbstractIT {
 
     @Test
     public void testUpdateStatsWithMultipleTables() throws Throwable {
-        String fullTableName2 = fullTableName+"_2";
+        String fullTableName2 = SchemaUtil.getTableName(schemaName, "T_" + 
generateRandomString());
         Connection conn;
         PreparedStatement stmt;
         ResultSet rs;
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        // props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts + 10));
         conn = DriverManager.getConnection(getUrl(), props);
         conn.createStatement().execute(
                 "CREATE TABLE " + fullTableName +" ( k VARCHAR, a_string_array 
VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
@@ -271,7 +277,6 @@ public class StatsCollectorIT extends 
StatsCollectorAbstractIT {
             InterruptedException {
         Connection conn;
         PreparedStatement stmt;
-        // props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts + 30));
         conn = DriverManager.getConnection(getUrl(), props);
         stmt = upsertStmt(conn, tableName);
         stmt.setString(1, "a");
@@ -356,12 +361,12 @@ public class StatsCollectorIT extends 
StatsCollectorAbstractIT {
     
     @Test
     public void testCompactUpdatesStats() throws Exception {
-        testCompactUpdatesStats(null, STATS_TEST_TABLE_NAME + 1);
+        testCompactUpdatesStats(null, fullTableName);
     }
     
     @Test
     public void testCompactUpdatesStatsWithMinStatsUpdateFreq() throws 
Exception {
-        
testCompactUpdatesStats(QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS, 
STATS_TEST_TABLE_NAME + 2);
+        
testCompactUpdatesStats(QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS, 
fullTableName);
     }
     
     private void testCompactUpdatesStats(Integer minStatsUpdateFreq, String 
tableName) throws Exception {
@@ -426,4 +431,130 @@ public class StatsCollectorIT extends 
StatsCollectorAbstractIT {
         assertEquals(nRows - nDeletedRows, rs.getLong(1));
         
     }
+
+    @Test
+    public void testWithMultiCF() throws Exception {
+        int nRows = 20;
+        Connection conn;
+        PreparedStatement stmt;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        conn = DriverManager.getConnection(getUrl(), props);
+        conn.createStatement().execute(
+                "CREATE TABLE " + fullTableName
+                        + "(k VARCHAR PRIMARY KEY, a.v INTEGER, b.v INTEGER, 
c.v INTEGER NULL, d.v INTEGER NULL) ");
+        stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " 
VALUES(?,?, ?, ?, ?)");
+        byte[] val = new byte[250];
+        for (int i = 0; i < nRows; i++) {
+            stmt.setString(1, Character.toString((char)('a' + i)) + 
Bytes.toString(val));
+            stmt.setInt(2, i);
+            stmt.setInt(3, i);
+            stmt.setInt(4, i);
+            stmt.setInt(5, i);
+            stmt.executeUpdate();
+        }
+        conn.commit();
+        stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + "(k, 
c.v, d.v) VALUES(?,?,?)");
+        for (int i = 0; i < 5; i++) {
+            stmt.setString(1, Character.toString((char)('a' + 'z' + i)) + 
Bytes.toString(val));
+            stmt.setInt(2, i);
+            stmt.setInt(3, i);
+            stmt.executeUpdate();
+        }
+        conn.commit();
+
+        ResultSet rs;
+        TestUtil.analyzeTable(conn, fullTableName);
+        List<KeyRange> keyRanges = getAllSplits(conn, fullTableName);
+        assertEquals(26, keyRanges.size());
+        rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + 
fullTableName);
+        assertEquals("CLIENT 26-CHUNK 25 ROWS 12420 BYTES PARALLEL 1-WAY FULL 
SCAN OVER " + fullTableName,
+                QueryUtil.getExplainPlan(rs));
+
+        ConnectionQueryServices services = 
conn.unwrap(PhoenixConnection.class).getQueryServices();
+        List<HRegionLocation> regions = 
services.getAllTableRegions(Bytes.toBytes(fullTableName));
+        assertEquals(1, regions.size());
+
+        TestUtil.analyzeTable(conn, fullTableName);
+        String query = "UPDATE STATISTICS " + fullTableName + " SET \""
+                + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + 
Long.toString(1000);
+        conn.createStatement().execute(query);
+        keyRanges = getAllSplits(conn, fullTableName);
+        assertEquals(12, keyRanges.size());
+
+        rs = conn
+                .createStatement()
+                .executeQuery(
+                        "SELECT 
COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH),COUNT(*) from 
SYSTEM.STATS where PHYSICAL_NAME = '"
+                                + fullTableName + "' GROUP BY COLUMN_FAMILY 
ORDER BY COLUMN_FAMILY");
+
+        assertTrue(rs.next());
+        assertEquals("A", rs.getString(1));
+        assertEquals(24, rs.getInt(2));
+        assertEquals(12144, rs.getInt(3));
+        assertEquals(11, rs.getInt(4));
+
+        assertTrue(rs.next());
+        assertEquals("B", rs.getString(1));
+        assertEquals(20, rs.getInt(2));
+        assertEquals(5540, rs.getInt(3));
+        assertEquals(5, rs.getInt(4));
+
+        assertTrue(rs.next());
+        assertEquals("C", rs.getString(1));
+        assertEquals(24, rs.getInt(2));
+        assertEquals(6652, rs.getInt(3));
+        assertEquals(6, rs.getInt(4));
+
+        assertTrue(rs.next());
+        assertEquals("D", rs.getString(1));
+        assertEquals(24, rs.getInt(2));
+        assertEquals(6652, rs.getInt(3));
+        assertEquals(6, rs.getInt(4));
+
+        assertFalse(rs.next());
+    }
+
+    @Test
+    public void testRowCountAndByteCounts() throws SQLException {
+        Connection conn;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        conn = DriverManager.getConnection(getUrl(), props);
+        String ddl = "CREATE TABLE " + fullTableName + " (t_id VARCHAR NOT 
NULL,\n" + "k1 INTEGER NOT NULL,\n"
+                + "k2 INTEGER NOT NULL,\n" + "C3.k3 INTEGER,\n" + "C2.v1 
VARCHAR,\n"
+                + "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2)) split on 
('e','j','o')";
+        conn.createStatement().execute(ddl);
+        String[] strings = { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", 
"k", "l", "m", "n", "o", "p", "q", "r",
+                "s", "t", "u", "v", "w", "x", "y", "z" };
+        for (int i = 0; i < 26; i++) {
+            conn.createStatement().execute(
+                    "UPSERT INTO " + fullTableName + " values('" + strings[i] 
+ "'," + i + "," + (i + 1) + ","
+                            + (i + 2) + ",'" + strings[25 - i] + "')");
+        }
+        conn.commit();
+        ResultSet rs;
+        String query = "UPDATE STATISTICS " + fullTableName + " SET \""
+                + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + 
Long.toString(20);
+        conn.createStatement().execute(query);
+        Random r = new Random();
+        int count = 0;
+        while (count < 4) {
+            int startIndex = r.nextInt(strings.length);
+            int endIndex = r.nextInt(strings.length - startIndex) + startIndex;
+            long rows = endIndex - startIndex;
+            long c2Bytes = rows * 35;
+            System.out.println(rows + ":" + startIndex + ":" + endIndex);
+            rs = conn.createStatement().executeQuery(
+                    "SELECT 
COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH) from 
SYSTEM.STATS where PHYSICAL_NAME = '"
+                            + fullTableName + "' AND GUIDE_POST_KEY>= cast('" 
+ strings[startIndex]
+                            + "' as varbinary) AND  GUIDE_POST_KEY<cast('" + 
strings[endIndex]
+                            + "' as varbinary) and COLUMN_FAMILY='C2' group by 
COLUMN_FAMILY");
+            if (startIndex < endIndex) {
+                assertTrue(rs.next());
+                assertEquals("C2", rs.getString(1));
+                assertEquals(rows, rs.getLong(2));
+                assertEquals(c2Bytes, rs.getLong(3));
+                count++;
+            }
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
deleted file mode 100644
index d922ad9..0000000
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorWithSplitsAndMultiCFIT.java
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.apache.phoenix.util.TestUtil.getAllSplits;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Random;
-
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.ConnectionQueryServices;
-import org.apache.phoenix.query.KeyRange;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.Maps;
-
-public class StatsCollectorWithSplitsAndMultiCFIT extends 
StatsCollectorAbstractIT {
-    private static final String STATS_TEST_TABLE_NAME_NEW = "S_NEW";
-    private static final byte[] STATS_TEST_TABLE_NEW_BYTES = 
Bytes.toBytes(STATS_TEST_TABLE_NAME_NEW);
-
-    @BeforeClass
-    public static void doSetup() throws Exception {
-        Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
-        // Must update config before starting server
-        props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 
Long.toString(1000));
-        props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, 
Boolean.TRUE.toString());
-        props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(1024));
-        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
-    }
-
-    @Test
-    public void testWithMultiCF() throws Exception {
-        int nRows = 20;
-        Connection conn;
-        PreparedStatement stmt;
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        conn = DriverManager.getConnection(getUrl(), props);
-        conn.createStatement().execute("CREATE TABLE " + 
STATS_TEST_TABLE_NAME_NEW
-                + "(k VARCHAR PRIMARY KEY, a.v INTEGER, b.v INTEGER, c.v 
INTEGER NULL, d.v INTEGER NULL) ");
-        stmt = conn.prepareStatement("UPSERT INTO " + 
STATS_TEST_TABLE_NAME_NEW + " VALUES(?,?, ?, ?, ?)");
-        byte[] val = new byte[250];
-        for (int i = 0; i < nRows; i++) {
-            stmt.setString(1, Character.toString((char)('a' + i)) + 
Bytes.toString(val));
-            stmt.setInt(2, i);
-            stmt.setInt(3, i);
-            stmt.setInt(4, i);
-            stmt.setInt(5, i);
-            stmt.executeUpdate();
-        }
-        conn.commit();
-        stmt = conn.prepareStatement("UPSERT INTO " + 
STATS_TEST_TABLE_NAME_NEW + "(k, c.v, d.v) VALUES(?,?,?)");
-        for (int i = 0; i < 5; i++) {
-            stmt.setString(1, Character.toString((char)('a' + 'z' + i)) + 
Bytes.toString(val));
-            stmt.setInt(2, i);
-            stmt.setInt(3, i);
-            stmt.executeUpdate();
-        }
-        conn.commit();
-
-        ResultSet rs;
-        TestUtil.analyzeTable(conn, STATS_TEST_TABLE_NAME_NEW);
-        List<KeyRange> keyRanges = getAllSplits(conn, 
STATS_TEST_TABLE_NAME_NEW);
-        assertEquals(12, keyRanges.size());
-        rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + 
STATS_TEST_TABLE_NAME_NEW);
-        assertEquals("CLIENT " + (12) + "-CHUNK " + "PARALLEL 1-WAY FULL SCAN 
OVER " + STATS_TEST_TABLE_NAME_NEW,
-                QueryUtil.getExplainPlan(rs));
-
-        ConnectionQueryServices services = 
conn.unwrap(PhoenixConnection.class).getQueryServices();
-        List<HRegionLocation> regions = 
services.getAllTableRegions(STATS_TEST_TABLE_NEW_BYTES);
-        assertEquals(1, regions.size());
-
-        TestUtil.analyzeTable(conn, STATS_TEST_TABLE_NAME_NEW);
-        String query = "UPDATE STATISTICS " + STATS_TEST_TABLE_NAME_NEW + " 
SET \""
-                + QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB + "\"=" + 
Long.toString(250);
-        conn.createStatement().execute(query);
-        keyRanges = getAllSplits(conn, STATS_TEST_TABLE_NAME_NEW);
-        assertEquals(26, keyRanges.size());
-
-        rs = conn.createStatement().executeQuery(
-                "SELECT 
COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH),COUNT(*) from 
SYSTEM.STATS where PHYSICAL_NAME = '"
-                        + STATS_TEST_TABLE_NAME_NEW + "' GROUP BY 
COLUMN_FAMILY ORDER BY COLUMN_FAMILY");
-
-        assertTrue(rs.next());
-        assertEquals("A", rs.getString(1));
-        assertEquals(25, rs.getInt(2));
-        assertEquals(12420, rs.getInt(3));
-        assertEquals(25, rs.getInt(4));
-
-        assertTrue(rs.next());
-        assertEquals("B", rs.getString(1));
-        assertEquals(20, rs.getInt(2));
-        assertEquals(5540, rs.getInt(3));
-        assertEquals(20, rs.getInt(4));
-
-        assertTrue(rs.next());
-        assertEquals("C", rs.getString(1));
-        assertEquals(25, rs.getInt(2));
-        assertEquals(6930, rs.getInt(3));
-        assertEquals(25, rs.getInt(4));
-
-        assertTrue(rs.next());
-        assertEquals("D", rs.getString(1));
-        assertEquals(25, rs.getInt(2));
-        assertEquals(6930, rs.getInt(3));
-        assertEquals(25, rs.getInt(4));
-
-    }
-
-    @Test
-    public void testRowCountAndByteCounts() throws SQLException {
-        Connection conn;
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        conn = DriverManager.getConnection(getUrl(), props);
-        String tableName = "T";
-        String ddl = "CREATE TABLE " + tableName + " (t_id VARCHAR NOT 
NULL,\n" + "k1 INTEGER NOT NULL,\n"
-                + "k2 INTEGER NOT NULL,\n" + "C3.k3 INTEGER,\n" + "C2.v1 
VARCHAR,\n"
-                + "CONSTRAINT pk PRIMARY KEY (t_id, k1, k2)) split on 
('e','j','o')";
-        conn.createStatement().execute(ddl);
-        String[] strings = { "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", 
"k", "l", "m", "n", "o", "p", "q", "r",
-                "s", "t", "u", "v", "w", "x", "y", "z" };
-        for (int i = 0; i < 26; i++) {
-            conn.createStatement().execute("UPSERT INTO " + tableName + " 
values('" + strings[i] + "'," + i + ","
-                    + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
-        }
-        conn.commit();
-        ResultSet rs;
-        String query = "UPDATE STATISTICS " + tableName + " SET \"" + 
QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB
-                + "\"=" + Long.toString(20);
-        conn.createStatement().execute(query);
-        Random r = new Random();
-        int count = 0;
-        while (count < 4) {
-            int startIndex = r.nextInt(strings.length);
-            int endIndex = r.nextInt(strings.length - startIndex) + startIndex;
-            long rows = endIndex - startIndex;
-            long c2Bytes = rows * 35;
-            System.out.println(rows + ":" + startIndex + ":" + endIndex);
-            rs = conn.createStatement().executeQuery(
-                    "SELECT 
COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH) from 
SYSTEM.STATS where PHYSICAL_NAME = '"
-                            + tableName + "' AND GUIDE_POST_KEY>= cast('" + 
strings[startIndex]
-                            + "' as varbinary) AND  GUIDE_POST_KEY<cast('" + 
strings[endIndex]
-                            + "' as varbinary) and COLUMN_FAMILY='C2' group by 
COLUMN_FAMILY");
-            if (startIndex < endIndex) {
-                assertTrue(rs.next());
-                assertEquals("C2", rs.getString(1));
-                assertEquals(rows, rs.getLong(2));
-                assertEquals(c2Bytes, rs.getLong(3));
-                count++;
-            }
-        }
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2d27179b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
index 69b3d00..11eb40e 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
@@ -23,9 +23,7 @@ import static 
org.apache.phoenix.exception.SQLExceptionCode.TABLE_UNDEFINED;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.KEY_SEQ;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION;
 import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
-import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
 import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_TABLE;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TYPE_SEQUENCE;
 import static org.apache.phoenix.schema.PTableType.SYSTEM;
 import static org.apache.phoenix.schema.PTableType.TABLE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
@@ -40,10 +38,16 @@ import java.sql.DatabaseMetaData;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
 import java.util.Properties;
+import java.util.Set;
 
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.schema.ColumnAlreadyExistsException;
 import org.apache.phoenix.schema.ColumnNotFoundException;
@@ -62,14 +66,17 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
     @Test
     public void testCreateTenantSpecificTable() throws Exception {
         // ensure we didn't create a physical HBase table for the 
tenant-specific table
-        HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), 
PropertiesUtil.deepCopy(TEST_PROPERTIES)).getAdmin();
+        Connection conn = DriverManager.getConnection(getUrl(), 
PropertiesUtil.deepCopy(TEST_PROPERTIES));
+        HBaseAdmin admin = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
         assertEquals(0, admin.listTables(TENANT_TABLE_NAME).length);
     }
     
     @Test
     public void testCreateTenantTableTwice() throws Exception {
         try {
-            createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, 
TENANT_TABLE_DDL, null, nextTimestamp(), false);
+            Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+            Connection conn = 
DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
+            conn.createStatement().execute(TENANT_TABLE_DDL);
                fail();
         }
         catch (TableAlreadyExistsException expected) {}
@@ -77,11 +84,13 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
     
     @Test
     public void testCreateTenantViewFromNonMultiTenant() throws Exception {
-        createTestTable(getUrl(), "CREATE TABLE NON_MULTI_TENANT_TABLE (K 
VARCHAR PRIMARY KEY)", null, nextTimestamp());
+        String tableName = generateRandomString();
+        createTestTable(getUrl(), "CREATE TABLE " + tableName + " (K VARCHAR 
PRIMARY KEY)");
         try {
+            String viewName = generateRandomString();
             // Only way to get this exception is to attempt to derive from a 
global, multi-type table, as we won't find
             // a tenant-specific table when we attempt to resolve the base 
table.
-            createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW 
TENANT_TABLE2 (COL VARCHAR) AS SELECT * FROM NON_MULTI_TENANT_TABLE", null, 
nextTimestamp());
+            createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW " + 
viewName + " (COL VARCHAR) AS SELECT * FROM " + tableName);
         }
         catch (TableNotFoundException expected) {
         }
@@ -89,10 +98,9 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
 
     @Test
     public void testAlteringMultiTenancyForTableWithViewsNotAllowed() throws 
Exception {
-        Properties props = new Properties();
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
-        String multiTenantTable = "BASE_MULTI_TENANT_SWITCH";
-        String globalTable = "GLOBAL_TABLE_SWITCH";
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String multiTenantTable = "MT_" + generateRandomString();
+        String globalTable = "G_" + generateRandomString();
         // create the two base tables
         try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
             String ddl = "CREATE TABLE " + multiTenantTable + " (TENANT_ID 
VARCHAR NOT NULL, PK1 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR, V3 VARCHAR 
CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, PK1)) MULTI_TENANT = true "; 
@@ -100,23 +108,21 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
             ddl = "CREATE TABLE " + globalTable + " (TENANT_ID VARCHAR NOT 
NULL, PK1 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR, V3 VARCHAR CONSTRAINT 
NAME_PK PRIMARY KEY(TENANT_ID, PK1)) ";
             conn.createStatement().execute(ddl);
         }
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
-        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "tenant1");
+        String t1 = generateRandomString();
+        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, t1);
         // create view on multi-tenant table
         try (Connection tenantConn = DriverManager.getConnection(getUrl(), 
props)) {
-            String viewName = "tenantview";
+            String viewName = "V_" + generateRandomString();
             String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " 
+ multiTenantTable;
             tenantConn.createStatement().execute(viewDDL);
         }
-        props = new Properties();
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
+        props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         // create view on global table
         try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
-            String viewName = "globalView";
+            String viewName = "V_" + generateRandomString();
             conn.createStatement().execute("CREATE VIEW " + viewName + " AS 
SELECT * FROM " + globalTable);
         }
-        props = new Properties();
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
+        props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
             try {
                 conn.createStatement().execute("ALTER TABLE " + globalTable + 
" SET MULTI_TENANT = " + true);
@@ -134,29 +140,21 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
         }
     }
     
-    @Test
-    public void testCreateTenantTableWithSameWhereClause() throws Exception {
-        createTestTable(getUrl(), PARENT_TABLE_DDL.replace(PARENT_TABLE_NAME, 
PARENT_TABLE_NAME + "_II"), null, nextTimestamp());
-        createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, 
TENANT_TABLE_DDL.replace(TENANT_TABLE_NAME, TENANT_TABLE_NAME + "2"), null, 
nextTimestamp());
-    }
-    
     @Test(expected=TableNotFoundException.class)
     public void testDeletionOfParentTableFailsOnTenantSpecificConnection() 
throws Exception {
-        createTestTable(getUrl(), PARENT_TABLE_DDL.replace(PARENT_TABLE_NAME, 
"TEMP_PARENT"), null, nextTimestamp());
-        Properties props = new Properties();
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, TENANT_ID); // 
connection is tenant-specific
         Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.createStatement().execute("DROP TABLE TEMP_PARENT");
+        conn.createStatement().execute("DROP TABLE " + PARENT_TABLE_NAME);
         conn.close();
     }
     
     public void testCreationOfParentTableFailsOnTenantSpecificConnection() 
throws Exception {
         try {
-            createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE TABLE 
PARENT_TABLE ( \n" + 
+            createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE TABLE " 
+ generateRandomString() + "( \n" + 
                     "                user VARCHAR ,\n" + 
                     "                id INTEGER not null primary key desc\n" + 
-                    "                ) ", null, nextTimestamp());
+                    "                ) ");
             fail();
         } catch (SQLException e) {
             
assertEquals(SQLExceptionCode.CANNOT_CREATE_TENANT_SPECIFIC_TABLE.getErrorCode(),
 e.getErrorCode());
@@ -165,13 +163,14 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
     
     @Test
     public void testTenantSpecificAndParentTablesMayBeInDifferentSchemas() 
throws SQLException {
-        createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW 
DIFFSCHEMA.TENANT_TABLE ( \n" + 
+        String fullTableName = "DIFFSCHEMA." + generateRandomString();
+        createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW " + 
fullTableName + " ( \n" + 
                 "                tenant_col VARCHAR) AS SELECT * \n" + 
-                "                FROM " + PARENT_TABLE_NAME + " WHERE 
tenant_type_id = 'aaa'", null, nextTimestamp());
+                "                FROM " + PARENT_TABLE_NAME + " WHERE 
tenant_type_id = 'aaa'");
         try {
-            createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW 
DIFFSCHEMA.TENANT_TABLE ( \n" + 
+            createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW " + 
fullTableName + "( \n" + 
                     "                tenant_col VARCHAR) AS SELECT *\n"+
-                    "                FROM DIFFSCHEMA." + PARENT_TABLE_NAME + " 
WHERE tenant_type_id = 'aaa'", null, nextTimestamp());
+                    "                FROM DIFFSCHEMA." + PARENT_TABLE_NAME + " 
WHERE tenant_type_id = 'aaa'");
             fail();
         }
         catch (SQLException expected) {
@@ -184,31 +183,33 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
         "                tenant_type_id VARCHAR(3) NOT NULL, \n" + 
         "                id INTEGER NOT NULL\n" + 
         "                CONSTRAINT pk PRIMARY KEY (tenant_id, tenant_type_id, 
id)) MULTI_TENANT=true";
-        createTestTable(getUrl(), newDDL, null, nextTimestamp());
-        createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW 
DIFFSCHEMA.TENANT_TABLE ( \n" + 
+        createTestTable(getUrl(), newDDL);
+        createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW " + 
fullTableName + "( \n" + 
                 "                tenant_col VARCHAR) AS SELECT *\n"+
-                "                FROM DIFFSCHEMA." + PARENT_TABLE_NAME + " 
WHERE tenant_type_id = 'aaa'", null, nextTimestamp());
+                "                FROM DIFFSCHEMA." + PARENT_TABLE_NAME + " 
WHERE tenant_type_id = 'aaa'");
     }
     
     @Test
     public void testTenantSpecificTableCanDeclarePK() throws SQLException {
-            createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW 
TENANT_TABLE2 ( \n" + 
+            createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW " + 
generateRandomString() + "( \n" + 
                     "                tenant_col VARCHAR PRIMARY KEY) AS SELECT 
*\n" + 
-                    "                FROM PARENT_TABLE", null, 
nextTimestamp());
+                    "                FROM " + PARENT_TABLE_NAME);
     }
     
     @Test(expected=ColumnAlreadyExistsException.class)
     public void testTenantSpecificTableCannotOverrideParentCol() throws 
SQLException {
-        createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW 
TENANT_TABLE2 ( \n" + 
+        createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL, "CREATE VIEW " + 
generateRandomString() + " ( \n" + 
                 "                user INTEGER) AS SELECT *\n" + 
-                "                FROM PARENT_TABLE", null, nextTimestamp());
+                "                FROM " + PARENT_TABLE_NAME);
     }
     
     @Test
     public void testBaseTableWrongFormatWithTenantTypeId() throws Exception {
         // only two PK columns for multi_tenant, multi_type
         try {
-            createTestTable(getUrl(), "CREATE TABLE BASE_TABLE2 (TENANT_ID 
VARCHAR NOT NULL PRIMARY KEY, ID VARCHAR, A INTEGER) MULTI_TENANT=true", null, 
nextTimestamp());
+            createTestTable(getUrl(), 
+                    "CREATE TABLE " + generateRandomString() + 
+                    "(TENANT_ID VARCHAR NOT NULL PRIMARY KEY, ID VARCHAR, A 
INTEGER) MULTI_TENANT=true");
             fail();
         }
         catch (SQLException expected) {
@@ -218,25 +219,13 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
     
     @Test
     public void testAddDropColumn() throws Exception {
-        Properties props = new Properties();
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = 
DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
         conn.setAutoCommit(true);
         try {
-            conn.createStatement().executeUpdate("upsert into " + 
TENANT_TABLE_NAME + " (id, tenant_col) values (1, 'Viva Las Vegas')");
-            
+            conn.createStatement().execute("upsert into " + TENANT_TABLE_NAME 
+ " (id, tenant_col) values (1, 'Viva Las Vegas')");
             conn.createStatement().execute("alter view " + TENANT_TABLE_NAME + 
" add tenant_col2 char(1) null");
-            
-            conn.close();
-            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
-            conn = 
DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
-            conn.setAutoCommit(true);
-            
-            conn.createStatement().executeUpdate("upsert into " + 
TENANT_TABLE_NAME + " (id, tenant_col2) values (2, 'a')");
-            conn.close();
-            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
-            conn = 
DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
-            conn.setAutoCommit(true);
+            conn.createStatement().execute("upsert into " + TENANT_TABLE_NAME 
+ " (id, tenant_col2) values (2, 'a')");
             
             ResultSet rs = conn.createStatement().executeQuery("select 
count(*) from " + TENANT_TABLE_NAME);
             rs.next();
@@ -246,24 +235,13 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
             rs.next();
             assertEquals(1, rs.getInt(1));
             
-            conn.close();
-            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
-            conn = 
DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
-            conn.setAutoCommit(true);
-            
             conn.createStatement().execute("alter view " + TENANT_TABLE_NAME + 
" drop column tenant_col");
-            
-            conn.close();
-            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
-            conn = 
DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
-            conn.setAutoCommit(true);
-            
             rs = conn.createStatement().executeQuery("select count(*) from " + 
TENANT_TABLE_NAME + "");
             rs.next();
             assertEquals(2, rs.getInt(1));
             
             try {
-                rs = conn.createStatement().executeQuery("select tenant_col 
from TENANT_TABLE");
+                rs = conn.createStatement().executeQuery("select tenant_col 
from " + TENANT_TABLE_NAME);
                 fail();
             }
             catch (ColumnNotFoundException expected) {}
@@ -275,8 +253,7 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
     
     @Test
     public void testDropOfPKInTenantTablesNotAllowed() throws Exception {
-        Properties props = new Properties();
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = 
DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
         try {
             // try removing a PK col
@@ -295,8 +272,7 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
     
     @Test
     public void testColumnMutationInParentTableWithExistingTenantTable() 
throws Exception {
-        Properties props = new Properties();
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
         try {
             try {
@@ -322,8 +298,7 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
     
     @Test
     public void testDisallowDropParentTableWithExistingTenantTable() throws 
Exception {
-        Properties props = new Properties();
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
         try {
             conn.createStatement().executeUpdate("drop table " + 
PARENT_TABLE_NAME);
@@ -339,18 +314,13 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
     
     @Test
     public void testAllowDropParentTableWithCascadeAndSingleTenantTable() 
throws Exception {
-           long ts = nextTimestamp();
-           Properties props = new Properties();
-           props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts));
+           Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
            Connection conn = DriverManager.getConnection(getUrl(), props);
            Connection connTenant = null;
     
                try {
                        // Drop Parent Table 
                        conn.createStatement().executeUpdate("DROP TABLE " + 
PARENT_TABLE_NAME + " CASCADE");
-                       conn.close();
-                     
-                       props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts + 10));
                        connTenant = 
DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
                        
                validateTenantViewIsDropped(conn);              
@@ -368,56 +338,58 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
     @Test
     public void 
testAllDropParentTableWithCascadeWithMultipleTenantTablesAndIndexes() throws 
Exception {
         // Create a second tenant table
-       createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL2, TENANT_TABLE_DDL, 
null, nextTimestamp());
+        String tenantTable2 = "V_" + generateRandomString();
+        createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL2, 
TENANT_TABLE_DDL.replace(TENANT_TABLE_NAME, tenantTable2));
        //TODO Create some tenant specific table indexes
         
-           long ts = nextTimestamp();
-           Properties props = new Properties();
-           props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts));
+           Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
            Connection conn = null;
            Connection connTenant1 = null;
            Connection connTenant2 = null;
     
                try {
+            List<String> sortedCatalogs = Arrays.asList(TENANT_ID, TENANT_ID2);
+            Collections.sort(sortedCatalogs);
                        conn = DriverManager.getConnection(getUrl(), props);
                DatabaseMetaData meta = conn.getMetaData();
-            ResultSet rs = meta.getSuperTables(null, null, 
StringUtil.escapeLike(TENANT_TABLE_NAME) + "%");
-            assertTrue(rs.next());
-            assertEquals(TENANT_ID2, 
rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
-            assertEquals(TENANT_TABLE_NAME, 
rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
-            assertEquals(PARENT_TABLE_NAME, 
rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+            ResultSet rs = meta.getTables(null, "", 
StringUtil.escapeLike(TENANT_TABLE_NAME), new String[] 
{PTableType.VIEW.getValue().getString()});
             assertTrue(rs.next());
             assertEquals(TENANT_ID, 
rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
-            assertEquals(TENANT_TABLE_NAME, 
rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
-            assertEquals(PARENT_TABLE_NAME, 
rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+            assertTableMetaData(rs, null, TENANT_TABLE_NAME, PTableType.VIEW);
+            assertFalse(rs.next());
+            
+            rs = meta.getTables(null, "", StringUtil.escapeLike(tenantTable2), 
new String[] {PTableType.VIEW.getValue().getString()});
+            assertTrue(rs.next());
+            assertEquals(TENANT_ID2, 
rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
+            assertTableMetaData(rs, null, tenantTable2, PTableType.VIEW);
+            assertFalse(rs.next());
+            
+            rs = meta.getTables(null, "", 
StringUtil.escapeLike(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID), new String[] 
{PTableType.VIEW.getValue().getString()});
             assertTrue(rs.next());
             assertEquals(TENANT_ID, 
rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
-            assertEquals(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, 
rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
-            assertEquals(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, 
rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+            assertTableMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, 
PTableType.VIEW);
             assertFalse(rs.next());
-            rs.close();
-            conn.close();
             
                        // Drop Parent Table 
                        conn.createStatement().executeUpdate("DROP TABLE " + 
PARENT_TABLE_NAME + " CASCADE");
                  
                        // Validate Tenant Views are dropped
-                       props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts + 10));
                        connTenant1 = 
DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
                validateTenantViewIsDropped(connTenant1);
                        connTenant2 = 
DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2, props);
                validateTenantViewIsDropped(connTenant2);
                
                // Validate Tenant Metadata is gone for the Tenant Table 
TENANT_TABLE_NAME
-                       conn = DriverManager.getConnection(getUrl(), props);
-               meta = conn.getMetaData();
-            rs = meta.getSuperTables(null, null, 
StringUtil.escapeLike(TENANT_TABLE_NAME) + "%");
+            rs = meta.getTables(null, "", 
StringUtil.escapeLike(TENANT_TABLE_NAME), new String[] 
{PTableType.VIEW.getValue().getString()});
+            assertFalse(rs.next());
+            rs = meta.getTables(null, "", StringUtil.escapeLike(tenantTable2), 
new String[] {PTableType.VIEW.getValue().getString()});
+            assertFalse(rs.next());
+            
+            rs = meta.getTables(null, "", 
StringUtil.escapeLike(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID), new String[] 
{PTableType.VIEW.getValue().getString()});
             assertTrue(rs.next());
             assertEquals(TENANT_ID, 
rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
-            assertEquals(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, 
rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
-            assertEquals(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, 
rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+            assertTableMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, 
PTableType.VIEW);
             assertFalse(rs.next());
-            rs.close();
                
            } finally {
                if (conn != null) {
@@ -446,29 +418,23 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
     @Test
     public void testTableMetadataScan() throws Exception {
         // create a tenant table with same name for a different tenant to make 
sure we are not picking it up in metadata scans for TENANT_ID
-        String tenantId2 = "tenant2";
+        String tenantId2 = "T_" + generateRandomString();
         String secondTenatConnectionURL = 
PHOENIX_JDBC_TENANT_SPECIFIC_URL.replace(TENANT_ID,  tenantId2);
-        String tenantTable2 = TENANT_TABLE_NAME+"2";
-        createTestTable(secondTenatConnectionURL, 
TENANT_TABLE_DDL.replace(TENANT_TABLE_NAME, tenantTable2), null, 
nextTimestamp(), false);
+        String tenantTable2 = "V_" + generateRandomString();
+        createTestTable(secondTenatConnectionURL, 
TENANT_TABLE_DDL.replace(TENANT_TABLE_NAME, tenantTable2));
         
-        Properties props = new Properties();
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
         try {
             // empty string means global tenant id
             // make sure connections w/o tenant id only see 
non-tenant-specific tables, both SYSTEM and USER
             DatabaseMetaData meta = conn.getMetaData();
-            ResultSet rs = meta.getTables("", null, null, null);
-            assertTrue(rs.next());
-            assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, 
SYSTEM_CATALOG_TABLE, SYSTEM);
-            assertTrue(rs.next());
-            assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, 
SYSTEM_FUNCTION_TABLE, SYSTEM);
-            assertTrue(rs.next());
-            assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, TYPE_SEQUENCE, 
SYSTEM);
-            assertTrue(rs.next());
-            assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, 
PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE, SYSTEM);
+            ResultSet rs = meta.getTables("", "", 
StringUtil.escapeLike(PARENT_TABLE_NAME), new String[] 
{TABLE.getValue().getString()});
             assertTrue(rs.next());
             assertTableMetaData(rs, null, PARENT_TABLE_NAME, TABLE);
+            assertFalse(rs.next());
+
+            rs = meta.getTables("", "", 
StringUtil.escapeLike(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID), new String[] 
{TABLE.getValue().getString()});
             assertTrue(rs.next());
             assertTableMetaData(rs, null, PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, 
TABLE);
             assertFalse(rs.next());
@@ -480,16 +446,26 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
                 assertNotEquals(tenantTable2, rs.getString("TABLE_NAME"));
             }
             
-            // null catalog means across all tenant_ids
-            rs = meta.getSuperTables(null, null, 
StringUtil.escapeLike(TENANT_TABLE_NAME) + "%");
+            List<String> sortedTableNames = Arrays.asList(TENANT_TABLE_NAME, 
TENANT_TABLE_NAME_NO_TENANT_TYPE_ID);
+            Collections.sort(sortedTableNames);
+            List<String> sortedParentNames;
+            if (sortedTableNames.get(0).equals(TENANT_TABLE_NAME)) {
+                sortedParentNames = Arrays.asList(PARENT_TABLE_NAME, 
PARENT_TABLE_NAME_NO_TENANT_TYPE_ID);
+            } else {
+                sortedParentNames = 
Arrays.asList(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, PARENT_TABLE_NAME);
+            }
+            rs = meta.getSuperTables(TENANT_ID, null, null);
             assertTrue(rs.next());
             assertEquals(TENANT_ID, 
rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
-            assertEquals(TENANT_TABLE_NAME, 
rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
-            assertEquals(PARENT_TABLE_NAME, 
rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+            assertEquals(sortedTableNames.get(0), 
rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
+            assertEquals(sortedParentNames.get(0), 
rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
             assertTrue(rs.next());
             assertEquals(TENANT_ID, 
rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
-            assertEquals(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, 
rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
-            assertEquals(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, 
rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+            assertEquals(sortedTableNames.get(1), 
rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
+            assertEquals(sortedParentNames.get(1), 
rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
+            assertFalse(rs.next());
+            
+            rs = meta.getSuperTables(tenantId2, null, null);
             assertTrue(rs.next());
             assertEquals(tenantId2, 
rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
             assertEquals(tenantTable2, 
rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
@@ -497,36 +473,22 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
             assertFalse(rs.next());
             conn.close();
             
-            // Global connection sees all tenant tables
-            conn = DriverManager.getConnection(getUrl(), props);
-            rs = conn.getMetaData().getSuperTables(TENANT_ID, null, null);
-            assertTrue(rs.next());
-            assertEquals(TENANT_ID, 
rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
-            assertEquals(TENANT_TABLE_NAME, 
rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
-            assertEquals(PARENT_TABLE_NAME, 
rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
-            assertTrue(rs.next());
-            assertEquals(TENANT_ID, 
rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
-            assertEquals(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, 
rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
-            assertEquals(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, 
rs.getString(PhoenixDatabaseMetaData.SUPERTABLE_NAME));
-            assertFalse(rs.next());
-            
+            Set<String> sortedCatalogs = new 
HashSet<>(Arrays.asList(TENANT_ID, tenantId2));
             rs = conn.getMetaData().getCatalogs();
-            assertTrue(rs.next());
-            assertEquals(TENANT_ID, 
rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
-            assertTrue(rs.next());
-            assertEquals(tenantId2, 
rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
-            assertFalse(rs.next());
+            while (rs.next()) {
+                
sortedCatalogs.remove(rs.getString(PhoenixDatabaseMetaData.TABLE_CAT));
+            }
+            assertTrue("Should have found both tenant IDs", 
sortedCatalogs.isEmpty());
         } finally {
             props.clear();
             conn.close();
         }
         
-        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(nextTimestamp()));
         conn = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, 
props);
         try {   
             // make sure tenant-specific connections only see their own tables 
and the global tables
             DatabaseMetaData meta = conn.getMetaData();
-            ResultSet rs = meta.getTables(null, null, null, null);
+            ResultSet rs = meta.getTables("", SYSTEM_CATALOG_SCHEMA, null, new 
String[] {PTableType.SYSTEM.getValue().getString()});
             assertTrue(rs.next());
             assertTableMetaData(rs, 
PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, 
PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE, PTableType.SYSTEM);
             assertTrue(rs.next());
@@ -535,18 +497,33 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
             assertTableMetaData(rs, 
PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, 
PhoenixDatabaseMetaData.TYPE_SEQUENCE, PTableType.SYSTEM);
             assertTrue(rs.next());
             assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, 
PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE, PTableType.SYSTEM);
+            assertFalse(rs.next());
+            
+            rs = meta.getTables(null, "", StringUtil.escapeLike(tenantTable2), 
new String[] {TABLE.getValue().getString()});
+            assertFalse(rs.next());
+
+            rs = meta.getTables(null, "", 
StringUtil.escapeLike(PARENT_TABLE_NAME), new String[] 
{TABLE.getValue().getString()});
             assertTrue(rs.next());
-            assertTableMetaData(rs, null, PARENT_TABLE_NAME, PTableType.TABLE);
+            assertTableMetaData(rs, null, PARENT_TABLE_NAME, TABLE);
+            assertFalse(rs.next());
+
+            rs = meta.getTables(null, "", 
StringUtil.escapeLike(PARENT_TABLE_NAME_NO_TENANT_TYPE_ID), new String[] 
{TABLE.getValue().getString()});
             assertTrue(rs.next());
-            assertTableMetaData(rs, null, PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, 
PTableType.TABLE);
+            assertTableMetaData(rs, null, PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, 
TABLE);
+            assertFalse(rs.next());
+
+            rs = meta.getTables(null, "", 
StringUtil.escapeLike(TENANT_TABLE_NAME), new String[] 
{PTableType.VIEW.getValue().getString()});
             assertTrue(rs.next());
             assertTableMetaData(rs, null, TENANT_TABLE_NAME, PTableType.VIEW);
+            assertFalse(rs.next());
+            
+            rs = meta.getTables(null, "", 
StringUtil.escapeLike(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID), new String[] 
{PTableType.VIEW.getValue().getString()});
             assertTrue(rs.next());
             assertTableMetaData(rs, null, TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, 
PTableType.VIEW);
             assertFalse(rs.next());
             
             // make sure tenants see parent table's columns and their own
-            rs = meta.getColumns(null, null, 
StringUtil.escapeLike(TENANT_TABLE_NAME) + "%", null);
+            rs = meta.getColumns(null, null, 
StringUtil.escapeLike(TENANT_TABLE_NAME), null);
             assertTrue(rs.next());
             assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "user", 1);
             assertTrue(rs.next());
@@ -557,6 +534,9 @@ public class TenantSpecificTablesDDLIT extends 
BaseTenantSpecificTablesIT {
             assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "id", 3);
             assertTrue(rs.next());
             assertColumnMetaData(rs, null, TENANT_TABLE_NAME, "tenant_col", 4);
+            assertFalse(rs.next());
+            
+            rs = meta.getColumns(null, null, 
StringUtil.escapeLike(TENANT_TABLE_NAME_NO_TENANT_TYPE_ID), null);
             assertTrue(rs.next());
             assertColumnMetaData(rs, null, 
TENANT_TABLE_NAME_NO_TENANT_TYPE_ID, "user", 1);
             assertTrue(rs.next());

Reply via email to