Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 98ef14822 -> 056333da2


PHOENIX-3253 Make changes in various classes for method level parallelization in BaseHBaseManagedTimeTableReuseIT


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5e63bd25
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5e63bd25
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5e63bd25

Branch: refs/heads/4.x-HBase-0.98
Commit: 5e63bd258a51736b8b0e27dd44ca7bbb4e793063
Parents: 98ef148
Author: James Taylor <jamestay...@apache.org>
Authored: Thu Sep 15 00:22:33 2016 -0700
Committer: James Taylor <jamestay...@apache.org>
Committed: Thu Sep 15 01:28:23 2016 -0700

----------------------------------------------------------------------
 .../ConnectionQueryServicesTestImpl.java        |  7 ++-
 .../org/apache/phoenix/end2end/DeleteIT.java    |  9 ++--
 .../phoenix/end2end/PhoenixRuntimeIT.java       | 48 +++++++++-----------
 .../phoenix/end2end/index/ViewIndexIT.java      | 41 +++--------------
 .../trace/PhoenixTableMetricsWriterIT.java      | 22 +++++----
 .../phoenix/trace/PhoenixTraceReaderIT.java     | 16 ++++---
 .../phoenix/trace/PhoenixTracingEndToEndIT.java | 23 +++++-----
 .../org/apache/phoenix/tx/TransactionIT.java    | 27 ++++++-----
 .../phoenix/trace/PhoenixMetricsSink.java       |  5 +-
 .../org/apache/phoenix/trace/TraceReader.java   | 12 ++---
 10 files changed, 91 insertions(+), 119 deletions(-)
----------------------------------------------------------------------
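
The recurring change in this patch is replacing hard-coded table names with a name generated per test method, so that methods in BaseHBaseManagedTimeTableReuseIT subclasses can run in parallel against the same shared mini-cluster without colliding on table or tenant names. A rough sketch of the pattern, using an illustrative name generator and a placeholder JDBC URL rather than the real base-class helpers:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.UUID;

    public class ParallelSafeTableNames {
        // Illustrative stand-in for generateRandomString(); the actual base class
        // may build names differently.
        private static String generateRandomString() {
            return "T_" + UUID.randomUUID().toString().replace("-", "").toUpperCase();
        }

        public static void main(String[] args) throws Exception {
            String tableName = generateRandomString();
            // Assumes a locally reachable Phoenix cluster; the URL is a placeholder.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                // Each test method creates and queries its own uniquely named table,
                // so concurrently running methods never contend for a fixed name.
                conn.createStatement().execute(
                        "CREATE TABLE " + tableName + " (k VARCHAR PRIMARY KEY, v VARCHAR)");
            }
        }
    }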


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5e63bd25/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java
index 48f392b..b1e264b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ConnectionQueryServicesTestImpl.java
@@ -20,6 +20,8 @@ package org.apache.phoenix.end2end;
 import static org.junit.Assert.assertEquals;
 
 import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Properties;
 import java.util.Set;
 
@@ -61,9 +63,10 @@ public class ConnectionQueryServicesTestImpl extends ConnectionQueryServicesImpl
     @Override
     public void close() throws SQLException {
         try {
-            Set<PhoenixConnection> connections;
+            Collection<PhoenixConnection> connections;
             synchronized(this) {
-                connections = this.connections;
+                // Make copy to prevent ConcurrentModificationException (TODO: figure out why this is necessary)
+                connections = new ArrayList<>(this.connections);
                 this.connections = Sets.newHashSet();
             }
             SQLCloseables.closeAll(connections);
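
The ArrayList copy above matters because SQLCloseables.closeAll iterates the collection after the synchronized block has been exited, so a connection registered or removed by another thread mid-iteration could otherwise trigger a ConcurrentModificationException. A minimal, self-contained sketch of the same copy-then-swap pattern (class and method names here are illustrative, not Phoenix APIs):

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.HashSet;
    import java.util.Set;

    public class CopyThenIterate {
        private Set<String> connections = new HashSet<>();

        public synchronized void add(String connection) {
            connections.add(connection);
        }

        public void closeAll() {
            Collection<String> toClose;
            synchronized (this) {
                // Snapshot under the lock, then swap in a fresh set so later
                // registrations never race with the iteration below.
                toClose = new ArrayList<>(connections);
                connections = new HashSet<>();
            }
            // Iterating the private snapshot is safe even if add() runs concurrently.
            for (String connection : toClose) {
                System.out.println("closing " + connection);
            }
        }

        public static void main(String[] args) {
            CopyThenIterate registry = new CopyThenIterate();
            registry.add("conn-1");
            registry.add("conn-2");
            registry.closeAll();
        }
    }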

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5e63bd25/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
index cca4eb4..5234d10 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DeleteIT.java
@@ -505,16 +505,17 @@ public class DeleteIT extends BaseHBaseManagedTimeTableReuseIT {
     
     @Test
     public void testDeleteForTableWithRowTimestampColServer() throws Exception {
-        testDeleteForTableWithRowTimestampCol(true);
+        String tableName = generateRandomString();
+        testDeleteForTableWithRowTimestampCol(true, tableName);
     }
     
     @Test
     public void testDeleteForTableWithRowTimestampColClient() throws Exception {
-        testDeleteForTableWithRowTimestampCol(false);
+        String tableName = generateRandomString();
+        testDeleteForTableWithRowTimestampCol(false, tableName);
     }
     
-    private void testDeleteForTableWithRowTimestampCol(boolean autoCommit) throws Exception {
-        String tableName = "testDeleteForTableWithRowTimestampCol".toUpperCase();
+    private void testDeleteForTableWithRowTimestampCol(boolean autoCommit, String tableName) throws Exception {
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             conn.setAutoCommit(autoCommit);
             Statement stm = conn.createStatement();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5e63bd25/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
index 579eb19..c84a76c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/PhoenixRuntimeIT.java
@@ -18,18 +18,17 @@
 package org.apache.phoenix.end2end;
 
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES;
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
+import java.util.HashSet;
 import java.util.Properties;
+import java.util.Set;
 
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -53,7 +52,7 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Test;
 
-import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 
 public class PhoenixRuntimeIT extends BaseHBaseManagedTimeTableReuseIT {
     private static void assertTenantIds(Expression e, HTableInterface htable, Filter filter, String[] tenantIds) throws IOException {
@@ -63,17 +62,15 @@ public class PhoenixRuntimeIT extends BaseHBaseManagedTimeTableReuseIT {
         ResultScanner scanner = htable.getScanner(scan);
         Result result = null;
         ResultTuple tuple = new ResultTuple();
-        List<String> actualTenantIds = Lists.newArrayListWithExpectedSize(tenantIds.length);
-        List<String> expectedTenantIds = Arrays.asList(tenantIds);
+        Set<String> actualTenantIds = Sets.newHashSetWithExpectedSize(tenantIds.length);
+        Set<String> expectedTenantIds = new HashSet<>(Arrays.asList(tenantIds));
         while ((result = scanner.next()) != null) {
             tuple.setResult(result);
             e.evaluate(tuple, ptr);
             String tenantId = (String)PVarchar.INSTANCE.toObject(ptr);
             actualTenantIds.add(tenantId == null ? "" : tenantId);
         }
-        // Need to sort because of salting
-        Collections.sort(actualTenantIds);
-        assertEquals(expectedTenantIds, actualTenantIds);
+        assertTrue(actualTenantIds.containsAll(expectedTenantIds));
     }
     
     @Test
@@ -96,35 +93,34 @@ public class PhoenixRuntimeIT extends BaseHBaseManagedTimeTableReuseIT {
     }
     
     private void testGetTenantIdExpression(boolean isSalted) throws Exception {
-        //Have to delete metaData tables because BaseHBaseManagedTimeTableReuseIT doesn't delete them after each test case , and tenant list will create issues between test cases
-        deletePriorMetaData(HConstants.LATEST_TIMESTAMP, getUrl());
-
         Connection conn = DriverManager.getConnection(getUrl());
         conn.setAutoCommit(true);
         String tableName = generateRandomString() ;
-        String sequenceName = generateRandomString() ;
+        String sequenceName = generateRandomString();
+        String t1 = generateRandomString();
+        String t2 = t1 + generateRandomString(); // ensure bigger
         conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 
VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) 
MULTI_TENANT=true" + (isSalted ? ",SALT_BUCKETS=3" : ""));
         conn.createStatement().execute("CREATE SEQUENCE "  + sequenceName);
-        conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES('t1','x')");
-        conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES('t2','y')");
+        conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES('" + t1 + "','x')");
+        conn.createStatement().execute("UPSERT INTO " + tableName + " 
VALUES('" + t2 + "','y')");
         
         Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
-        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "t1");
+        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, t1);
         Connection tsconn = DriverManager.getConnection(getUrl(), props);
         tsconn.createStatement().execute("CREATE SEQUENCE " + sequenceName);
         Expression e1 = PhoenixRuntime.getTenantIdExpression(tsconn, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME);
         HTableInterface htable1 = tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
-        assertTenantIds(e1, htable1, new FirstKeyOnlyFilter(), new String[] {"", "t1"} );
+        assertTenantIds(e1, htable1, new FirstKeyOnlyFilter(), new String[] {"", t1} );
 
         String viewName = generateRandomString();
         tsconn.createStatement().execute("CREATE VIEW " + viewName + "(V1 
VARCHAR) AS SELECT * FROM " + tableName);
         Expression e2 = PhoenixRuntime.getTenantIdExpression(tsconn, 
PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
         HTableInterface htable2 = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
-        assertTenantIds(e2, htable2, getUserTableAndViewsFilter(), new 
String[] {"", "t1"} );
+        assertTenantIds(e2, htable2, getUserTableAndViewsFilter(), new 
String[] {"", t1} );
         
         Expression e3 = PhoenixRuntime.getTenantIdExpression(conn, tableName);
         HTableInterface htable3 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
-        assertTenantIds(e3, htable3, new FirstKeyOnlyFilter(), new String[] {"t1", "t2"} );
+        assertTenantIds(e3, htable3, new FirstKeyOnlyFilter(), new String[] {t1, t2} );
 
         String basTableName = generateRandomString();
         conn.createStatement().execute("CREATE TABLE " + basTableName + " (k1 
VARCHAR PRIMARY KEY)");
@@ -135,23 +131,21 @@ public class PhoenixRuntimeIT extends 
BaseHBaseManagedTimeTableReuseIT {
         tsconn.createStatement().execute("CREATE INDEX " + indexName1 + " ON " 
+ viewName + "(V1)");
         Expression e5 = PhoenixRuntime.getTenantIdExpression(tsconn, 
indexName1);
         HTableInterface htable5 = 
tsconn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(MetaDataUtil.VIEW_INDEX_TABLE_PREFIX
 + tableName));
-        assertTenantIds(e5, htable5, new FirstKeyOnlyFilter(), new String[] 
{"t1"} );
+        assertTenantIds(e5, htable5, new FirstKeyOnlyFilter(), new String[] 
{t1} );
 
         String indexName2 = generateRandomString();
         conn.createStatement().execute("CREATE INDEX " + indexName2 + " ON " + 
tableName + "(k2)");
         Expression e6 = PhoenixRuntime.getTenantIdExpression(conn, indexName2);
         HTableInterface htable6 = 
conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(indexName2));
-        assertTenantIds(e6, htable6, new FirstKeyOnlyFilter(), new String[] 
{"t1", "t2"} );
+        assertTenantIds(e6, htable6, new FirstKeyOnlyFilter(), new String[] 
{t1, t2} );
         
         tableName = generateRandomString() + "BAR_" + (isSalted ? "SALTED" : "UNSALTED");
         conn.createStatement().execute("CREATE TABLE " + tableName + " (k1 VARCHAR NOT NULL, k2 VARCHAR, CONSTRAINT PK PRIMARY KEY(K1,K2)) " + (isSalted ? "SALT_BUCKETS=3" : ""));
-        conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t1','x')");
-        conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('t2','y')");
+        conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t1 + "','x')");
+        conn.createStatement().execute("UPSERT INTO " + tableName + " VALUES('" + t2 + "','y')");
         Expression e7 = PhoenixRuntime.getFirstPKColumnExpression(conn, tableName);
         HTableInterface htable7 = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(tableName));
-        assertTenantIds(e7, htable7, new FirstKeyOnlyFilter(), new String[] {"t1", "t2"} );
-
-
+        assertTenantIds(e7, htable7, new FirstKeyOnlyFilter(), new String[] {t1, t2} );
     }
     
 }
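
The assertion change in assertTenantIds (sorted-list equality replaced by a set containsAll check) is what lets this test tolerate rows written by other test methods that share the SYSTEM tables, as well as ordering differences introduced by salting. A tiny illustration of the relaxed check, with hypothetical tenant ids:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class ContainsAllCheck {
        public static void main(String[] args) {
            // Rows scanned from a shared table may include tenants created by other tests.
            Set<String> actualTenantIds =
                    new HashSet<>(Arrays.asList("", "T1", "T2", "OTHER_TEST_TENANT"));
            Set<String> expectedTenantIds = new HashSet<>(Arrays.asList("", "T1", "T2"));
            // The superset check passes where strict equality would fail.
            System.out.println(actualTenantIds.containsAll(expectedTenantIds)); // true
        }
    }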

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5e63bd25/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
index c985fc9..f70397a 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
@@ -31,53 +31,29 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Map;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.end2end.BaseHBaseManagedTimeTableReuseIT;
-import org.apache.phoenix.end2end.Shadower;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.apache.phoenix.util.SchemaUtil;
-import org.apache.phoenix.util.TestUtil;
-import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
-import com.google.common.collect.Maps;
-
 @RunWith(Parameterized.class)
 public class ViewIndexIT extends BaseHBaseManagedTimeTableReuseIT {
-
-
-    private String schemaName="TEST";
     private boolean isNamespaceMapped;
 
-
-    @BeforeClass
-    @Shadower(classBeingShadowed = BaseHBaseManagedTimeTableReuseIT.class)
-    public static void doSetup() throws Exception {
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
-        // Drop the HBase table metadata for this test to confirm that view index table dropped
-        props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
-        // Must update config before starting server
-        setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
-    }
-
     @Parameters(name = "isNamespaceMapped = {0}")
     public static Collection<Boolean> data() {
         return Arrays.asList(true, false);
     }
 
-    private void createBaseTable(String tableName, boolean multiTenant, Integer saltBuckets, String splits)
+    private void createBaseTable(String schemaName, String tableName, boolean multiTenant, Integer saltBuckets, String splits)
             throws SQLException {
         Connection conn = getConnection();
         if (isNamespaceMapped) {
@@ -104,9 +80,8 @@ public class ViewIndexIT extends BaseHBaseManagedTimeTableReuseIT {
         conn.close();
     }
     
-    public Connection getConnection() throws SQLException{
+    private Connection getConnection() throws SQLException{
         Properties props = new Properties();
-        props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
         props.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(isNamespaceMapped));
         return DriverManager.getConnection(getUrl(),props);
     }
@@ -117,14 +92,13 @@ public class ViewIndexIT extends BaseHBaseManagedTimeTableReuseIT {
 
     @Test
     public void testDeleteViewIndexSequences() throws Exception {
+        String schemaName = generateRandomString();
         String tableName = schemaName + "." + generateRandomString();
         String indexName = "IND_" + generateRandomString();
         String VIEW_NAME = "VIEW_" + generateRandomString();
-        TableName physicalTableName = SchemaUtil.getPhysicalTableName(tableName.getBytes(), isNamespaceMapped);
-        String viewIndexPhysicalTableName = physicalTableName.getNameAsString();
         String viewName = schemaName + "." + VIEW_NAME;
 
-        createBaseTable(tableName, false, null, null);
+        createBaseTable(schemaName, tableName, false, null, null);
         Connection conn1 = getConnection();
         Connection conn2 = getConnection();
         conn1.createStatement().execute("CREATE VIEW " + viewName + " AS 
SELECT * FROM " + tableName);
@@ -138,22 +112,19 @@ public class ViewIndexIT extends 
BaseHBaseManagedTimeTableReuseIT {
         // Check other format of sequence is not there as Sequences format is different for views/indexes created on
         // table which are namespace mapped and which are not.
         verifySequence(null, seqName, seqSchemaName, false);
-        HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
         conn1.createStatement().execute("DROP VIEW " + viewName);
         conn1.createStatement().execute("DROP TABLE "+ tableName);
-        admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
-        assertFalse("View index table should be deleted.", admin.tableExists(TableName.valueOf(viewIndexPhysicalTableName)));
         
         verifySequence(null, sequenceName, sequenceSchemaName, false);
-
     }
     
     @Test
     public void testMultiTenantViewLocalIndex() throws Exception {
+        String schemaName = generateRandomString();
         String tableName =  generateRandomString();
         String indexName = "IND_" + generateRandomString();
         String VIEW_NAME = "VIEW_" + generateRandomString();
-        createBaseTable(tableName, true, null, null);
+        createBaseTable(schemaName, tableName, true, null, null);
         Connection conn = DriverManager.getConnection(getUrl());
         PreparedStatement stmt = conn.prepareStatement(
                 "UPSERT INTO " + tableName

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5e63bd25/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTableMetricsWriterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTableMetricsWriterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTableMetricsWriterIT.java
index 533b6f8..f0319c0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTableMetricsWriterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTableMetricsWriterIT.java
@@ -17,18 +17,18 @@
  */
 package org.apache.phoenix.trace;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.util.Collection;
+
 import org.apache.hadoop.metrics2.MetricsRecord;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.trace.TraceReader.SpanInfo;
 import org.apache.phoenix.trace.TraceReader.TraceHolder;
 import org.junit.Test;
 
-import java.sql.Connection;
-import java.util.Collection;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
 /**
  * Test that the logging sink stores the expected metrics/stats
  */
@@ -43,7 +43,8 @@ public class PhoenixTableMetricsWriterIT extends BaseTracingTestIT {
     public void testCreatesTable() throws Exception {
         PhoenixMetricsSink sink = new PhoenixMetricsSink();
         Connection conn = getConnectionWithoutTracing();
-        sink.initForTesting(conn);
+        String tableName = generateRandomString();
+        sink.initForTesting(conn, tableName);
 
         // check for existence of the tracing table
         try {
@@ -57,7 +58,7 @@ public class PhoenixTableMetricsWriterIT extends BaseTracingTestIT {
 
         // initialize sink again, which should attempt to create the table, but not fail
         try {
-            sink.initForTesting(conn);
+            sink.initForTesting(conn, tableName);
         } catch (Exception e) {
             fail("Initialization shouldn't fail if table already exists!");
         }
@@ -73,7 +74,8 @@ public class PhoenixTableMetricsWriterIT extends BaseTracingTestIT {
         // hook up a phoenix sink
         PhoenixMetricsSink sink = new PhoenixMetricsSink();
         Connection conn = getConnectionWithoutTracing();
-        sink.initForTesting(conn);
+        String tableName = generateRandomString();
+        sink.initForTesting(conn, tableName);
 
         // create a simple metrics record
         long traceid = 987654;
@@ -94,7 +96,7 @@ public class PhoenixTableMetricsWriterIT extends BaseTracingTestIT {
 
         // make sure we only get expected stat entry (matcing the trace id), otherwise we could the
         // stats for the update as well
-        TraceReader reader = new TraceReader(conn);
+        TraceReader reader = new TraceReader(conn, tableName);
         Collection<TraceHolder> traces = reader.readAll(10);
         assertEquals("Wrong number of traces in the tracing table", 1, 
traces.size());
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5e63bd25/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
index 1308c13..5a1335d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTraceReaderIT.java
@@ -55,7 +55,8 @@ public class PhoenixTraceReaderIT extends BaseTracingTestIT {
         PhoenixMetricsSink sink = new PhoenixMetricsSink();
         Properties props = new Properties(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
-        sink.initForTesting(conn);
+        String tableName = generateRandomString();
+        sink.initForTesting(conn, tableName);
 
         // create a simple metrics record
         long traceid = 987654;
@@ -64,7 +65,7 @@ public class PhoenixTraceReaderIT extends BaseTracingTestIT {
                     "host-name.value", "test annotation for a span");
 
         // start a reader
-        validateTraces(Collections.singletonList(record), conn, traceid);
+        validateTraces(Collections.singletonList(record), conn, traceid, tableName);
     }
 
     private MetricsRecord createAndFlush(PhoenixMetricsSink sink, long traceid,
@@ -87,7 +88,8 @@ public class PhoenixTraceReaderIT extends BaseTracingTestIT {
         // hook up a phoenix sink
         PhoenixMetricsSink sink = new PhoenixMetricsSink();
         Connection conn = getConnectionWithoutTracing();
-        sink.initForTesting(conn);
+        String tableName = generateRandomString();
+        sink.initForTesting(conn, tableName);
 
         // create a simple metrics record
         long traceid = 12345;
@@ -119,12 +121,12 @@ public class PhoenixTraceReaderIT extends BaseTracingTestIT {
         sink.flush();
 
         // start a reader
-        validateTraces(records, conn, traceid);
+        validateTraces(records, conn, traceid, tableName);
     }
 
-    private void validateTraces(List<MetricsRecord> records, Connection conn, long traceid)
+    private void validateTraces(List<MetricsRecord> records, Connection conn, long traceid, String tableName)
             throws Exception {
-        TraceReader reader = new TraceReader(conn);
+        TraceReader reader = new TraceReader(conn, tableName);
         Collection<TraceHolder> traces = reader.readAll(1);
         assertEquals("Got an unexpected number of traces!", 1, traces.size());
         // make sure the trace matches what we wrote
@@ -175,4 +177,4 @@ public class PhoenixTraceReaderIT extends BaseTracingTestIT {
                 spanInfo.annotationCount);
         }
     }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5e63bd25/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
index 686b5f1..7b66e63 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/trace/PhoenixTracingEndToEndIT.java
@@ -48,7 +48,7 @@ import org.cloudera.htrace.Trace;
 import org.cloudera.htrace.TraceScope;
 import org.cloudera.htrace.impl.ProbabilitySampler;
 import org.junit.After;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.collect.ImmutableMap;
@@ -64,13 +64,15 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     private final String table = "ENABLED_FOR_LOGGING";
     private final String index = "ENABALED_FOR_LOGGING_INDEX";
 
-    private static DisableableMetricsWriter sink;
+    private DisableableMetricsWriter sink;
+    private String tableName;
 
-    @BeforeClass
-    public static void setupMetrics() throws Exception {
+    @Before
+    public void setupMetrics() throws Exception {
         PhoenixMetricsSink pWriter = new PhoenixMetricsSink();
         Connection conn = getConnectionWithoutTracing();
-        pWriter.initForTesting(conn);
+        tableName = generateRandomString();
+        pWriter.initForTesting(conn, tableName);
         sink = new DisableableMetricsWriter(pWriter);
 
         TracingTestUtil.registerSink(sink);
@@ -80,22 +82,19 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     public void cleanup() {
         sink.disable();
         sink.clear();
-        sink.enable();
-
-        // LISTENABLE.clearListeners();
     }
 
-    private static void waitForCommit(CountDownLatch latch) throws SQLException {
+    private void waitForCommit(CountDownLatch latch) throws SQLException {
         Connection conn = new CountDownConnection(getConnectionWithoutTracing(), latch);
         replaceWriterConnection(conn);
     }
 
-    private static void replaceWriterConnection(Connection conn) throws SQLException {
+    private void replaceWriterConnection(Connection conn) throws SQLException {
         // disable the writer
         sink.disable();
 
         // swap the connection for one that listens
-        sink.getDelegate().initForTesting(conn);
+        sink.getDelegate().initForTesting(conn, tableName);
 
         // enable the writer
         sink.enable();
@@ -461,7 +460,7 @@ public class PhoenixTracingEndToEndIT extends BaseTracingTestIT {
     }
     
     private boolean checkStoredTraces(Connection conn, TraceChecker checker) throws Exception {
-        TraceReader reader = new TraceReader(conn);
+        TraceReader reader = new TraceReader(conn, tableName);
         int retries = 0;
         boolean found = false;
         outer: while (retries < MAX_RETRIES) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5e63bd25/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index a89889e..56f469c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@ -503,16 +503,18 @@ public class TransactionIT extends BaseHBaseManagedTimeTableReuseIT {
     public void testCreateTableToBeTransactional() throws Exception {
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);
-        String ddl = "CREATE TABLE TEST_TRANSACTIONAL_TABLE (k varchar primary 
key) transactional=true";
+        String t1 = generateRandomString();
+        String t2 = generateRandomString();
+        String ddl = "CREATE TABLE " + t1 + " (k varchar primary key) 
transactional=true";
         conn.createStatement().execute(ddl);
         PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
-        PTable table = pconn.getTable(new PTableKey(null, "TEST_TRANSACTIONAL_TABLE"));
-        HTableInterface htable = pconn.getQueryServices().getTable(Bytes.toBytes("TEST_TRANSACTIONAL_TABLE"));
+        PTable table = pconn.getTable(new PTableKey(null, t1));
+        HTableInterface htable = pconn.getQueryServices().getTable(Bytes.toBytes(t1));
         assertTrue(table.isTransactional());
         assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
         
         try {
-            ddl = "ALTER TABLE TEST_TRANSACTIONAL_TABLE SET 
transactional=false";
+            ddl = "ALTER TABLE " + t1 + " SET transactional=false";
             conn.createStatement().execute(ddl);
             fail();
         } catch (SQLException e) {
@@ -520,15 +522,15 @@ public class TransactionIT extends BaseHBaseManagedTimeTableReuseIT {
         }
 
         HBaseAdmin admin = pconn.getQueryServices().getAdmin();
-        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("TXN_TEST_EXISTING"));
+        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(t2));
         desc.addFamily(new HColumnDescriptor(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
         admin.createTable(desc);
-        ddl = "CREATE TABLE TXN_TEST_EXISTING (k varchar primary key) transactional=true";
+        ddl = "CREATE TABLE " + t2 + " (k varchar primary key) transactional=true";
         conn.createStatement().execute(ddl);
-        assertEquals(Boolean.TRUE.toString(), admin.getTableDescriptor(TableName.valueOf("TXN_TEST_EXISTING")).getValue(TxConstants.READ_NON_TX_DATA));
+        assertEquals(Boolean.TRUE.toString(), admin.getTableDescriptor(TableName.valueOf(t2)).getValue(TxConstants.READ_NON_TX_DATA));
         
         // Should be ok, as HBase metadata should match existing metadata.
-        ddl = "CREATE TABLE IF NOT EXISTS TEST_TRANSACTIONAL_TABLE (k varchar 
primary key)"; 
+        ddl = "CREATE TABLE IF NOT EXISTS " + t1 + " (k varchar primary key)"; 
         try {
             conn.createStatement().execute(ddl);
             fail();
@@ -537,8 +539,8 @@ public class TransactionIT extends BaseHBaseManagedTimeTableReuseIT {
         }
         ddl += " transactional=true";
         conn.createStatement().execute(ddl);
-        table = pconn.getTable(new PTableKey(null, "TEST_TRANSACTIONAL_TABLE"));
-        htable = pconn.getQueryServices().getTable(Bytes.toBytes("TEST_TRANSACTIONAL_TABLE"));
+        table = pconn.getTable(new PTableKey(null, t1));
+        htable = pconn.getQueryServices().getTable(Bytes.toBytes(t1));
         assertTrue(table.isTransactional());
         assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
     }
@@ -840,9 +842,10 @@ public class TransactionIT extends BaseHBaseManagedTimeTableReuseIT {
         String transTableName = generateRandomString();
         String fullTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + transTableName;
         String selectSQL = "SELECT * FROM " + fullTableName;
-        try (Connection conn1 = DriverManager.getConnection(getUrl()); 
+        try (Connection conn = DriverManager.getConnection(getUrl());
+                Connection conn1 = DriverManager.getConnection(getUrl()); 
                 Connection conn2 = DriverManager.getConnection(getUrl())) {
-            createTable(conn1, fullTableName);
+            createTable(conn, fullTableName);
             conn1.setAutoCommit(false);
             conn2.setAutoCommit(true);
             ResultSet rs = conn1.createStatement().executeQuery(selectSQL);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5e63bd25/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
index af0d6e7..a8e80ab 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/PhoenixMetricsSink.java
@@ -147,11 +147,12 @@ public class PhoenixMetricsSink implements MetricsSink {
      * {@link org.apache.phoenix.query.QueryServicesOptions#DEFAULT_TRACING_STATS_TABLE_NAME}
      *
      * @param conn to store for upserts and to create the table (if necessary)
+     * @param tableName TODO
      * @throws SQLException if any phoenix operation fails
      */
     @VisibleForTesting
-    public void initForTesting(Connection conn) throws SQLException {
-        initializeInternal(conn, QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME);
+    public void initForTesting(Connection conn, String tableName) throws SQLException {
+        initializeInternal(conn, tableName);
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5e63bd25/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
index f3fc81d..4f5b7a8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/trace/TraceReader.java
@@ -62,17 +62,13 @@ public class TraceReader {
     private String table;
     private int pageSize;
 
-    public TraceReader(Connection conn, String statsTableName) throws SQLException {
+    public TraceReader(Connection conn, String tracingTableName) throws SQLException {
         this.conn = conn;
-        this.table = statsTableName;
+        this.table = tracingTableName;
         String ps = conn.getClientInfo(QueryServices.TRACING_PAGE_SIZE_ATTRIB);
         this.pageSize = ps == null ? QueryServicesOptions.DEFAULT_TRACING_PAGE_SIZE : Integer.parseInt(ps);
     }
 
-    public TraceReader(Connection conn) throws SQLException {
-        this(conn, QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME);
-    }
-
     /**
      * Read all the currently stored traces.
      * <p>
@@ -87,7 +83,7 @@ public class TraceReader {
         // trace
         // goes together), and then by start time (so parent spans always appear before child spans)
         String query =
-                "SELECT " + knownColumns + " FROM " + QueryServicesOptions.DEFAULT_TRACING_STATS_TABLE_NAME
+                "SELECT " + knownColumns + " FROM " + table
                         + " ORDER BY " + MetricInfo.TRACE.columnName + " DESC, "
                         + MetricInfo.START.columnName + " ASC" + " LIMIT " + pageSize;
         int resultCount = 0;
@@ -381,4 +377,4 @@ public class TraceReader {
             return parentId;
         }
     }
-}
\ No newline at end of file
+}
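
With the no-argument TraceReader constructor removed, callers must now name the tracing table explicitly, which is what lets each test read back from the per-test table it handed to PhoenixMetricsSink.initForTesting. A hedged usage sketch; the JDBC URL and table name are placeholders, and the production path still resolves the table from configuration:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Collection;

    import org.apache.phoenix.trace.TraceReader;
    import org.apache.phoenix.trace.TraceReader.TraceHolder;

    public class TraceReaderUsageSketch {
        public static void main(String[] args) throws Exception {
            // Placeholder URL; the ITs obtain their connection from the test harness.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                TraceReader reader = new TraceReader(conn, "MY_TRACING_TABLE");
                Collection<TraceHolder> traces = reader.readAll(10);
                System.out.println("traces read: " + traces.size());
            }
        }
    }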
