This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
     new 97f2d72  PHOENIX-5355 Speed up BaseIndexIT.
97f2d72 is described below

commit 97f2d72ef989e4870859fcd9fe2e68731258d5a5
Author: Lars Hofhansl <la...@apache.org>
AuthorDate: Tue Jun 18 16:32:26 2019 -0700

    PHOENIX-5355 Speed up BaseIndexIT.
---
 .../org/apache/phoenix/end2end/CreateTableIT.java  | 62 +++++++++++++++
 .../apache/phoenix/end2end/index/BaseIndexIT.java  | 91 ----------------------
 2 files changed, 62 insertions(+), 91 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index 054a218..96fa308 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.end2end;
 
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
@@ -24,6 +25,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -33,14 +35,19 @@ import java.sql.Statement;
 import java.util.List;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
@@ -48,10 +55,12 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.SchemaNotFoundException;
 import org.apache.phoenix.schema.TableAlreadyExistsException;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Assert;
@@ -803,6 +812,59 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
         }
     }
 
+    /**
+     * Ensure that HTD contains table priorities correctly.
+     */
+    @Test
+    public void testTableDescriptorPriority() throws SQLException, IOException {
+        String tableName = "TBL_" + generateUniqueName();
+        String indexName = "IND_" + generateUniqueName();
+        String fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
+        String fullIndexeName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, indexName);
+        // Check system tables priorities.
+        try (Admin admin = driver.getConnectionQueryServices(null, null).getAdmin();
+                Connection c = DriverManager.getConnection(getUrl())) {
+            ResultSet rs = c.getMetaData().getTables("", 
+                    "\""+ PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA + "\"",
+                    null, 
+                    new String[] {PTableType.SYSTEM.toString()});
+            ReadOnlyProps p = c.unwrap(PhoenixConnection.class).getQueryServices().getProps();
+            while (rs.next()) {
+                String schemaName = rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM);
+                String tName = rs.getString(PhoenixDatabaseMetaData.TABLE_NAME);
+                org.apache.hadoop.hbase.TableName hbaseTableName = SchemaUtil.getPhysicalTableName(SchemaUtil.getTableName(schemaName, tName), p);
+                TableDescriptor htd = admin.getDescriptor(hbaseTableName);
+                String val = htd.getValue("PRIORITY");
+                assertNotNull("PRIORITY is not set for table:" + htd, val);
+                assertTrue(Integer.parseInt(val)
+                        >= PhoenixRpcSchedulerFactory.getMetadataPriority(config));
+            }
+            Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+            String ddl ="CREATE TABLE " + fullTableName + TestUtil.TEST_TABLE_SCHEMA;
+            try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+                conn.setAutoCommit(false);
+                Statement stmt = conn.createStatement();
+                stmt.execute(ddl);
+                BaseTest.populateTestTable(fullTableName);
+                ddl = "CREATE INDEX " + indexName
+                        + " ON " + fullTableName + " (long_col1, long_col2)"
+                        + " INCLUDE (decimal_col1, decimal_col2)";
+                stmt.execute(ddl);
+            }
+
+            TableDescriptor dataTable = admin.getDescriptor(
+                    org.apache.hadoop.hbase.TableName.valueOf(fullTableName));
+            String val = dataTable.getValue("PRIORITY");
+            assertTrue(val == null || Integer.parseInt(val) < HConstants.HIGH_QOS);
+
+            TableDescriptor indexTable = admin.getDescriptor(
+                    org.apache.hadoop.hbase.TableName.valueOf(fullIndexeName));
+            val = indexTable.getValue("PRIORITY");
+            assertNotNull("PRIORITY is not set for table:" + indexTable, val);
+            assertTrue(Integer.parseInt(val) >= PhoenixRpcSchedulerFactory.getIndexPriority(config));
+        }
+    }
+
     private int checkGuidePostWidth(String tableName) throws Exception {
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             String query =
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
index b5b2d1e..b6b6d40 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
@@ -1152,61 +1152,6 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT {
         assertNull(tableScanner.next());
     }
 
-    /**
-     * Ensure that HTD contains table priorities correctly.
-     */
-    @Test
-    public void testTableDescriptorPriority() throws SQLException, IOException {
-        String tableName = "TBL_" + generateUniqueName();
-        String indexName = "IND_" + generateUniqueName();
-        String fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
-        String fullIndexeName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, indexName);
-        // Check system tables priorities.
-        try (Admin admin = driver.getConnectionQueryServices(null, null).getAdmin();
-                Connection c = DriverManager.getConnection(getUrl())) {
-            ResultSet rs = c.getMetaData().getTables("", 
-                    "\""+ PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA + "\"",
-                    null, 
-                    new String[] {PTableType.SYSTEM.toString()});
-            ReadOnlyProps p = c.unwrap(PhoenixConnection.class).getQueryServices().getProps();
-            while (rs.next()) {
-                String schemaName = rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM);
-                String tName = rs.getString(PhoenixDatabaseMetaData.TABLE_NAME);
-                org.apache.hadoop.hbase.TableName hbaseTableName = SchemaUtil.getPhysicalTableName(SchemaUtil.getTableName(schemaName, tName), p);
-                TableDescriptor htd = admin.getDescriptor(hbaseTableName);
-                String val = htd.getValue("PRIORITY");
-                assertNotNull("PRIORITY is not set for table:" + htd, val);
-                assertTrue(Integer.parseInt(val)
-                        >= PhoenixRpcSchedulerFactory.getMetadataPriority(config));
-            }
-            Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-            String ddl ="CREATE TABLE " + fullTableName + TestUtil.TEST_TABLE_SCHEMA + tableDDLOptions;
-            try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
-                conn.setAutoCommit(false);
-                Statement stmt = conn.createStatement();
-                stmt.execute(ddl);
-                BaseTest.populateTestTable(fullTableName);
-                ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName
-                        + " ON " + fullTableName + " (long_col1, long_col2)"
-                        + " INCLUDE (decimal_col1, decimal_col2)";
-                stmt.execute(ddl);
-            }
-
-            TableDescriptor dataTable = admin.getDescriptor(
-                    org.apache.hadoop.hbase.TableName.valueOf(fullTableName));
-            String val = dataTable.getValue("PRIORITY");
-            assertTrue(val == null || Integer.parseInt(val) < HConstants.HIGH_QOS);
-
-            if (!localIndex && mutable) {
-                TableDescriptor indexTable = admin.getDescriptor(
-                        org.apache.hadoop.hbase.TableName.valueOf(fullIndexeName));
-                val = indexTable.getValue("PRIORITY");
-                assertNotNull("PRIORITY is not set for table:" + indexTable, val);
-                assertTrue(Integer.parseInt(val) >= PhoenixRpcSchedulerFactory.getIndexPriority(config));
-            }
-        }
-    }
-
     @Test
     public void testQueryBackToDataTableWithDescPKColumn() throws SQLException {
         doTestQueryBackToDataTableWithDescPKColumn(true);
@@ -1304,40 +1249,4 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT {
         }
     }
 
-    @Test
-    public void testMaxIndexesPerTable() throws SQLException {
-        String tableName = "TBL_" + generateUniqueName();
-        String indexName = "IND_" + generateUniqueName();
-        String fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
-            Configuration conf =
-                    conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration();
-            int maxIndexes =
-                    conf.getInt(QueryServices.MAX_INDEXES_PER_TABLE,
-                        QueryServicesOptions.DEFAULT_MAX_INDEXES_PER_TABLE);
-            conn.createStatement()
-                    .execute("CREATE TABLE " + fullTableName
-                            + " (k VARCHAR NOT NULL PRIMARY KEY, \"V1\" VARCHAR, \"v2\" VARCHAR)"
-                            + tableDDLOptions);
-            for (int i = 0; i < maxIndexes; i++) {
-                conn.createStatement().execute("CREATE " + (localIndex ? "LOCAL " : "") + "INDEX "
-                        + indexName + i + " ON " + fullTableName + "(\"v2\") INCLUDE (\"V1\")");
-            }
-            try {
-                conn.createStatement()
-                        .execute("CREATE " + (localIndex ? "LOCAL " : "") + "INDEX " + indexName
-                                + maxIndexes + " ON " + fullTableName
-                                + "(\"v2\") INCLUDE (\"V1\")");
-                fail("Expected exception TOO_MANY_INDEXES");
-            } catch (SQLException e) {
-                assertEquals(e.getErrorCode(), SQLExceptionCode.TOO_MANY_INDEXES.getErrorCode());
-            }
-            conn.createStatement()
-                    .execute("CREATE " + (localIndex ? "LOCAL " : "") + "INDEX IF NOT EXISTS "
-                            + indexName + "0" + " ON " + fullTableName
-                            + "(\"v2\") INCLUDE (\"V1\")");
-        }
-    }
-
 }
