Repository: phoenix
Updated Branches:
  refs/heads/3.0 d42866918 -> 28d7f638f


PHOENIX-1309 Ensure Phoenix table is created for Local index and view
index tables to store guideposts against them - Addendum (James Taylor via
Ram)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/28d7f638
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/28d7f638
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/28d7f638

Branch: refs/heads/3.0
Commit: 28d7f638fca2bccfecc1d16c26efb5a3009eea6a
Parents: d428669
Author: Ramkrishna <ramkrishna.s.vasude...@intel.com>
Authored: Mon Oct 13 14:25:51 2014 +0530
Committer: Ramkrishna <ramkrishna.s.vasude...@intel.com>
Committed: Mon Oct 13 14:25:51 2014 +0530

----------------------------------------------------------------------
 .../end2end/TenantSpecificViewIndexIT.java      |   2 +-
 .../apache/phoenix/compile/PostDDLCompiler.java |  14 +--
 .../phoenix/compile/StatementContext.java       |   4 +
 .../apache/phoenix/schema/MetaDataClient.java   | 105 +++++++++++--------
 .../java/org/apache/phoenix/query/BaseTest.java |   2 +-
 5 files changed, 74 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/28d7f638/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
index e7cdc01..8abda3b 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
@@ -124,7 +124,7 @@ public class TenantSpecificViewIndexIT extends 
BaseTenantSpecificViewIndexIT {
     }
     
     @Test
-    public void testQueryingUsingTenantSpecific() throws Exception {
+    public void testNonPaddedTenantId() throws Exception {
         String tenantId1 = "org1";
         String tenantId2 = "org2";
         String ddl = "CREATE TABLE T (tenantId char(15) NOT NULL, pk1 varchar 
NOT NULL, pk2 INTEGER NOT NULL, val1 VARCHAR CONSTRAINT pk primary key 
(tenantId,pk1,pk2)) MULTI_TENANT = true";

http://git-wip-us.apache.org/repos/asf/phoenix/blob/28d7f638/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
index 294942f..033995e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
@@ -68,8 +68,13 @@ public class PostDDLCompiler {
     private final StatementContext context; // bogus context
 
     public PostDDLCompiler(PhoenixConnection connection) {
+        this(connection, new Scan());
+    }
+
+    public PostDDLCompiler(PhoenixConnection connection, Scan scan) {
         this.connection = connection;
-        this.context = new StatementContext(new PhoenixStatement(connection));
+        this.context = new StatementContext(new PhoenixStatement(connection), 
scan);
+        scan.setAttribute(BaseScannerRegionObserver.UNGROUPED_AGG, 
QueryConstants.TRUE);
     }
 
     public MutationPlan compile(final List<TableRef> tableRefs, final byte[] 
emptyCF, final byte[] projectCF, final List<PColumn> deleteList,
@@ -101,19 +106,16 @@ public class PostDDLCompiler {
                 try {
                     connection.setAutoCommit(true);
                     SQLException sqlE = null;
-                    if (deleteList == null && emptyCF == null) {
-                        return new MutationState(0, connection);
-                    }
                     /*
                      * Handles:
                      * 1) deletion of all rows for a DROP TABLE and 
subsequently deletion of all rows for a DROP INDEX;
                      * 2) deletion of all column values for a ALTER TABLE DROP 
COLUMN
                      * 3) updating the necessary rows to have an empty KV
+                     * 4) updating table stats
                      */
                     long totalMutationCount = 0;
                     for (final TableRef tableRef : tableRefs) {
-                        Scan scan = new Scan();
-                        
scan.setAttribute(BaseScannerRegionObserver.UNGROUPED_AGG, QueryConstants.TRUE);
+                        Scan scan = ScanUtil.newScan(context.getScan());
                         SelectStatement select = SelectStatement.COUNT_ONE;
                         // We need to use this tableRef
                         ColumnResolver resolver = new ColumnResolver() {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/28d7f638/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
index 887ca3e..b513ac1 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/compile/StatementContext.java
@@ -75,6 +75,10 @@ public class StatementContext {
     private Map<SelectStatement, Object> subqueryResults;
     
     public StatementContext(PhoenixStatement statement) {
+        this(statement, new Scan());
+    }
+    
+    public StatementContext(PhoenixStatement statement, Scan scan) {
         this(statement, FromCompiler.EMPTY_TABLE_RESOLVER, new Scan(), new 
SequenceManager(statement));
     }
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/28d7f638/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index b0549d2..a9a59d3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -84,13 +84,11 @@ import java.util.Set;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -99,7 +97,6 @@ import org.apache.phoenix.compile.FromCompiler;
 import org.apache.phoenix.compile.MutationPlan;
 import org.apache.phoenix.compile.PostDDLCompiler;
 import org.apache.phoenix.compile.PostIndexDDLCompiler;
-import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
@@ -109,7 +106,6 @@ import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
-import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.parse.AddColumnStatement;
 import org.apache.phoenix.parse.AlterIndexStatement;
 import org.apache.phoenix.parse.ColumnDef;
@@ -484,33 +480,55 @@ public class MetaDataClient {
     public MutationState updateStatistics(UpdateStatisticsStatement 
updateStatisticsStmt)
             throws SQLException {
         // Check before updating the stats if we have reached the configured 
time to reupdate the stats once again
-        ReadOnlyProps props = connection.getQueryServices().getProps();
-        final long msMinBetweenUpdates = props
-                .getLong(QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB,
-                        
props.getLong(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, 
-                                
QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS) / 2);
         ColumnResolver resolver = 
FromCompiler.getResolver(updateStatisticsStmt, connection);
         PTable table = resolver.getTables().get(0).getTable();
-        List<PTable> indexes = table.getIndexes();
-        List<PTable> tables = Lists.newArrayListWithExpectedSize(1 + 
indexes.size());
+        long rowCount = 0;
         if (updateStatisticsStmt.updateColumns()) {
-            tables.add(table);
+            rowCount += updateStatisticsInternal(table.getPhysicalName(), 
table);
         }
         if (updateStatisticsStmt.updateIndex()) {
-            tables.addAll(indexes);
-        }
-        for(PTable pTable : tables) {
-            updateStatisticsInternal(msMinBetweenUpdates, pTable);
+            // TODO: If our table is a VIEW with multiple indexes or a TABLE 
with local indexes,
+            // we may be doing more work than we have to here. We should union 
the scan ranges
+            // across all indexes in that case so that we don't re-calculate 
the same stats
+            // multiple times.
+            for (PTable index : table.getIndexes()) {
+                rowCount += updateStatisticsInternal(index.getPhysicalName(), 
index);
+            }
+            // If analyzing the indexes of a multi-tenant table or a table 
with view indexes
+            // then analyze all of those indexes too.
+            if (table.getType() != PTableType.VIEW &&
+               (table.isMultiTenant())
+               || (MetaDataUtil.hasViewIndexTable(connection, 
table.getName()))){
+                
+                String viewIndexTableName = 
MetaDataUtil.getViewIndexTableName(table.getTableName().getString());
+                String viewIndexSchemaName = 
MetaDataUtil.getViewIndexSchemaName(table.getSchemaName().getString());
+                final PName viewIndexPhysicalName = 
PNameFactory.newName(SchemaUtil.getTableName(viewIndexSchemaName, 
viewIndexTableName));
+                PTable indexLogicalTable = new DelegateTable(table) {
+                    @Override
+                    public PName getPhysicalName() {
+                        return viewIndexPhysicalName;
+                    }
+                    @Override
+                    public PTableStats getTableStats() {
+                        return PTableStats.EMPTY_STATS;
+                    }
+                };
+                rowCount += updateStatisticsInternal(viewIndexPhysicalName, 
indexLogicalTable);
+            }
         }
-        return new MutationState(1, connection);
+        return new MutationState((int)rowCount, connection);
     }
 
-    private MutationState updateStatisticsInternal(long msMinBetweenUpdates, 
PTable table) throws SQLException {
-        PName physicalName = table.getPhysicalName();
+    private long updateStatisticsInternal(PName physicalName, PTable 
logicalTable) throws SQLException {
+        ReadOnlyProps props = connection.getQueryServices().getProps();
+        final long msMinBetweenUpdates = props
+                .getLong(QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB,
+                        
props.getLong(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, 
+                                
QueryServicesOptions.DEFAULT_STATS_UPDATE_FREQ_MS) / 2);
         byte[] tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY;
         Long scn = connection.getSCN();
         // Always invalidate the cache
-        long clientTS = connection.getSCN() == null ? 
HConstants.LATEST_TIMESTAMP : scn;
+        long clientTimeStamp = connection.getSCN() == null ? 
HConstants.LATEST_TIMESTAMP : scn;
         String query = "SELECT CURRENT_DATE() - " + LAST_STATS_UPDATE_TIME + " 
FROM " + PhoenixDatabaseMetaData.SYSTEM_STATS_NAME
                 + " WHERE " + PHYSICAL_NAME + "='" + physicalName.getString() 
+ "' AND " + COLUMN_FAMILY
                 + " IS NULL AND " + REGION_NAME + " IS NULL AND " + 
LAST_STATS_UPDATE_TIME + " IS NOT NULL";
@@ -519,32 +537,29 @@ public class MetaDataClient {
         if (rs.next()) {
             msSinceLastUpdate = rs.getLong(1);
         }
-        if (msSinceLastUpdate >= msMinBetweenUpdates) {
-            // Here create the select query.
-            String countQuery = "SELECT /*+ NO_CACHE NO_INDEX */ count(*) FROM 
" + table.getName().getString();
-            PhoenixStatement statement = (PhoenixStatement) 
connection.createStatement();
-            QueryPlan plan = statement.compileQuery(countQuery);
-            Scan scan = plan.getContext().getScan();
-            // Add all CF in the table
-            scan.getFamilyMap().clear();
-            for (PColumnFamily family : table.getColumnFamilies()) {
-                scan.addFamily(family.getName().getBytes());
-            }
-            scan.setAttribute(BaseScannerRegionObserver.ANALYZE_TABLE, 
PDataType.TRUE_BYTES);
-            KeyValue kv = plan.iterator().next().getValue(0);
-            ImmutableBytesWritable tempPtr = plan.getContext().getTempPtr();
-            tempPtr.set(kv.getValue());
-            // A single Cell will be returned with the count(*) - we decode 
that here
-            long rowCount = PDataType.LONG.getCodec().decodeLong(tempPtr, 
SortOrder.getDefault());
-            // We need to update the stats table so that client will pull the 
new one with
-            // the updated stats.
-            
connection.getQueryServices().incrementTableTimeStamp(tenantIdBytes,
-                    
Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(physicalName.getString())),
-                    
Bytes.toBytes(SchemaUtil.getTableNameFromFullName(physicalName.getString())), 
clientTS);
-            return new  MutationState(0, connection, rowCount);
-        } else {
-            return new MutationState(0, connection);
+        if (msSinceLastUpdate < msMinBetweenUpdates) { 
+            return 0; 
         }
+
+        /*
+         * Execute a COUNT(*) through PostDDLCompiler as we need to use the 
logicalTable passed through, since it may not represent a "real"
+         * table in the case of the view indexes of a base table.
+         */
+        PostDDLCompiler compiler = new PostDDLCompiler(connection);
+        TableRef tableRef = new TableRef(null, logicalTable, clientTimeStamp, 
false);
+        MutationPlan plan = 
compiler.compile(Collections.singletonList(tableRef), null, null, null, 
clientTimeStamp);
+        Scan scan = plan.getContext().getScan();
+        scan.setCacheBlocks(false);
+        scan.setAttribute(BaseScannerRegionObserver.ANALYZE_TABLE, 
PDataType.TRUE_BYTES);
+        MutationState mutationState = plan.execute();
+        long rowCount = mutationState.getUpdateCount();
+
+        // We need to update the stats table so that client will pull the new 
one with
+        // the updated stats.
+        connection.getQueryServices().incrementTableTimeStamp(tenantIdBytes,
+                
Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(physicalName.getString())),
+                
Bytes.toBytes(SchemaUtil.getTableNameFromFullName(physicalName.getString())), 
clientTimeStamp);
+        return rowCount;
     }
 
     private MutationState buildIndexAtTimeStamp(PTable index, NamedTableNode 
dataTableNode) throws SQLException {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/28d7f638/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java 
b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index fa07bfb..1e3f004 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -551,7 +551,7 @@ public abstract class BaseTest {
                 @Override
                 public void run() {
                     try {
-                        utility.shutdownMiniCluster();
+                        if (utility != null) utility.shutdownMiniCluster();
                     } catch (Exception e) {
                         logger.warn("Exception caught when shutting down mini 
cluster", e);
                     }

Reply via email to the mailing list.