Repository: phoenix
Updated Branches:
  refs/heads/master e35503374 -> 2faeda4a7


Rename StatisticsUtils to follow conventions and other misc cleanup


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/90333d9a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/90333d9a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/90333d9a

Branch: refs/heads/master
Commit: 90333d9a534e0dcc8eab1f9d5fc10737faa9c221
Parents: e355033
Author: James Taylor <jtay...@salesforce.com>
Authored: Tue Oct 7 01:06:23 2014 -0700
Committer: James Taylor <jtay...@salesforce.com>
Committed: Tue Oct 7 02:31:42 2014 -0700

----------------------------------------------------------------------
 .../coprocessor/MetaDataEndpointImpl.java       |  44 ++++---
 .../phoenix/schema/stat/StatisticsTable.java    |   4 +-
 .../phoenix/schema/stat/StatisticsUtil.java     | 117 +++++++++++++++++
 .../phoenix/schema/stat/StatisticsUtils.java    | 131 -------------------
 .../java/org/apache/phoenix/query/BaseTest.java |   1 -
 .../phoenix/query/QueryServicesTestImpl.java    |   2 -
 6 files changed, 143 insertions(+), 156 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/90333d9a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index 53e9c4b..998f057 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -61,8 +61,6 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
@@ -132,7 +130,7 @@ import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.stat.PTableStats;
-import org.apache.phoenix.schema.stat.StatisticsUtils;
+import org.apache.phoenix.schema.stat.StatisticsUtil;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.ByteUtil;
@@ -271,8 +269,6 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
     }
 
     private RegionCoprocessorEnvironment env;
-         
-    private static final Log LOG = LogFactory.getLog(MetaDataEndpointImpl.class);
 
     /**
      * Stores a reference to the coprocessor environment provided by the
@@ -291,7 +287,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         } else {
             throw new CoprocessorException("Must be loaded on a table region!");
         }
-        LOG.info("Starting Tracing-Metrics Systems");
+        logger.info("Starting Tracing-Metrics Systems");
         // Start the phoenix trace collection
         Tracing.addTraceMetricsSource();
         Metrics.ensureConfigured();
@@ -397,20 +393,20 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 return table;
             }
             // Query for the latest table first, since it's not cached
-            table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientTimeStamp);
+            table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP);
             if (table != null && table.getTimeStamp() < clientTimeStamp) {
                 return table;
             }
             // Otherwise, query for an older version of the table - it won't be cached
-            return buildTable(key, cacheKey, region, clientTimeStamp, clientTimeStamp);
+            return buildTable(key, cacheKey, region, clientTimeStamp);
         } finally {
             rowLock.release();
         }
     }
 
     private PTable buildTable(byte[] key, ImmutableBytesPtr cacheKey, HRegion region,
-            long buildAsOfTimeStamp, long clientTimeStamp) throws IOException, SQLException {
-        Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, buildAsOfTimeStamp);
+            long clientTimeStamp) throws IOException, SQLException {
+        Scan scan = MetaDataUtil.newTableRowsScan(key, MIN_TABLE_TIMESTAMP, clientTimeStamp);
         RegionScanner scanner = region.getScanner(scan);
 
         Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
@@ -418,7 +414,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             PTable oldTable = metaDataCache.getIfPresent(cacheKey);
             long tableTimeStamp = oldTable == null ? MIN_TABLE_TIMESTAMP-1 : oldTable.getTimeStamp();
             PTable newTable;
-            newTable = getTable(scanner, buildAsOfTimeStamp, tableTimeStamp, clientTimeStamp);
+            newTable = getTable(scanner, clientTimeStamp, tableTimeStamp);
             if (newTable == null) {
                 return null;
             }
@@ -517,7 +513,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         columns.add(column);
     }
     
-    private PTable getTable(RegionScanner scanner, long buildAsOfTimeStamp, long tableTimeStamp, long clientTimeStamp)
+    private PTable getTable(RegionScanner scanner, long clientTimeStamp, long tableTimeStamp)
         throws IOException, SQLException {
         List<Cell> results = Lists.newArrayList();
         scanner.next(results);
@@ -652,7 +648,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
           if (colName.getString().isEmpty() && famName != null) {
              LinkType linkType = LinkType.fromSerializedValue(colKv.getValueArray()[colKv.getValueOffset()]);
               if (linkType == LinkType.INDEX_TABLE) {
-                  addIndexToTable(tenantId, schemaName, famName, tableName, buildAsOfTimeStamp, indexes);
+                  addIndexToTable(tenantId, schemaName, famName, tableName, clientTimeStamp, indexes);
               } else if (linkType == LinkType.PHYSICAL_TABLE) {
                   physicalTables.add(famName);
               } else {
@@ -664,10 +660,18 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         }
         PName physicalTableName = physicalTables.isEmpty() ? PNameFactory.newName(SchemaUtil.getTableName(
                 schemaName.getString(), tableName.getString())) : physicalTables.get(0);
-        PTableStats stats = tenantId == null ? StatisticsUtils.readStatistics(
-                ServerUtil.getHTableForCoprocessorScan(env, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME),
-                physicalTableName.getBytes(), 
-                clientTimeStamp) : null;
+        PTableStats stats = null;
+        if (tenantId == null) {
+            HTableInterface statsHTable = null;
+            try {
+                statsHTable = ServerUtil.getHTableForCoprocessorScan(env, PhoenixDatabaseMetaData.SYSTEM_STATS_NAME);
+                stats = StatisticsUtil.readStatistics(statsHTable, physicalTableName.getBytes(), clientTimeStamp);
+            } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
+                logger.warn(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME + " not online yet?");
+            } finally {
+                if (statsHTable != null) statsHTable.close();
+            }
+        }
         return PTableImpl.makePTable(tenantId, schemaName, tableName, tableType, indexState, timeStamp, 
             tableSeqNum, pkName, saltBucketNum, columns, tableType == INDEX ? dataTableName : null, 
             indexes, isImmutableRows, physicalTables, defaultFamilyName, viewStatement, disableWAL, 
@@ -715,7 +719,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
         Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
         PTable table = metaDataCache.getIfPresent(cacheKey);
         // We always cache the latest version - fault in if not in cache
-        if (table != null || (table = buildTable(key, cacheKey, region, asOfTimeStamp, clientTimeStamp)) != null) {
+        if (table != null || (table = buildTable(key, cacheKey, region, asOfTimeStamp)) != null) {
             return table;
         }
         // if not found then check if newer table already exists and add delete marker for timestamp
@@ -1016,7 +1020,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
        
         // We always cache the latest version - fault in if not in cache
         if (table != null
-                || (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientTimeStamp)) != null) {
+                || (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP)) != null) {
             if (table.getTimeStamp() < clientTimeStamp) {
                 if (isTableDeleted(table) || tableType != table.getType()) {
                     return new MetaDataMutationResult(MutationCode.TABLE_NOT_FOUND, EnvironmentEdgeManager.currentTimeMillis(), null);
@@ -1173,7 +1177,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                 // Get client timeStamp from mutations
                 long clientTimeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
                 if (table == null
-                        && (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP, clientTimeStamp)) == null) {
+                        && (table = buildTable(key, cacheKey, region, HConstants.LATEST_TIMESTAMP)) == null) {
                     // if not found then call newerTableExists and add delete marker for timestamp
                     // found
                     if (buildDeletedTable(key, cacheKey, region, clientTimeStamp) != null) {

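The stats-reading hunk above replaces a one-expression ternary with explicit caller-side resource handling: open the SYSTEM.STATS table, read, treat a missing table as "not online yet", and always close the handle. Below is a minimal standalone sketch of that shape; all types are hypothetical stand-ins, not the HBase/Phoenix API.

import java.io.Closeable;
import java.io.IOException;

public class StatsReadPattern {
    // Hypothetical stand-in for an HTableInterface-like handle.
    interface StatsTable extends Closeable {
        byte[] read(byte[] key) throws IOException;
    }

    // Hypothetical stand-in for org.apache.hadoop.hbase.TableNotFoundException.
    static class TableNotFoundException extends IOException {}

    // Hypothetical stand-in for the environment that hands out table handles.
    interface StatsTableFactory {
        StatsTable open(String name) throws IOException;
    }

    static byte[] readStatsOrNull(StatsTableFactory factory, String name, byte[] key)
            throws IOException {
        StatsTable statsTable = null;
        try {
            statsTable = factory.open(name);   // may throw TableNotFoundException
            return statsTable.read(key);
        } catch (TableNotFoundException e) {
            return null;                       // stats table not online yet; warn and move on
        } finally {
            if (statsTable != null) statsTable.close(); // handle opened here is closed here
        }
    }
}

Compared with the deleted StatisticsUtils.readStatistics (further down), which closed the table and caught broadly inside the utility, this keeps the handle's lifetime in the scope that opened it and narrows the catch to the one expected exception.
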
http://git-wip-us.apache.org/repos/asf/phoenix/blob/90333d9a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsTable.java
index fba8df7..3c0f376 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsTable.java
@@ -101,7 +101,7 @@ public class StatisticsTable implements Closeable {
     public void addStats(String regionName, StatisticsCollector tracker, String fam, List<Mutation> mutations) throws IOException {
         if (tracker == null) { return; }
 
-        byte[] prefix = StatisticsUtils.getRowKey(tableName, PDataType.VARCHAR.toBytes(fam),
+        byte[] prefix = StatisticsUtil.getRowKey(tableName, PDataType.VARCHAR.toBytes(fam),
                 PDataType.VARCHAR.toBytes(regionName));
         Put put = new Put(prefix);
         if (tracker.getGuidePosts(fam) != null) {
@@ -160,7 +160,7 @@ public class StatisticsTable implements Closeable {
     
     public void deleteStats(String regionName, StatisticsCollector tracker, String fam, List<Mutation> mutations)
             throws IOException {
-        byte[] prefix = StatisticsUtils.getRowKey(tableName, PDataType.VARCHAR.toBytes(fam),
+        byte[] prefix = StatisticsUtil.getRowKey(tableName, PDataType.VARCHAR.toBytes(fam),
                 PDataType.VARCHAR.toBytes(regionName));
         mutations.add(new Delete(prefix, clientTimeStamp - 1));
     }

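Both call sites above build their mutation row key via StatisticsUtil.getRowKey (introduced in the new file below), which packs table name, column family, and region name into one key with a separator byte between the parts. A self-contained sketch of that layout; the separator value of 0 is an assumption matching Phoenix's QueryConstants.SEPARATOR_BYTE:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class RowKeyDemo {
    static final byte SEPARATOR_BYTE = 0; // assumed value of QueryConstants.SEPARATOR_BYTE

    // Same layout as StatisticsUtil.getRowKey: table \x00 fam \x00 region
    static byte[] getRowKey(byte[] table, byte[] fam, byte[] region) {
        byte[] rowKey = new byte[table.length + fam.length + region.length + 2];
        int offset = 0;
        System.arraycopy(table, 0, rowKey, offset, table.length);
        offset += table.length;
        rowKey[offset++] = SEPARATOR_BYTE;
        System.arraycopy(fam, 0, rowKey, offset, fam.length);
        offset += fam.length;
        rowKey[offset++] = SEPARATOR_BYTE;
        System.arraycopy(region, 0, rowKey, offset, region.length);
        return rowKey;
    }

    public static void main(String[] args) {
        byte[] key = getRowKey("T1".getBytes(StandardCharsets.UTF_8),
                               "CF".getBytes(StandardCharsets.UTF_8),
                               "R1".getBytes(StandardCharsets.UTF_8));
        System.out.println(Arrays.toString(key)); // [84, 49, 0, 67, 70, 0, 82, 49]
    }
}

Because the key always starts with the source table's bytes, a prefix scan on the table name (as readStatistics does) picks up every family/region row for that table.
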
http://git-wip-us.apache.org/repos/asf/phoenix/blob/90333d9a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtil.java
new file mode 100644
index 0000000..c48d62f
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtil.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema.stat;
+import static org.apache.phoenix.util.SchemaUtil.getVarCharLength;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.TreeMap;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.MetaDataProtocol;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.PDataType;
+import org.apache.phoenix.schema.PhoenixArray;
+import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.MetaDataUtil;
+
+import com.google.common.collect.Lists;
+/**
+ * Simple utility class for managing multiple key parts of the statistic
+ */
+public class StatisticsUtil {
+    private StatisticsUtil() {
+        // private ctor for utility classes
+    }
+
+    /** Number of parts in our complex key */
+    protected static final int NUM_KEY_PARTS = 3;
+
+    public static byte[] getRowKey(byte[] table, byte[] fam, byte[] region) {
+        // always starts with the source table
+        byte[] rowKey = new byte[table.length + fam.length + region.length + 2];
+        int offset = 0;
+        System.arraycopy(table, 0, rowKey, offset, table.length);
+        offset += table.length;
+        rowKey[offset++] = QueryConstants.SEPARATOR_BYTE;
+        System.arraycopy(fam, 0, rowKey, offset, fam.length);
+        offset += fam.length;
+        rowKey[offset++] = QueryConstants.SEPARATOR_BYTE;
+        System.arraycopy(region, 0, rowKey, offset, region.length);
+        return rowKey;
+    }
+    
+    public static byte[] copyRow(KeyValue kv) {
+        return Arrays.copyOfRange(kv.getRowArray(), kv.getRowOffset(), kv.getRowOffset() + kv.getRowLength());
+    }
+
+    public static PTableStats readStatistics(HTableInterface statsHTable, byte[] tableNameBytes, long clientTimeStamp) throws IOException {
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+        Scan s = MetaDataUtil.newTableRowsScan(tableNameBytes, MetaDataProtocol.MIN_TABLE_TIMESTAMP, clientTimeStamp);
+        s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES);
+        ResultScanner scanner = statsHTable.getScanner(s);
+        Result result = null;
+        TreeMap<byte[], List<byte[]>> guidePostsPerCf = new TreeMap<byte[], List<byte[]>>(Bytes.BYTES_COMPARATOR);
+        while ((result = scanner.next()) != null) {
+            CellScanner cellScanner = result.cellScanner();
+            while (cellScanner.advance()) {
+                Cell current = cellScanner.current();
+                int tableNameLength = tableNameBytes.length + 1;
+                int cfOffset = current.getRowOffset() + tableNameLength;
+                int cfLength = getVarCharLength(current.getRowArray(), cfOffset, current.getRowLength() - tableNameLength);
+                ptr.set(current.getRowArray(), cfOffset, cfLength);
+                byte[] cfName = ByteUtil.copyKeyBytesIfNecessary(ptr);
+                PhoenixArray array = (PhoenixArray)PDataType.VARBINARY_ARRAY.toObject(current.getValueArray(), current.getValueOffset(), current
+                        .getValueLength());
+                if (array != null && array.getDimensions() != 0) {
+                    List<byte[]> guidePosts = Lists.newArrayListWithExpectedSize(array.getDimensions());
+                    for (int j = 0; j < array.getDimensions(); j++) {
+                        byte[] gp = array.toBytes(j);
+                        if (gp.length != 0) {
+                            guidePosts.add(gp);
+                        }
+                    }
+                    List<byte[]> gps = guidePostsPerCf.put(cfName, guidePosts);
+                    if (gps != null) { // Add guidepost already there from other regions
+                        guidePosts.addAll(gps);
+                    }
+                }
+            }
+        }
+        if (!guidePostsPerCf.isEmpty()) {
+            // Sort guideposts, as the order above will depend on the order we traverse
+            // each region's worth of guideposts above.
+            for (List<byte[]> gps : guidePostsPerCf.values()) {
+                Collections.sort(gps, Bytes.BYTES_COMPARATOR);
+            }
+            return new PTableStatsImpl(guidePostsPerCf);
+        }
+        return PTableStatsImpl.NO_STATS;
+    }
+}
\ No newline at end of file

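In readStatistics above, guideposts for one column family can arrive split across several region rows, in whatever order the scan returns them; lists for the same family are concatenated on the fly and sorted once at the end. A toy walk-through of that merge, with a plain byte[] comparator and made-up keys standing in for Bytes.BYTES_COMPARATOR and real guideposts:

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.TreeMap;

public class GuidePostMergeDemo {
    // Unsigned lexicographic byte[] comparison, like Bytes.BYTES_COMPARATOR.
    static final Comparator<byte[]> CMP = (a, b) -> {
        for (int i = 0; i < Math.min(a.length, b.length); i++) {
            int c = (a[i] & 0xff) - (b[i] & 0xff);
            if (c != 0) return c;
        }
        return a.length - b.length;
    };

    public static void main(String[] args) {
        TreeMap<byte[], List<byte[]>> guidePostsPerCf = new TreeMap<>(CMP);
        // Region 2's guideposts may be scanned before region 1's.
        put(guidePostsPerCf, "CF", "k5", "k9");
        put(guidePostsPerCf, "CF", "k1", "k3");
        for (List<byte[]> gps : guidePostsPerCf.values()) {
            Collections.sort(gps, CMP); // restore global key order, as the diff does
        }
        for (byte[] gp : guidePostsPerCf.get("CF".getBytes())) {
            System.out.println(new String(gp)); // k1, k3, k5, k9
        }
    }

    // Mirrors the put-then-merge step: a prior list for the same family
    // is folded into the incoming one.
    static void put(TreeMap<byte[], List<byte[]>> map, String cf, String... gps) {
        List<byte[]> incoming = new ArrayList<>();
        for (String gp : gps) incoming.add(gp.getBytes());
        List<byte[]> prior = map.put(cf.getBytes(), incoming);
        if (prior != null) incoming.addAll(prior);
    }
}
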
http://git-wip-us.apache.org/repos/asf/phoenix/blob/90333d9a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtils.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtils.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtils.java
deleted file mode 100644
index b45dfbf..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stat/StatisticsUtils.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.schema.stat;
-import static org.apache.phoenix.util.SchemaUtil.getVarCharLength;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.TreeMap;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.coprocessor.MetaDataProtocol;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
-import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.PDataType;
-import org.apache.phoenix.schema.PhoenixArray;
-import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.MetaDataUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Lists;
-/**
- * Simple utility class for managing multiple key parts of the statistic
- */
-public class StatisticsUtils {
-    private static final Logger logger = LoggerFactory.getLogger(StatisticsUtils.class);
-
-    private StatisticsUtils() {
-        // private ctor for utility classes
-    }
-
-    /** Number of parts in our complex key */
-    protected static final int NUM_KEY_PARTS = 3;
-
-    public static byte[] getRowKey(byte[] table, byte[] fam, byte[] region) {
-        // always starts with the source table
-        byte[] rowKey = new byte[table.length + fam.length + region.length + 2];
-        int offset = 0;
-        System.arraycopy(table, 0, rowKey, offset, table.length);
-        offset += table.length;
-        rowKey[offset++] = QueryConstants.SEPARATOR_BYTE;
-        System.arraycopy(fam, 0, rowKey, offset, fam.length);
-        offset += fam.length;
-        rowKey[offset++] = QueryConstants.SEPARATOR_BYTE;
-        System.arraycopy(region, 0, rowKey, offset, region.length);
-        return rowKey;
-    }
-    
-    public static byte[] copyRow(KeyValue kv) {
-        return Arrays.copyOfRange(kv.getRowArray(), kv.getRowOffset(), kv.getRowOffset() + kv.getRowLength());
-    }
-
-    public static PTableStats readStatistics(HTableInterface statsHTable, byte[] tableNameBytes, long clientTimeStamp) throws IOException {
-        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
-        try {
-            Scan s = MetaDataUtil.newTableRowsScan(tableNameBytes, MetaDataProtocol.MIN_TABLE_TIMESTAMP, clientTimeStamp);
-            s.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_BYTES);
-            ResultScanner scanner = statsHTable.getScanner(s);
-            Result result = null;
-            TreeMap<byte[], List<byte[]>> guidePostsPerCf = new TreeMap<byte[], List<byte[]>>(Bytes.BYTES_COMPARATOR);
-            while ((result = scanner.next()) != null) {
-                CellScanner cellScanner = result.cellScanner();
-                while (cellScanner.advance()) {
-                    Cell current = cellScanner.current();
-                    int tableNameLength = tableNameBytes.length + 1;
-                    int cfOffset = current.getRowOffset() + tableNameLength;
-                    int cfLength = getVarCharLength(current.getRowArray(), cfOffset, current.getRowLength() - tableNameLength);
-                    ptr.set(current.getRowArray(), cfOffset, cfLength);
-                    byte[] cfName = ByteUtil.copyKeyBytesIfNecessary(ptr);
-                    PhoenixArray array = (PhoenixArray)PDataType.VARBINARY_ARRAY.toObject(current.getValueArray(), current.getValueOffset(), current
-                            .getValueLength());
-                    if (array != null && array.getDimensions() != 0) {
-                        List<byte[]> guidePosts = Lists.newArrayListWithExpectedSize(array.getDimensions());
-                        for (int j = 0; j < array.getDimensions(); j++) {
-                            byte[] gp = array.toBytes(j);
-                            if (gp.length != 0) {
-                                guidePosts.add(gp);
-                            }
-                        }
-                        List<byte[]> gps = guidePostsPerCf.put(cfName, guidePosts);
-                        if (gps != null) { // Add guidepost already there from other regions
-                            guidePosts.addAll(gps);
-                        }
-                    }
-                }
-            }
-            if (!guidePostsPerCf.isEmpty()) {
-                // Sort guideposts, as the order above will depend on the order we traverse
-                // each region's worth of guideposts above.
-                for (List<byte[]> gps : guidePostsPerCf.values()) {
-                    Collections.sort(gps, Bytes.BYTES_COMPARATOR);
-                }
-                return new PTableStatsImpl(guidePostsPerCf);
-            }
-        } catch (Exception e) {
-            if (e instanceof org.apache.hadoop.hbase.TableNotFoundException) {
-                logger.warn("Stats table not yet online", e);
-            } else {
-                throw new IOException(e);
-            }
-        } finally {
-            statsHTable.close();
-        }
-        return PTableStatsImpl.NO_STATS;
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/phoenix/blob/90333d9a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 96257f6..653c90d 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -599,7 +599,6 @@ public abstract class BaseTest {
         conf.setInt("hbase.hlog.asyncer.number", 2);
         conf.setInt("hbase.assignment.zkevent.workers", 5);
         conf.setInt("hbase.assignment.threads.max", 5);
-        conf.setInt(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, 20);
         return conf;
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/90333d9a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
index 3151588..a7d34da 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/QueryServicesTestImpl.java
@@ -52,7 +52,6 @@ public final class QueryServicesTestImpl extends BaseQueryServicesImpl {
     public static final long DEFAULT_MAX_SERVER_METADATA_CACHE_SIZE =  1024L*1024L*4L; // 4 Mb
     public static final long DEFAULT_MAX_CLIENT_METADATA_CACHE_SIZE =  1024L*1024L*2L; // 2 Mb
     public static final long DEFAULT_STATS_HISTOGRAM_DEPTH_BYTES = 2000;
-    public static final int DEFAULT_STATS_UPDATE_FREQ_MS = 10000;
     public static final int DEFAULT_MIN_STATS_UPDATE_FREQ_MS = 0;
 
     
@@ -62,7 +61,6 @@ public final class QueryServicesTestImpl extends BaseQueryServicesImpl {
     
     private static QueryServicesOptions getDefaultServicesOptions() {
        return withDefaults()
-               .setStatsUpdateFrequencyMs(DEFAULT_STATS_UPDATE_FREQ_MS)
                .setMinStatsUpdateFrequencyMs(DEFAULT_MIN_STATS_UPDATE_FREQ_MS)
               .setStatsHistogramDepthBytes(DEFAULT_STATS_HISTOGRAM_DEPTH_BYTES)
                 .setThreadPoolSize(DEFAULT_THREAD_POOL_SIZE)
