Repository: phoenix
Updated Branches:
  refs/heads/5.x-HBase-2.0 10a0778eb -> e1238aa3c
PHOENIX-4376 Fix all compilation error for package org.apache.phoenix.schema.stats and org.apache.phoenix.coprocessor


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e1238aa3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e1238aa3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e1238aa3

Branch: refs/heads/5.x-HBase-2.0
Commit: e1238aa3cb8cdf962b70ca8f1ae197285c558891
Parents: 10a0778
Author: Ankit Singhal <[email protected]>
Authored: Wed Nov 15 11:28:57 2017 +0530
Committer: Ankit Singhal <[email protected]>
Committed: Wed Nov 15 11:28:57 2017 +0530

----------------------------------------------------------------------
 .../hadoop/hbase/regionserver/ScanInfoUtil.java | 35 -------------------
 .../coprocessor/BaseScannerRegionObserver.java  | 26 ++------------
 .../coprocessor/DelegateRegionObserver.java     | 32 ++++++++++-------
 .../coprocessor/DelegateRegionScanner.java      |  5 ---
 .../coprocessor/HashJoinRegionScanner.java      |  4 +--
 .../coprocessor/MetaDataEndpointImpl.java       |  4 +--
 .../UngroupedAggregateRegionObserver.java       | 36 +++++++++++---------
 .../stats/DefaultStatisticsCollector.java       |  6 ++--
 .../stats/StatisticsCollectionRunTracker.java   | 20 +++++------
 .../phoenix/schema/stats/StatisticsUtil.java    |  4 +--
 10 files changed, 60 insertions(+), 112 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1238aa3/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfoUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfoUtil.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfoUtil.java
deleted file mode 100644
index 9885c78..0000000
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfoUtil.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.hadoop.hbase.KeepDeletedCells;
-
-public class ScanInfoUtil {
-    private ScanInfoUtil() {
-    }
-
-    public static boolean isKeepDeletedCells(ScanInfo scanInfo) {
-        return scanInfo.getKeepDeletedCells() != KeepDeletedCells.FALSE;
-    }
-
-    public static ScanInfo cloneScanInfoWithKeepDeletedCells(ScanInfo scanInfo) {
-        return new ScanInfo(scanInfo.getConfiguration(), scanInfo.getFamily(), Math.max(scanInfo.getMinVersions(), 1),
-                scanInfo.getMaxVersions(), scanInfo.getTtl(), KeepDeletedCells.TRUE,
-                scanInfo.getTimeToPurgeDeletes(), scanInfo.getComparator());
-    }
-}
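
The deleted ScanInfoUtil existed only to flip KeepDeletedCells on a store's
ScanInfo for the preStoreScannerOpen hook that is removed in the next file.
As a minimal sketch, assuming HBase 2.0 APIs, the same keep-deleted-cells
check can be made against the column family descriptor directly; the helper
class here is hypothetical and not part of this commit:

    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;

    public final class KeepDeletedCellsCheck {
        private KeepDeletedCellsCheck() {
        }

        // True unless the family explicitly disables keeping deleted cells.
        public static boolean isKeepDeletedCells(ColumnFamilyDescriptor family) {
            return family.getKeepDeletedCells() != KeepDeletedCells.FALSE;
        }
    }
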
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1238aa3/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 7c6df8f..ee34a8a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -19,11 +19,9 @@ package org.apache.phoenix.coprocessor;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.NavigableSet;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -31,15 +29,10 @@ import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.RegionObserver;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.regionserver.ScanInfo;
-import org.apache.hadoop.hbase.regionserver.ScanInfoUtil;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.ScannerContextUtil;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
@@ -54,7 +47,6 @@ import org.apache.phoenix.schema.types.PUnsignedTinyint;
 import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.ServerUtil;
-import org.apache.phoenix.util.TransactionUtil;
 
 
 abstract public class BaseScannerRegionObserver implements RegionObserver {
@@ -187,8 +179,8 @@ abstract public class BaseScannerRegionObserver implements RegionObserver {
     }
 
     @Override
-    public RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-        final Scan scan, final RegionScanner s) throws IOException {
+    public void preScannerOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
+            Scan scan) throws IOException {
         byte[] txnScn = scan.getAttribute(TX_SCN);
         if (txnScn!=null) {
             TimeRange timeRange = scan.getTimeRange();
@@ -208,7 +200,6 @@ abstract public class BaseScannerRegionObserver implements RegionObserver {
         }
         this.encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan);
         this.useNewValueColumnQualifier = EncodedColumnsUtil.useNewValueColumnQualifier(scan);
-        return s;
     }
 
     private class RegionScannerHolder extends DelegateRegionScanner {
@@ -352,17 +343,4 @@ abstract public class BaseScannerRegionObserver implements RegionObserver {
                 dataRegion, indexMaintainer, null, viewConstants, null, null, projector, ptr, useQualiferAsListIndex);
     }
 
-    @Override
-    public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-        final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
-        final KeyValueScanner s) throws IOException {
-
-      if (scan.isRaw() || ScanInfoUtil.isKeepDeletedCells(store.getScanInfo()) || scan.getTimeRange().getMax() == HConstants.LATEST_TIMESTAMP || TransactionUtil.isTransactionalTimestamp(scan.getTimeRange().getMax())) {
-        return s;
-      }
-
-      ScanInfo scanInfo = ScanInfoUtil.cloneScanInfoWithKeepDeletedCells(store.getScanInfo());
-      return new StoreScanner(store, scanInfo, scan, targetCols,
-          c.getEnvironment().getRegion().getReadpoint(scan.getIsolationLevel()));
-    }
 }
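
In HBase 2.0, RegionObserver.preScannerOpen no longer receives or returns a
RegionScanner; it may only adjust the Scan, and any scanner wrapping has to
happen in postScannerOpen. A minimal sketch of that contract, assuming the
HBase 2.0 coprocessor API (the class name is illustrative only):

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;
    import org.apache.hadoop.hbase.regionserver.RegionScanner;

    public class ExampleScannerObserver implements RegionObserver {
        @Override
        public void preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
                Scan scan) throws IOException {
            // Mutate the Scan here; there is no scanner to replace any more.
        }

        @Override
        public RegionScanner postScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
                Scan scan, RegionScanner s) throws IOException {
            return s; // Wrap or replace the freshly opened scanner here instead.
        }
    }
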
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1238aa3/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
index 34eee78..8fcd68d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionObserver.java
@@ -74,25 +74,31 @@ public class DelegateRegionObserver implements RegionObserver {
 
     @Override
-    public void preFlush(ObserverContext<RegionCoprocessorEnvironment> c) throws IOException {
-        delegate.preFlush(c);
+    public void preFlush(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
+            org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) throws IOException {
+        delegate.preFlush(c, tracker);
+        ;
     }
 
     @Override
-    public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
-            InternalScanner scanner) throws IOException {
-        return delegate.preFlush(c, store, scanner);
+    public InternalScanner preFlush(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
+            Store store, InternalScanner scanner, org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker)
+            throws IOException {
+        return delegate.preFlush(c, store, scanner, tracker);
     }
 
     @Override
-    public void postFlush(ObserverContext<RegionCoprocessorEnvironment> c) throws IOException {
-        delegate.postFlush(c);
+    public void postFlush(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
+            org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker) throws IOException {
+        delegate.postFlush(c, tracker);
     }
+
 
     @Override
-    public void postFlush(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
-            StoreFile resultFile) throws IOException {
-        delegate.postFlush(c, store, resultFile);
+    public void postFlush(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
+            Store store, StoreFile resultFile, org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker tracker)
+            throws IOException {
+        delegate.postFlush(c, store,
+                resultFile, tracker);
     }
 
@@ -232,9 +238,9 @@ public class DelegateRegionObserver implements RegionObserver {
     }
 
     @Override
-    public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
-            RegionScanner s) throws IOException {
-        return delegate.preScannerOpen(c, scan, s);
+    public void preScannerOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
+            Scan scan) throws IOException {
+        delegate.preScannerOpen(c, scan);
     }
 
     @Override
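
Every flush hook in HBase 2.0 gains a FlushLifeCycleTracker parameter, which
the delegating overrides above simply pass through. A minimal sketch of the
new hook shape, assuming the HBase 2.0 API (class name illustrative):

    import java.io.IOException;

    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;
    import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
    import org.apache.hadoop.hbase.regionserver.InternalScanner;
    import org.apache.hadoop.hbase.regionserver.Store;

    public class ExampleFlushObserver implements RegionObserver {
        @Override
        public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> c,
                Store store, InternalScanner scanner, FlushLifeCycleTracker tracker)
                throws IOException {
            // Pass the scanner through; a real observer could wrap it here.
            return scanner;
        }
    }
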
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1238aa3/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
index 21a8eef..1955708 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/DelegateRegionScanner.java
@@ -82,11 +82,6 @@ public class DelegateRegionScanner implements RegionScanner {
         return delegate.getBatch();
     }
 
-    @Override
-    public void shipped() throws IOException {
-        delegate.shipped();
-
-    }
 
     @Override
     public RegionInfo getRegionInfo() {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1238aa3/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
index 59f844d..c24bb74 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/HashJoinRegionScanner.java
@@ -25,7 +25,7 @@ import java.util.Queue;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
@@ -248,7 +248,7 @@ public class HashJoinRegionScanner implements RegionScanner {
     }
 
     @Override
-    public HRegionInfo getRegionInfo() {
+    public RegionInfo getRegionInfo() {
         return scanner.getRegionInfo();
     }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1238aa3/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index a87e961..63719cf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -1728,7 +1728,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             // Table hTable = env.getTable(systemCatalogTableName);
             // These deprecated calls work around the issue
             try (Table hTable = ServerUtil.getHTableForCoprocessorScan(env,
-                    region.getTableDesc().getTableName())) {
+                    region.getTableDescriptor().getTableName())) {
                 boolean allViewsInCurrentRegion = true;
                 int numOfChildViews = 0;
                 List<ViewInfo> viewInfoList = Lists.newArrayList();
@@ -1777,7 +1777,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             // Table hTable = env.getTable(systemCatalogTableName);
             // These deprecated calls work around the issue
             try (Table hTable = ServerUtil.getHTableForCoprocessorScan(env,
-                    region.getTableDesc().getTableName())) {
+                    region.getTableDescriptor().getTableName())) {
                 boolean allViewsInCurrentRegion = true;
                 int numOfChildViews = 0;
                 List<ViewInfo> viewInfoList = Lists.newArrayList();
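
Two recurring HBase 2.0 renames show up here and in the files below: the
HRegionInfo class gives way to the org.apache.hadoop.hbase.client.RegionInfo
interface, and Region.getTableDesc() becomes Region.getTableDescriptor(),
returning a TableDescriptor instead of the removed HTableDescriptor. A hedged
sketch of the lookup pattern, assuming HBase 2.0 APIs (the helper class is
hypothetical):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.regionserver.Region;

    final class RegionMetadata {
        private RegionMetadata() {
        }

        // The table descriptor still exposes the table name.
        static TableName tableNameOf(Region region) {
            TableDescriptor desc = region.getTableDescriptor();
            return desc.getTableName();
        }

        // RegionInfo keeps HRegionInfo's row-containment check.
        static boolean ownsRow(Region region, byte[] row) {
            RegionInfo info = region.getRegionInfo();
            return info.containsRow(row);
        }
    }
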
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1238aa3/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 82bfc07..332ecf2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -47,20 +47,21 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@@ -304,18 +305,17 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
     }
 
     @Override
-    public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> e, Scan scan, RegionScanner s)
-            throws IOException {
-        s = super.preScannerOpen(e, scan, s);
+    public void preScannerOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext<RegionCoprocessorEnvironment> c,
+            Scan scan) throws IOException {
+
         if (ScanUtil.isAnalyzeTable(scan)) {
             // We are setting the start row and stop row such that it covers the entire region. As part
             // of Phonenix-1263 we are storing the guideposts against the physical table rather than
             // individual tenant specific tables.
-            scan.setStartRow(HConstants.EMPTY_START_ROW);
-            scan.setStopRow(HConstants.EMPTY_END_ROW);
+            scan.withStartRow(HConstants.EMPTY_START_ROW);
+            scan.withStopRow(HConstants.EMPTY_END_ROW);
             scan.setFilter(null);
         }
-        return s;
     }
 
     public static class MutationList extends ArrayList<Mutation> {
@@ -831,7 +831,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
 
     private void checkForLocalIndexColumnFamilies(Region region,
             List<IndexMaintainer> indexMaintainers) throws IOException {
-        HTableDescriptor tableDesc = region.getTableDesc();
+        TableDescriptor tableDesc = region.getTableDescriptor();
         String schemaName =
                 tableDesc.getTableName().getNamespaceAsString()
                         .equals(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR) ? SchemaUtil
@@ -843,7 +843,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
             if(coveredColumns.isEmpty()) {
                 byte[] localIndexCf = indexMaintainer.getEmptyKeyValueFamily().get();
                 // When covered columns empty we store index data in default column family so check for it.
-                if (tableDesc.getFamily(localIndexCf) == null) {
+                if (tableDesc.getColumnFamily(localIndexCf) == null) {
                     ServerUtil.throwIOException("Column Family Not Found",
                         new ColumnFamilyNotFoundException(schemaName, tableName, Bytes
                                 .toString(localIndexCf)));
@@ -851,7 +851,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
             }
             for (ColumnReference reference : coveredColumns) {
                 byte[] cf = IndexUtil.getLocalIndexColumnFamily(reference.getFamily());
-                HColumnDescriptor family = region.getTableDesc().getFamily(cf);
+                ColumnFamilyDescriptor family = region.getTableDescriptor().getColumnFamily(cf);
                 if (family == null) {
                     ServerUtil.throwIOException("Column Family Not Found",
                         new ColumnFamilyNotFoundException(schemaName, tableName, Bytes.toString(cf)));
@@ -882,7 +882,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         //if we're writing to the same table, but the PK can change, that means that some
         //mutations might be in our current region, and others in a different one.
         if (areMutationsInSameTable && isPKChanging) {
-            HRegionInfo regionInfo = region.getRegionInfo();
+            RegionInfo regionInfo = region.getRegionInfo();
             for (Mutation mutation : mutations){
                 if (regionInfo.containsRow(mutation.getRow())){
                     localRegionMutations.add(mutation);
@@ -898,8 +898,8 @@
     }
 
     private boolean areMutationsInSameTable(Table targetHTable, Region region) {
-        return (targetHTable == null || Bytes.compareTo(targetHTable.getName(),
-                region.getTableDesc().getTableName().getName()) == 0);
+        return (targetHTable == null || Bytes.compareTo(targetHTable.getName().getName(),
+                region.getTableDescriptor().getTableName().getName()) == 0);
     }
 
     @Override
@@ -1101,7 +1101,7 @@
 
             RegionScanner scanner = new BaseRegionScanner(innerScanner) {
                 @Override
-                public HRegionInfo getRegionInfo() {
+                public RegionInfo getRegionInfo() {
                     return region.getRegionInfo();
                 }
 
@@ -1159,7 +1159,7 @@
                 SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
             RegionScanner scanner = new BaseRegionScanner(innerScanner) {
                 @Override
-                public HRegionInfo getRegionInfo() {
+                public RegionInfo getRegionInfo() {
                     return region.getRegionInfo();
                 }
 
@@ -1325,6 +1325,10 @@
         }
     }
 
+
+    /*
+     * TODO: use waitForFlushes PHOENIX-4352
+     */
     @Override
     public void preSplit(ObserverContext<RegionCoprocessorEnvironment> c, byte[] splitRow)
             throws IOException {
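
HBase 2.0 deprecates Scan.setStartRow/setStopRow in favor of the chainable
withStartRow/withStopRow, and Table.getName() now returns a TableName rather
than raw bytes, hence the extra .getName() in areMutationsInSameTable above.
A minimal sketch of the new Scan boundary calls, assuming HBase 2.0 APIs
(helper name hypothetical):

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.Scan;

    final class FullRegionScan {
        private FullRegionScan() {
        }

        // Widen a Scan to cover the whole region, as the ANALYZE path does.
        static Scan coverEntireRegion(Scan scan) {
            scan.withStartRow(HConstants.EMPTY_START_ROW);
            scan.withStopRow(HConstants.EMPTY_END_ROW);
            scan.setFilter(null);
            return scan;
        }
    }
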
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1238aa3/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index c14759f..42f8b91 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -135,7 +135,7 @@ class DefaultStatisticsCollector implements StatisticsCollector {
                 guidepostWidth = PLong.INSTANCE.getCodec().decodeInt(guidePostWidthBytes, 0, SortOrder.getDefault());
             }
             this.guidePostDepth = StatisticsUtil.getGuidePostDepth(guidepostPerRegion, guidepostWidth,
-                    env.getRegion().getTableDesc());
+                    env.getRegion().getTableDescriptor());
         } else {
             long guidepostWidth = -1;
             Table htable = null;
@@ -203,7 +203,7 @@ class DefaultStatisticsCollector implements StatisticsCollector {
                     config.getLong(
                         QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB,
                         QueryServicesOptions.DEFAULT_STATS_GUIDEPOST_WIDTH_BYTES),
-                    env.getRegion().getTableDesc());
+                    env.getRegion().getTableDescriptor());
             }
         }
     }
@@ -353,7 +353,7 @@ class DefaultStatisticsCollector implements StatisticsCollector {
         if (logger.isDebugEnabled()) {
             logger.debug("Compaction scanner created for stats");
         }
-        ImmutableBytesPtr cfKey = new ImmutableBytesPtr(store.getFamily().getName());
+        ImmutableBytesPtr cfKey = new ImmutableBytesPtr(store.getColumnFamilyDescriptor().getName());
         // Potentially perform a cross region server get in order to use the correct guide posts
         // width for the table being compacted.
         init();
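
Store.getFamily() is gone in HBase 2.0; the column family metadata now comes
from Store.getColumnFamilyDescriptor(). A hedged sketch of the key-derivation
pattern used above, assuming HBase 2.0 APIs (helper name hypothetical):

    import org.apache.hadoop.hbase.regionserver.Store;

    final class FamilyKeys {
        private FamilyKeys() {
        }

        // Extract the family name bytes that key the guide-post map.
        static byte[] familyNameOf(Store store) {
            return store.getColumnFamilyDescriptor().getName();
        }
    }
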
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1238aa3/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectionRunTracker.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectionRunTracker.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectionRunTracker.java
index 560fc0a..1a928db 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectionRunTracker.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsCollectionRunTracker.java
@@ -26,7 +26,7 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 
@@ -38,10 +38,10 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  */
 public class StatisticsCollectionRunTracker {
     private static volatile StatisticsCollectionRunTracker INSTANCE;
-    private final Set<HRegionInfo> updateStatsRegions = Collections
-            .newSetFromMap(new ConcurrentHashMap<HRegionInfo, Boolean>());
-    private final Set<HRegionInfo> compactingRegions = Collections
-            .newSetFromMap(new ConcurrentHashMap<HRegionInfo, Boolean>());
+    private final Set<RegionInfo> updateStatsRegions = Collections
+            .newSetFromMap(new ConcurrentHashMap<RegionInfo, Boolean>());
+    private final Set<RegionInfo> compactingRegions = Collections
+            .newSetFromMap(new ConcurrentHashMap<RegionInfo, Boolean>());
     private final ExecutorService executor;
 
     // Constants added for testing purposes
@@ -77,7 +77,7 @@ public class StatisticsCollectionRunTracker {
      * @return true if the region wasn't already marked for stats collection via compaction, false
      *         otherwise.
      */
-    public boolean addCompactingRegion(HRegionInfo regionInfo) {
+    public boolean addCompactingRegion(RegionInfo regionInfo) {
        return compactingRegions.add(regionInfo);
     }
 
@@ -86,7 +86,7 @@ public class StatisticsCollectionRunTracker {
      * major compaction.
      * @return true if the region was marked for stats collection via compaction, false otherwise.
      */
-    public boolean removeCompactingRegion(HRegionInfo regionInfo) {
+    public boolean removeCompactingRegion(RegionInfo regionInfo) {
        return compactingRegions.remove(regionInfo);
     }
 
@@ -95,7 +95,7 @@ public class StatisticsCollectionRunTracker {
      * @return true if stats are being collected for the region via major compaction, false
      *         otherwise.
      */
-    public boolean areStatsBeingCollectedOnCompaction(HRegionInfo regionInfo) {
+    public boolean areStatsBeingCollectedOnCompaction(RegionInfo regionInfo) {
        return compactingRegions.contains(regionInfo);
     }
 
@@ -103,7 +103,7 @@ public class StatisticsCollectionRunTracker {
      * @param regionInfo for the region to run UPDATE STATISTICS command on.
      * @return true if UPDATE STATISTICS wasn't already running on the region, false otherwise.
      */
-    public boolean addUpdateStatsCommandRegion(HRegionInfo regionInfo) {
+    public boolean addUpdateStatsCommandRegion(RegionInfo regionInfo) {
         return updateStatsRegions.add(regionInfo);
     }
 
@@ -111,7 +111,7 @@ public class StatisticsCollectionRunTracker {
      * @param regionInfo for the region to mark as not running UPDATE STATISTICS command on.
      * @return true if UPDATE STATISTICS was running on the region, false otherwise.
      */
-    public boolean removeUpdateStatsCommandRegion(HRegionInfo regionInfo) {
+    public boolean removeUpdateStatsCommandRegion(RegionInfo regionInfo) {
         return updateStatsRegions.remove(regionInfo);
     }
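
The tracker swaps HRegionInfo for the RegionInfo interface throughout,
including as the element type of its concurrent sets. A minimal sketch of
that pattern, assuming HBase 2.0 APIs (class name illustrative); it relies
on RegionInfo implementations providing value-based equals/hashCode, as
HRegionInfo did:

    import java.util.Collections;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    import org.apache.hadoop.hbase.client.RegionInfo;

    final class RegionSetTracker {
        // Concurrent set of regions currently being processed.
        private final Set<RegionInfo> inFlight =
                Collections.newSetFromMap(new ConcurrentHashMap<RegionInfo, Boolean>());

        // Returns true if the region was not already tracked.
        boolean begin(RegionInfo region) {
            return inFlight.add(region);
        }

        // Returns true if the region was being tracked.
        boolean end(RegionInfo region) {
            return inFlight.remove(region);
        }
    }
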
http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1238aa3/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
index 71b01ae..7f1b4c2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsUtil.java
@@ -24,12 +24,12 @@ import java.util.Set;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
@@ -190,7 +190,7 @@ public class StatisticsUtil {
         return current == null ? GuidePostsInfo.NO_GUIDEPOST : guidePostsInfoBuilder.isEmpty() ? emptyGuidePost : guidePostsInfoBuilder.build();
     }
 
-    public static long getGuidePostDepth(int guidepostPerRegion, long guidepostWidth, HTableDescriptor tableDesc) {
+    public static long getGuidePostDepth(int guidepostPerRegion, long guidepostWidth, TableDescriptor tableDesc) {
         if (guidepostPerRegion > 0) {
             long maxFileSize = HConstants.DEFAULT_MAX_FILE_SIZE;
             if (tableDesc != null) {
