HBASE-18826 Use HStore instead of Store in our own code base and remove unnecessary methods in Store interface
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d26b8f8d Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d26b8f8d Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d26b8f8d Branch: refs/heads/branch-2 Commit: d26b8f8dddb19e9d888961a00fa597d7efb9fbd4 Parents: f73a3a6 Author: zhangduo <zhang...@apache.org> Authored: Thu Sep 28 15:24:41 2017 +0800 Committer: zhangduo <zhang...@apache.org> Committed: Thu Sep 28 15:41:56 2017 +0800 ---------------------------------------------------------------------- .../client/ColumnFamilyDescriptorBuilder.java | 5 + .../example/ZooKeeperScanPolicyObserver.java | 2 +- .../hbase/regionserver/CompactionTool.java | 7 +- .../hbase/mapreduce/TestHFileOutputFormat2.java | 6 +- .../regionserver/BusyRegionSplitPolicy.java | 2 +- .../hadoop/hbase/regionserver/CompactSplit.java | 16 +- .../regionserver/CompactedHFilesDischarger.java | 10 +- .../ConstantSizeRegionSplitPolicy.java | 2 +- .../regionserver/FlushLargeStoresPolicy.java | 4 +- .../hadoop/hbase/regionserver/HRegion.java | 34 +-- .../hbase/regionserver/HRegionServer.java | 2 +- .../hadoop/hbase/regionserver/HStore.java | 147 +++++------- ...IncreasingToUpperBoundRegionSplitPolicy.java | 2 +- .../hbase/regionserver/KeyValueScanner.java | 2 +- .../MetricsRegionServerWrapperImpl.java | 28 ++- .../regionserver/MetricsRegionWrapperImpl.java | 31 ++- .../regionserver/NonLazyKeyValueScanner.java | 2 +- .../hadoop/hbase/regionserver/Region.java | 2 +- .../hbase/regionserver/SegmentScanner.java | 2 +- .../apache/hadoop/hbase/regionserver/Store.java | 239 +++---------------- .../hbase/regionserver/StoreFileManager.java | 2 +- .../hbase/regionserver/StoreFileScanner.java | 2 +- .../hadoop/hbase/regionserver/StoreScanner.java | 2 +- .../hadoop/hbase/regionserver/StoreUtils.java | 14 +- .../compactions/CompactionRequest.java | 5 +- .../compactions/DateTieredCompactor.java | 6 +- 
...sureAwareCompactionThroughputController.java | 2 +- .../throttle/ThroughputControlUtil.java | 10 +- .../hbase-webapps/regionserver/region.jsp | 2 +- .../org/apache/hadoop/hbase/TestIOFencing.java | 2 +- .../TestZooKeeperTableArchiveClient.java | 15 +- ...estAvoidCellReferencesIntoShippedBlocks.java | 14 +- .../client/TestBlockEvictionFromClient.java | 72 +++--- .../hadoop/hbase/client/TestFromClientSide.java | 34 +-- .../TestRegionObserverScannerOpenHook.java | 3 +- .../io/hfile/TestForceCacheImportantBlocks.java | 7 +- .../io/hfile/TestScannerFromBucketCache.java | 6 +- .../regionserver/DelegatingKeyValueScanner.java | 2 +- .../regionserver/NoOpScanPolicyObserver.java | 15 +- .../hbase/regionserver/TestAtomicOperation.java | 32 +-- .../regionserver/TestCacheOnWriteInSchema.java | 2 +- .../hbase/regionserver/TestCompaction.java | 12 +- .../TestCompactionFileNotFound.java | 2 +- .../regionserver/TestCompoundBloomFilter.java | 9 +- .../hbase/regionserver/TestHMobStore.java | 4 +- .../hadoop/hbase/regionserver/TestHRegion.java | 32 +-- .../regionserver/TestHRegionReplayEvents.java | 109 ++++----- .../hadoop/hbase/regionserver/TestHStore.java | 238 +++++++++++------- .../hbase/regionserver/TestHStoreFile.java | 27 +-- .../hbase/regionserver/TestKeepDeletes.java | 18 +- .../hbase/regionserver/TestMajorCompaction.java | 33 ++- .../hbase/regionserver/TestMinorCompaction.java | 11 +- .../regionserver/TestMobStoreCompaction.java | 2 +- .../regionserver/TestPerColumnFamilyFlush.java | 50 ++-- .../regionserver/TestRegionSplitPolicy.java | 2 +- .../TestWalAndCompactingMemStoreFlush.java | 90 +++---- .../compactions/TestDateTieredCompactor.java | 3 +- .../compactions/TestFIFOCompactionPolicy.java | 108 ++++----- .../TestCompactionWithThroughputController.java | 42 ++-- .../TestFlushWithThroughputController.java | 22 +- .../regionserver/wal/AbstractTestWALReplay.java | 5 +- .../hbase/util/TestCoprocessorScanPolicy.java | 15 +- 62 files changed, 759 insertions(+), 869 
deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java ---------------------------------------------------------------------- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java index 5f4d256..14f7381 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java @@ -583,6 +583,11 @@ public class ColumnFamilyDescriptorBuilder { return this; } + public ColumnFamilyDescriptorBuilder setValue(final String key, final String value) { + desc.setValue(key, value); + return this; + } + /** * An ModifyableFamilyDescriptor contains information about a column family such as the * number of versions, compression settings, etc. 
http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java ---------------------------------------------------------------------- diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java index 733a003..80290dd 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java @@ -203,7 +203,7 @@ public class ZooKeeperScanPolicyObserver implements RegionCoprocessor, RegionObs if (data == null) { return null; } - ScanInfo oldSI = store.getScanInfo(); + ScanInfo oldSI = ((HStore) store).getScanInfo(); if (oldSI.getTtl() == Long.MAX_VALUE) { return null; } http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java ---------------------------------------------------------------------- diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index 7912340..a94f50e 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -15,9 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; +import static org.apache.hadoop.hbase.regionserver.Store.PRIORITY_USER; + import java.io.IOException; import java.util.ArrayList; import java.util.HashSet; @@ -161,12 +162,12 @@ public class CompactionTool extends Configured implements Tool { } do { Optional<CompactionContext> compaction = - store.requestCompaction(Store.PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null); + store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null); if (!compaction.isPresent()) { break; } List<HStoreFile> storeFiles = - store.compact(compaction.get(), NoLimitThroughputController.INSTANCE); + store.compact(compaction.get(), NoLimitThroughputController.INSTANCE, null); if (storeFiles != null && !storeFiles.isEmpty()) { if (keepCompactedFiles && deleteCompacted) { for (HStoreFile storeFile: storeFiles) { http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java ---------------------------------------------------------------------- diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 0b5a929..5f8c119 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -85,7 +85,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile.Reader; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.Store; +import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.TestHRegionFileSystem; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; import 
org.apache.hadoop.hbase.testclassification.LargeTests; @@ -1258,7 +1258,7 @@ public class TestHFileOutputFormat2 { public Boolean call() throws Exception { List<HRegion> regions = util.getMiniHBaseCluster().getRegions(TABLE_NAMES[0]); for (HRegion region : regions) { - for (Store store : region.getStores()) { + for (HStore store : region.getStores()) { store.closeAndArchiveCompactedFiles(); } } @@ -1277,7 +1277,7 @@ public class TestHFileOutputFormat2 { public Boolean call() throws Exception { List<HRegion> regions = util.getMiniHBaseCluster().getRegions(TABLE_NAMES[0]); for (HRegion region : regions) { - for (Store store : region.getStores()) { + for (HStore store : region.getStores()) { store.closeAndArchiveCompactedFiles(); } } http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BusyRegionSplitPolicy.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BusyRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BusyRegionSplitPolicy.java index 9c21274..0b25115 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BusyRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/BusyRegionSplitPolicy.java @@ -105,7 +105,7 @@ public class BusyRegionSplitPolicy extends IncreasingToUpperBoundRegionSplitPoli return false; } - for (Store store: region.getStores()) { + for (HStore store: region.getStores()) { if (!store.canSplit()) { return false; } http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java index cdeeff7..e193dcb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java @@ -18,6 +18,9 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.apache.hadoop.hbase.regionserver.Store.NO_PRIORITY; +import static org.apache.hadoop.hbase.regionserver.Store.PRIORITY_USER; + import java.io.IOException; import java.io.PrintWriter; import java.io.StringWriter; @@ -35,7 +38,6 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.conf.ConfigurationManager; import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; @@ -45,12 +47,14 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.StealJobQueue; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; /** * Compact region on request and then run split if appropriate @@ -195,7 +199,7 
@@ public class CompactSplit implements PropagatingConfigurationObserver { public synchronized boolean requestSplit(final Region r) { // don't split regions that are blocking - if (shouldSplitRegion() && ((HRegion)r).getCompactPriority() >= Store.PRIORITY_USER) { + if (shouldSplitRegion() && ((HRegion)r).getCompactPriority() >= PRIORITY_USER) { byte[] midKey = ((HRegion)r).checkSplit(); if (midKey != null) { requestSplit(r, midKey); @@ -298,13 +302,13 @@ public class CompactSplit implements PropagatingConfigurationObserver { } public synchronized void requestSystemCompaction(HRegion region, String why) throws IOException { - requestCompactionInternal(region, why, Store.NO_PRIORITY, false, + requestCompactionInternal(region, why, NO_PRIORITY, false, CompactionLifeCycleTracker.DUMMY, null); } public synchronized void requestSystemCompaction(HRegion region, HStore store, String why) throws IOException { - requestCompactionInternal(region, store, why, Store.NO_PRIORITY, false, + requestCompactionInternal(region, store, why, NO_PRIORITY, false, CompactionLifeCycleTracker.DUMMY, null); } http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java index 021bb56..72f80e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactedHFilesDischarger.java @@ -24,11 +24,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.Stoppable; -import 
org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.executor.EventType; -import org.apache.hadoop.hbase.regionserver.Region; -import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.regionserver.Store; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; @@ -95,12 +92,11 @@ public class CompactedHFilesDischarger extends ScheduledChore { if (LOG.isTraceEnabled()) { LOG.trace("Started compacted hfiles cleaner on " + region.getRegionInfo()); } - for (Store store : region.getStores()) { + for (HStore store : ((HRegion) region).getStores()) { try { if (useExecutor && regionServerServices != null) { CompactedHFilesDischargeHandler handler = new CompactedHFilesDischargeHandler( - (Server) regionServerServices, EventType.RS_COMPACTED_FILES_DISCHARGER, - (HStore) store); + (Server) regionServerServices, EventType.RS_COMPACTED_FILES_DISCHARGER, store); regionServerServices.getExecutorService().submit(handler); } else { // call synchronously if the RegionServerServices are not http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java index 4f41830..cea84ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java @@ -72,7 +72,7 @@ public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy { boolean force = region.shouldForceSplit(); boolean foundABigStore = false; - for (Store store 
: region.getStores()) { + for (HStore store : region.getStores()) { // If any of the stores are unable to split (eg they contain reference files) // then don't split if ((!store.canSplit())) { http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java index e0c6510..4777e62 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java @@ -78,11 +78,11 @@ public abstract class FlushLargeStoresPolicy extends FlushPolicy { } protected boolean shouldFlush(HStore store) { - if (store.getSizeOfMemStore().getDataSize() > this.flushSizeLowerBound) { + if (store.getMemStoreSize().getDataSize() > this.flushSizeLowerBound) { if (LOG.isDebugEnabled()) { LOG.debug("Flush Column Family " + store.getColumnFamilyName() + " of " + region.getRegionInfo().getEncodedName() + " because memstoreSize=" + - store.getSizeOfMemStore().getDataSize() + " > lower bound=" + store.getMemStoreSize().getDataSize() + " > lower bound=" + this.flushSizeLowerBound); } return true; http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 4fa2c70..9552f43 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -1011,13 +1011,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi hasSloppyStores = true; } - long storeMaxSequenceId = store.getMaxSequenceId(); + long storeMaxSequenceId = store.getMaxSequenceId().orElse(0L); maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), storeMaxSequenceId); if (maxSeqId == -1 || storeMaxSequenceId > maxSeqId) { maxSeqId = storeMaxSequenceId; } - long maxStoreMemstoreTS = store.getMaxMemstoreTS(); + long maxStoreMemstoreTS = store.getMaxMemstoreTS().orElse(0L); if (maxStoreMemstoreTS > maxMemstoreTS) { maxMemstoreTS = maxStoreMemstoreTS; } @@ -1645,7 +1645,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // close each store in parallel for (HStore store : stores.values()) { - MemstoreSize flushableSize = store.getSizeToFlush(); + MemstoreSize flushableSize = store.getFlushableSize(); if (!(abort || flushableSize.getDataSize() == 0 || writestate.readOnly)) { if (getRegionServerServices() != null) { getRegionServerServices().abort("Assertion failed while closing store " @@ -1717,7 +1717,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } private long getMemstoreHeapSize() { - return stores.values().stream().mapToLong(s -> s.getSizeOfMemStore().getHeapSize()).sum(); + return stores.values().stream().mapToLong(s -> s.getMemStoreSize().getHeapSize()).sum(); } @Override @@ -2320,7 +2320,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } //since we didn't flush in the recent past, flush now if certain conditions //are met. Return true on first such memstore hit. 
- for (Store s : stores.values()) { + for (HStore s : stores.values()) { if (s.timeOfOldestEdit() < now - modifiedFlushCheckInterval) { // we have an old enough edit in the memstore, flush whyFlush.append(s.toString() + " has an old edit so flush to free WALs"); @@ -2481,7 +2481,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } for (HStore s : storesToFlush) { - MemstoreSize flushableSize = s.getSizeToFlush(); + MemstoreSize flushableSize = s.getFlushableSize(); totalSizeOfFlushableStores.incMemstoreSize(flushableSize); storeFlushCtxs.put(s.getColumnFamilyDescriptor().getName(), s.createFlushContext(flushOpSeqId)); committedFiles.put(s.getColumnFamilyDescriptor().getName(), null); // for writing stores to WAL @@ -2529,7 +2529,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi for (HStore store: storesToFlush) { perCfExtras.append("; ").append(store.getColumnFamilyName()); perCfExtras.append("=") - .append(StringUtils.byteDesc(store.getSizeToFlush().getDataSize())); + .append(StringUtils.byteDesc(store.getFlushableSize().getDataSize())); } } LOG.info("Flushing " + + storesToFlush.size() + "/" + stores.size() + @@ -4836,7 +4836,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi private MemstoreSize doDropStoreMemstoreContentsForSeqId(HStore s, long currentSeqId) throws IOException { - MemstoreSize flushableSize = s.getSizeToFlush(); + MemstoreSize flushableSize = s.getFlushableSize(); this.decrMemstoreSize(flushableSize); StoreFlushContext ctx = s.createFlushContext(currentSeqId); ctx.prepare(); @@ -4933,7 +4933,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi continue; } - long storeSeqId = store.getMaxSequenceId(); + long storeSeqId = store.getMaxSequenceId().orElse(0L); List<String> storeFiles = storeDescriptor.getStoreFileList(); try { store.refreshStoreFiles(storeFiles); // replace the files with the new ones @@ -4943,7 
+4943,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi + " doesn't exist any more. Skip loading the file(s)", ex); continue; } - if (store.getMaxSequenceId() != storeSeqId) { + if (store.getMaxSequenceId().orElse(0L) != storeSeqId) { // Record latest flush time if we picked up new files lastStoreFlushTimeMap.put(store, EnvironmentEdgeManager.currentTime()); } @@ -4954,7 +4954,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi StoreFlushContext ctx = this.prepareFlushResult.storeFlushCtxs == null ? null : this.prepareFlushResult.storeFlushCtxs.get(family); if (ctx != null) { - MemstoreSize snapshotSize = store.getSizeToFlush(); + MemstoreSize snapshotSize = store.getFlushableSize(); ctx.abort(); this.decrMemstoreSize(snapshotSize); this.prepareFlushResult.storeFlushCtxs.remove(family); @@ -5085,7 +5085,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi if (store == null) { continue; } - if (store.getSizeOfSnapshot().getDataSize() > 0) { + if (store.getSnapshotSize().getDataSize() > 0) { canDrop = false; break; } @@ -5129,12 +5129,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi for (HStore store : stores.values()) { // TODO: some stores might see new data from flush, while others do not which // MIGHT break atomic edits across column families. - long maxSeqIdBefore = store.getMaxSequenceId(); + long maxSeqIdBefore = store.getMaxSequenceId().orElse(0L); // refresh the store files. This is similar to observing a region open wal marker. 
store.refreshStoreFiles(); - long storeSeqId = store.getMaxSequenceId(); + long storeSeqId = store.getMaxSequenceId().orElse(0L); if (storeSeqId < smallestSeqIdInStores) { smallestSeqIdInStores = storeSeqId; } @@ -5148,7 +5148,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi null : this.prepareFlushResult.storeFlushCtxs.get( store.getColumnFamilyDescriptor().getName()); if (ctx != null) { - MemstoreSize snapshotSize = store.getSizeToFlush(); + MemstoreSize snapshotSize = store.getFlushableSize(); ctx.abort(); this.decrMemstoreSize(snapshotSize); this.prepareFlushResult.storeFlushCtxs.remove( @@ -5169,7 +5169,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // advance the mvcc read point so that the new flushed files are visible. // either greater than flush seq number or they were already picked up via flush. for (HStore s : stores.values()) { - mvcc.advanceTo(s.getMaxMemstoreTS()); + mvcc.advanceTo(s.getMaxMemstoreTS().orElse(0L)); } @@ -8074,7 +8074,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi for (HStore s : stores.values()) { buf.append(s.getColumnFamilyDescriptor().getNameAsString()); buf.append(" size: "); - buf.append(s.getSizeOfMemStore().getDataSize()); + buf.append(s.getMemStoreSize().getDataSize()); buf.append(" "); } buf.append("end-of-stores"); http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 62080d7..53f4445 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ 
-1787,7 +1787,7 @@ public class HRegionServer extends HasThread implements // Queue a compaction. Will recognize if major is needed. this.instance.compactSplitThread.requestSystemCompaction(hr, s, getName() + " requests compaction"); - } else if (s.isMajorCompaction()) { + } else if (s.shouldPerformMajorCompaction()) { s.triggerMajorCompaction(); if (majorCompactPriority == DEFAULT_PRIORITY || majorCompactPriority > hr.getCompactPriority()) { http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index de41087..8c55c42 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -32,6 +32,7 @@ import java.util.Map; import java.util.NavigableSet; import java.util.Optional; import java.util.OptionalDouble; +import java.util.OptionalLong; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.CompletionService; @@ -65,6 +66,8 @@ import org.apache.hadoop.hbase.backup.FailedArchiveException; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; +import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -120,7 +123,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDes * not be called directly but by an HRegion manager. 
*/ @InterfaceAudience.Private -public class HStore implements Store { +public class HStore implements Store, HeapSize, StoreConfigInformation, PropagatingConfigurationObserver { public static final String MEMSTORE_CLASS_NAME = "hbase.regionserver.memstore.class"; public static final String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY = "hbase.server.compactchecker.interval.multiplier"; @@ -396,26 +399,12 @@ public class HStore implements Store { } @Override - @Deprecated - public long getFlushableSize() { - MemstoreSize size = getSizeToFlush(); - return size.getHeapSize(); - } - - @Override - public MemstoreSize getSizeToFlush() { + public MemstoreSize getFlushableSize() { return this.memstore.getFlushableSize(); } @Override - @Deprecated - public long getSnapshotSize() { - MemstoreSize size = getSizeOfSnapshot(); - return size.getHeapSize(); - } - - @Override - public MemstoreSize getSizeOfSnapshot() { + public MemstoreSize getSnapshotSize() { return this.memstore.getSnapshotSize(); } @@ -466,16 +455,13 @@ public class HStore implements Store { return this.family; } - /** - * @return The maximum sequence id in all store files. Used for log replay. - */ @Override - public long getMaxSequenceId() { + public OptionalLong getMaxSequenceId() { return StoreUtils.getMaxSequenceIdInList(this.getStorefiles()); } @Override - public long getMaxMemstoreTS() { + public OptionalLong getMaxMemstoreTS() { return StoreUtils.getMaxMemstoreTSInList(this.getStorefiles()); } @@ -503,7 +489,9 @@ public class HStore implements Store { return new Path(tabledir, new Path(encodedName, Bytes.toString(family))); } - @Override + /** + * @return the data block encoder + */ public HFileDataBlockEncoder getDataBlockEncoder() { return dataBlockEncoder; } @@ -584,20 +572,17 @@ public class HStore implements Store { return results; } - /** - * Checks the underlying store files, and opens the files that have not - * been opened, and removes the store file readers for store files no longer - * available. 
Mainly used by secondary region replicas to keep up to date with - * the primary region files. - * @throws IOException - */ @Override public void refreshStoreFiles() throws IOException { Collection<StoreFileInfo> newFiles = fs.getStoreFiles(getColumnFamilyName()); refreshStoreFilesInternal(newFiles); } - @Override + /** + * Replaces the store files that the store has with the given files. Mainly used by secondary + * region replicas to keep up to date with the primary region files. + * @throws IOException + */ public void refreshStoreFiles(Collection<String> newFiles) throws IOException { List<StoreFileInfo> storeFiles = new ArrayList<>(newFiles.size()); for (String file : newFiles) { @@ -658,7 +643,8 @@ public class HStore implements Store { // readers might pick it up. This assumes that the store is not getting any writes (otherwise // in-flight transactions might be made visible) if (!toBeAddedFiles.isEmpty()) { - region.getMVCC().advanceTo(this.getMaxSequenceId()); + // we must have the max sequence id here as we do have several store files + region.getMVCC().advanceTo(this.getMaxSequenceId().getAsLong()); } completeCompaction(toBeRemovedStoreFiles); @@ -878,7 +864,12 @@ public class HStore implements Store { } } - @Override + /** + * Close all the readers We don't need to worry about subsequent requests because the Region holds + * a write lock that will prevent any more reads or writes. + * @return the {@link StoreFile StoreFiles} that were previously being used. 
+ * @throws IOException on failure + */ public ImmutableCollection<HStoreFile> close() throws IOException { this.archiveLock.lock(); this.lock.writeLock().lock(); @@ -1035,13 +1026,6 @@ public class HStore implements Store { return sf; } - @Override - public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, - boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag) throws IOException { - return createWriterInTmp(maxKeyCount, compression, isCompaction, includeMVCCReadpoint, - includesTag, false); - } - /** * @param maxKeyCount * @param compression Compression algorithm to use @@ -1050,7 +1034,6 @@ public class HStore implements Store { * @param includesTag - includesTag or not * @return Writer for a new StoreFile in the tmp dir. */ - @Override public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag, boolean shouldDropBehind) throws IOException { @@ -1068,10 +1051,9 @@ public class HStore implements Store { */ // TODO : allow the Writer factory to create Writers of ShipperListener type only in case of // compaction - @Override public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag, - boolean shouldDropBehind, final TimeRangeTracker trt) throws IOException { + boolean shouldDropBehind, TimeRangeTracker trt) throws IOException { final CacheConfig writerCacheConf; if (isCompaction) { // Don't cache data on write on compactions. @@ -1301,12 +1283,16 @@ public class HStore implements Store { return scanners; } - @Override + /** + * @param o Observer who wants to know about changes in set of Readers + */ public void addChangedReaderObserver(ChangedReadersObserver o) { this.changedReaderObservers.add(o); } - @Override + /** + * @param o Observer no longer interested in changes in set of Readers. 
+ */ public void deleteChangedReaderObserver(ChangedReadersObserver o) { // We don't check if observer present; it may not be (legitimately) this.changedReaderObservers.remove(o); @@ -1359,13 +1345,6 @@ public class HStore implements Store { * @throws IOException * @return Storefile we compacted into or null if we failed or opted out early. */ - @Override - public List<HStoreFile> compact(CompactionContext compaction, - ThroughputController throughputController) throws IOException { - return compact(compaction, throughputController, null); - } - - @Override public List<HStoreFile> compact(CompactionContext compaction, ThroughputController throughputController, User user) throws IOException { assert compaction != null; @@ -1669,7 +1648,7 @@ public class HStore implements Store { } @Override - public boolean isMajorCompaction() throws IOException { + public boolean shouldPerformMajorCompaction() throws IOException { for (HStoreFile sf : this.storeEngine.getStoreFileManager().getStorefiles()) { // TODO: what are these reader checks all over the place? if (sf.getReader() == null) { @@ -1681,7 +1660,10 @@ public class HStore implements Store { this.storeEngine.getStoreFileManager().getStorefiles()); } - @Override + public Optional<CompactionContext> requestCompaction() throws IOException { + return requestCompaction(NO_PRIORITY, CompactionLifeCycleTracker.DUMMY, null); + } + public Optional<CompactionContext> requestCompaction(int priority, CompactionLifeCycleTracker tracker, User user) throws IOException { // don't even select for compaction if writes are disabled @@ -1804,7 +1786,6 @@ public class HStore implements Store { + "; total size for store is " + TraditionalBinaryPrefix.long2String(storeSize, "", 1)); } - @Override public void cancelRequestedCompaction(CompactionContext compaction) { finishCompactionRequest(compaction.getRequest()); } @@ -1899,7 +1880,9 @@ public class HStore implements Store { } } - @Override + /** + * Determines if Store should be split. 
+ */ public Optional<byte[]> getSplitPoint() { this.lock.readLock().lock(); try { @@ -1931,7 +1914,6 @@ public class HStore implements Store { return storeSize; } - @Override public void triggerMajorCompaction() { this.forceMajor = true; } @@ -1941,7 +1923,14 @@ public class HStore implements Store { // File administration ////////////////////////////////////////////////////////////////////////////// - @Override + /** + * Return a scanner for both the memstore and the HStore files. Assumes we are not in a + * compaction. + * @param scan Scan to apply when scanning the stores + * @param targetCols columns to scan + * @return a scanner over the current key values + * @throws IOException on failure + */ public KeyValueScanner getScanner(Scan scan, final NavigableSet<byte []> targetCols, long readPt) throws IOException { lock.readLock().lock(); @@ -2032,7 +2021,7 @@ public class HStore implements Store { return this.storeEngine.getStoreFileManager().getCompactedFilesCount(); } - private LongStream getStoreFileCreatedTimestampStream() { + private LongStream getStoreFileAgeStream() { return this.storeEngine.getStoreFileManager().getStorefiles().stream().filter(sf -> { if (sf.getReader() == null) { LOG.warn("StoreFile " + sf + " has a null Reader"); @@ -2040,25 +2029,23 @@ public class HStore implements Store { } else { return true; } - }).filter(HStoreFile::isHFile).mapToLong(sf -> sf.getFileInfo().getCreatedTimestamp()); + }).filter(HStoreFile::isHFile).mapToLong(sf -> sf.getFileInfo().getCreatedTimestamp()) + .map(t -> EnvironmentEdgeManager.currentTime() - t); } @Override - public long getMaxStoreFileAge() { - return EnvironmentEdgeManager.currentTime() - - getStoreFileCreatedTimestampStream().min().orElse(Long.MAX_VALUE); + public OptionalLong getMaxStoreFileAge() { + return getStoreFileAgeStream().max(); } @Override - public long getMinStoreFileAge() { - return EnvironmentEdgeManager.currentTime() - - getStoreFileCreatedTimestampStream().max().orElse(0L); + public 
OptionalLong getMinStoreFileAge() { + return getStoreFileAgeStream().min(); } @Override - public long getAvgStoreFileAge() { - OptionalDouble avg = getStoreFileCreatedTimestampStream().average(); - return avg.isPresent() ? EnvironmentEdgeManager.currentTime() - (long) avg.getAsDouble() : 0L; + public OptionalDouble getAvgStoreFileAge() { + return getStoreFileAgeStream().average(); } @Override @@ -2128,14 +2115,7 @@ public class HStore implements Store { } @Override - @Deprecated - public long getMemStoreSize() { - MemstoreSize size = getSizeOfMemStore(); - return size.getHeapSize(); - } - - @Override - public MemstoreSize getSizeOfMemStore() { + public MemstoreSize getMemStoreSize() { return this.memstore.size(); } @@ -2148,7 +2128,6 @@ public class HStore implements Store { return priority; } - @Override public boolean throttleCompaction(long compactionSize) { return storeEngine.getCompactionPolicy().throttleCompaction(compactionSize); } @@ -2200,7 +2179,6 @@ public class HStore implements Store { } } - @Override public StoreFlushContext createFlushContext(long cacheFlushId) { return new StoreFlusherImpl(cacheFlushId); } @@ -2344,7 +2322,11 @@ public class HStore implements Store { return this.storeEngine.needsCompaction(this.filesCompacting); } - @Override + /** + * Used for tests. + * @return cache configuration for this Store. 
+ */ + @VisibleForTesting public CacheConfig getCacheConfig() { return this.cacheConf; } @@ -2370,7 +2352,6 @@ public class HStore implements Store { return comparator; } - @Override public ScanInfo getScanInfo() { return scanInfo; } @@ -2490,7 +2471,9 @@ public class HStore implements Store { archiveLock.unlock(); } - @Override + /** + * Closes and archives the compacted files under this store + */ public synchronized void closeAndArchiveCompactedFiles() throws IOException { // ensure other threads do not attempt to archive the same files on close() archiveLock.lock(); http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java index 17abb77..82a5b32 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java @@ -76,7 +76,7 @@ public class IncreasingToUpperBoundRegionSplitPolicy extends ConstantSizeRegionS // Get size to check long sizeToCheck = getSizeToCheck(tableRegionsCount); - for (Store store : region.getStores()) { + for (HStore store : region.getStores()) { // If any of the stores is unable to split (eg they contain reference files) // then don't split if (!store.canSplit()) { http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java ---------------------------------------------------------------------- diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java index f9018f3..796f7c9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java @@ -95,7 +95,7 @@ public interface KeyValueScanner extends Shipper, Closeable { * this query, based on TTL * @return true if the scanner should be included in the query */ - boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS); + boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS); // "Lazy scanner" optimizations http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index e30ed8e..a99dcd6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.Collection; import java.util.List; +import java.util.OptionalDouble; +import java.util.OptionalLong; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -31,7 +33,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; -import org.apache.yetus.audience.InterfaceAudience; import 
org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.CacheStats; @@ -44,6 +45,7 @@ import org.apache.hadoop.hbase.wal.WALProvider; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hdfs.DFSHedgedReadMetrics; import org.apache.hadoop.metrics2.MetricsExecutor; +import org.apache.yetus.audience.InterfaceAudience; /** * Impl for exposing HRegionServer Information through Hadoop's metrics 2 system. @@ -765,22 +767,30 @@ class MetricsRegionServerWrapperImpl tempNumStores += storeList.size(); for (Store store : storeList) { tempNumStoreFiles += store.getStorefilesCount(); - tempMemstoreSize += store.getSizeOfMemStore().getDataSize(); + tempMemstoreSize += store.getMemStoreSize().getDataSize(); tempStoreFileSize += store.getStorefilesSize(); - long storeMaxStoreFileAge = store.getMaxStoreFileAge(); - tempMaxStoreFileAge = (storeMaxStoreFileAge > tempMaxStoreFileAge) ? - storeMaxStoreFileAge : tempMaxStoreFileAge; + OptionalLong storeMaxStoreFileAge = store.getMaxStoreFileAge(); + if (storeMaxStoreFileAge.isPresent() && + storeMaxStoreFileAge.getAsLong() > tempMaxStoreFileAge) { + tempMaxStoreFileAge = storeMaxStoreFileAge.getAsLong(); + } - long storeMinStoreFileAge = store.getMinStoreFileAge(); - tempMinStoreFileAge = (storeMinStoreFileAge < tempMinStoreFileAge) ? 
- storeMinStoreFileAge : tempMinStoreFileAge; + OptionalLong storeMinStoreFileAge = store.getMinStoreFileAge(); + if (storeMinStoreFileAge.isPresent() && + storeMinStoreFileAge.getAsLong() < tempMinStoreFileAge) { + tempMinStoreFileAge = storeMinStoreFileAge.getAsLong(); + } long storeHFiles = store.getNumHFiles(); - avgAgeNumerator += store.getAvgStoreFileAge() * storeHFiles; numHFiles += storeHFiles; tempNumReferenceFiles += store.getNumReferenceFiles(); + OptionalDouble storeAvgStoreFileAge = store.getAvgStoreFileAge(); + if (storeAvgStoreFileAge.isPresent()) { + avgAgeNumerator += storeAvgStoreFileAge.getAsDouble() * storeHFiles; + } + tempStorefileIndexSize += store.getStorefilesIndexSize(); tempTotalStaticBloomSize += store.getTotalStaticBloomSize(); tempTotalStaticIndexSize += store.getTotalStaticIndexSize(); http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java index dc7d3cb..efdf712 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java @@ -21,18 +21,20 @@ package org.apache.hadoop.hbase.regionserver; import java.io.Closeable; import java.io.IOException; import java.util.Map; +import java.util.OptionalDouble; +import java.util.OptionalLong; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.yetus.audience.InterfaceAudience; import 
org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.metrics2.MetricsExecutor; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable { @@ -227,21 +229,28 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable if (region.stores != null) { for (Store store : region.stores.values()) { tempNumStoreFiles += store.getStorefilesCount(); - tempMemstoreSize += store.getSizeOfMemStore().getDataSize(); + tempMemstoreSize += store.getMemStoreSize().getDataSize(); tempStoreFileSize += store.getStorefilesSize(); - - long storeMaxStoreFileAge = store.getMaxStoreFileAge(); - tempMaxStoreFileAge = (storeMaxStoreFileAge > tempMaxStoreFileAge) ? - storeMaxStoreFileAge : tempMaxStoreFileAge; - - long storeMinStoreFileAge = store.getMinStoreFileAge(); - tempMinStoreFileAge = (storeMinStoreFileAge < tempMinStoreFileAge) ? 
- storeMinStoreFileAge : tempMinStoreFileAge; + OptionalLong storeMaxStoreFileAge = store.getMaxStoreFileAge(); + if (storeMaxStoreFileAge.isPresent() && + storeMaxStoreFileAge.getAsLong() > tempMaxStoreFileAge) { + tempMaxStoreFileAge = storeMaxStoreFileAge.getAsLong(); + } + + OptionalLong storeMinStoreFileAge = store.getMinStoreFileAge(); + if (storeMinStoreFileAge.isPresent() && + storeMinStoreFileAge.getAsLong() < tempMinStoreFileAge) { + tempMinStoreFileAge = storeMinStoreFileAge.getAsLong(); + } long storeHFiles = store.getNumHFiles(); - avgAgeNumerator += store.getAvgStoreFileAge() * storeHFiles; numHFiles += storeHFiles; tempNumReferenceFiles += store.getNumReferenceFiles(); + + OptionalDouble storeAvgStoreFileAge = store.getAvgStoreFileAge(); + if (storeAvgStoreFileAge.isPresent()) { + avgAgeNumerator += storeAvgStoreFileAge.getAsDouble() * storeHFiles; + } } } http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java index 354954c..fe2ce6f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java @@ -56,7 +56,7 @@ public abstract class NonLazyKeyValueScanner implements KeyValueScanner { } @Override - public boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) { + public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) { // No optimizations implemented by default. 
return true; } http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index 4890f0d..073c25f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -749,7 +749,7 @@ public interface Region extends ConfigurationObserver { * Trigger major compaction on all stores in the region. * <p> * Compaction will be performed asynchronously to this call by the RegionServer's - * CompactSplitThread. See also {@link Store#triggerMajorCompaction()} + * CompactSplitThread. * @throws IOException */ void triggerMajorCompaction() throws IOException; http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java index 85fbdbe..626d43c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java @@ -279,7 +279,7 @@ public class SegmentScanner implements KeyValueScanner { * overridden method */ @Override - public boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) { + public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) { return getSegment().shouldSeek(scan.getColumnFamilyTimeRange() .getOrDefault(store.getColumnFamilyDescriptor().getName(), 
scan.getTimeRange()), oldestUnexpiredTS); } http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 6cece0f..9c01301 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -19,27 +19,17 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.Collection; -import java.util.List; -import java.util.NavigableSet; -import java.util.Optional; +import java.util.Comparator; +import java.util.OptionalDouble; +import java.util.OptionalLong; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; -import org.apache.hadoop.hbase.io.HeapSize; -import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; -import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; -import org.apache.hadoop.hbase.security.User; import 
org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -49,7 +39,7 @@ import org.apache.yetus.audience.InterfaceStability; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving -public interface Store extends HeapSize, StoreConfigInformation, PropagatingConfigurationObserver { +public interface Store { /** * The default priority for user-specified compaction requests. @@ -59,34 +49,13 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf int NO_PRIORITY = Integer.MIN_VALUE; // General Accessors - CellComparator getComparator(); + Comparator<Cell> getComparator(); Collection<? extends StoreFile> getStorefiles(); Collection<? extends StoreFile> getCompactedFiles(); /** - * Close all the readers We don't need to worry about subsequent requests because the Region - * holds a write lock that will prevent any more reads or writes. - * @return the {@link StoreFile StoreFiles} that were previously being used. - * @throws IOException on failure - */ - Collection<? extends StoreFile> close() throws IOException; - - /** - * Return a scanner for both the memstore and the HStore files. Assumes we are not in a - * compaction. 
- * @param scan Scan to apply when scanning the stores - * @param targetCols columns to scan - * @return a scanner over the current key values - * @throws IOException on failure - */ - KeyValueScanner getScanner(Scan scan, final NavigableSet<byte[]> targetCols, long readPt) - throws IOException; - - ScanInfo getScanInfo(); - - /** * When was the last edit done in the memstore */ long timeOfOldestEdit(); @@ -94,185 +63,65 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf FileSystem getFileSystem(); /** - * @param maxKeyCount - * @param compression Compression algorithm to use - * @param isCompaction whether we are creating a new file in a compaction - * @param includeMVCCReadpoint whether we should out the MVCC readpoint - * @return Writer for a new StoreFile in the tmp dir. - */ - StoreFileWriter createWriterInTmp( - long maxKeyCount, - Compression.Algorithm compression, - boolean isCompaction, - boolean includeMVCCReadpoint, - boolean includesTags - ) throws IOException; - - /** - * @param maxKeyCount - * @param compression Compression algorithm to use - * @param isCompaction whether we are creating a new file in a compaction - * @param includeMVCCReadpoint whether we should out the MVCC readpoint - * @param shouldDropBehind should the writer drop caches behind writes - * @return Writer for a new StoreFile in the tmp dir. - */ - StoreFileWriter createWriterInTmp( - long maxKeyCount, - Compression.Algorithm compression, - boolean isCompaction, - boolean includeMVCCReadpoint, - boolean includesTags, - boolean shouldDropBehind - ) throws IOException; - - /** - * @param maxKeyCount - * @param compression Compression algorithm to use - * @param isCompaction whether we are creating a new file in a compaction - * @param includeMVCCReadpoint whether we should out the MVCC readpoint - * @param shouldDropBehind should the writer drop caches behind writes - * @param trt Ready-made timetracker to use. 
- * @return Writer for a new StoreFile in the tmp dir. - */ - StoreFileWriter createWriterInTmp( - long maxKeyCount, - Compression.Algorithm compression, - boolean isCompaction, - boolean includeMVCCReadpoint, - boolean includesTags, - boolean shouldDropBehind, - final TimeRangeTracker trt - ) throws IOException; - - // Compaction oriented methods - - boolean throttleCompaction(long compactionSize); - - /** * getter for CompactionProgress object * @return CompactionProgress object; can be null */ CompactionProgress getCompactionProgress(); - default Optional<CompactionContext> requestCompaction() throws IOException { - return requestCompaction(NO_PRIORITY, CompactionLifeCycleTracker.DUMMY, null); - } - - Optional<CompactionContext> requestCompaction(int priority, CompactionLifeCycleTracker tracker, - User user) throws IOException; - - void cancelRequestedCompaction(CompactionContext compaction); - - /** - * @deprecated see compact(CompactionContext, ThroughputController, User) - */ - @Deprecated - List<? extends StoreFile> compact(CompactionContext compaction, - ThroughputController throughputController) throws IOException; - - List<? extends StoreFile> compact(CompactionContext compaction, - ThroughputController throughputController, User user) throws IOException; - /** + * Tests whether we should run a major compaction. For example, if the configured major compaction + * interval is reached. * @return true if we should run a major compaction. 
*/ - boolean isMajorCompaction() throws IOException; - - void triggerMajorCompaction(); + boolean shouldPerformMajorCompaction() throws IOException; /** * See if there's too much store files in this store - * @return true if number of store files is greater than the number defined in minFilesToCompact + * @return <code>true</code> if number of store files is greater than the number defined in + * minFilesToCompact */ boolean needsCompaction(); int getCompactPriority(); - StoreFlushContext createFlushContext(long cacheFlushId); - - // Split oriented methods - - boolean canSplit(); - /** - * Determines if Store should be split. + * Returns whether this store is splittable, i.e., no reference file in this store. */ - Optional<byte[]> getSplitPoint(); - - // General accessors into the state of the store - // TODO abstract some of this out into a metrics class + boolean canSplit(); /** - * @return <tt>true</tt> if the store has any underlying reference files to older HFiles + * @return <code>true</code> if the store has any underlying reference files to older HFiles */ boolean hasReferences(); /** - * @return The size of this store's memstore, in bytes - * @deprecated Since 2.0 and will be removed in 3.0. Use {@link #getSizeOfMemStore()} instead. - * <p> - * Note: When using off heap MSLAB feature, this will not account the cell data bytes size which - * is in off heap MSLAB area. - */ - @Deprecated - long getMemStoreSize(); - - /** * @return The size of this store's memstore. */ - MemstoreSize getSizeOfMemStore(); + MemstoreSize getMemStoreSize(); /** * @return The amount of memory we could flush from this memstore; usually this is equal to * {@link #getMemStoreSize()} unless we are carrying snapshots and then it will be the size of * outstanding snapshots. - * @deprecated Since 2.0 and will be removed in 3.0. Use {@link #getSizeToFlush()} instead. 
- * <p> - * Note: When using off heap MSLAB feature, this will not account the cell data bytes size which - * is in off heap MSLAB area. */ - @Deprecated - long getFlushableSize(); + MemstoreSize getFlushableSize(); /** - * @return The amount of memory we could flush from this memstore; usually this is equal to - * {@link #getSizeOfMemStore()} unless we are carrying snapshots and then it will be the size of - * outstanding snapshots. - */ - MemstoreSize getSizeToFlush(); - - /** - * Returns the memstore snapshot size * @return size of the memstore snapshot - * @deprecated Since 2.0 and will be removed in 3.0. Use {@link #getSizeOfSnapshot()} instead. - * <p> - * Note: When using off heap MSLAB feature, this will not account the cell data bytes size which - * is in off heap MSLAB area. */ - @Deprecated - long getSnapshotSize(); - - /** - * @return size of the memstore snapshot - */ - MemstoreSize getSizeOfSnapshot(); + MemstoreSize getSnapshotSize(); ColumnFamilyDescriptor getColumnFamilyDescriptor(); /** * @return The maximum sequence id in all store files. */ - long getMaxSequenceId(); + OptionalLong getMaxSequenceId(); /** * @return The maximum memstoreTS in all store files. 
*/ - long getMaxMemstoreTS(); - - /** - * @return the data block encoder - */ - HFileDataBlockEncoder getDataBlockEncoder(); + OptionalLong getMaxMemstoreTS(); /** @return aggregate size of all HStores used in the last compaction */ long getLastCompactSize(); @@ -293,17 +142,17 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf /** * @return Max age of store files in this store */ - long getMaxStoreFileAge(); + OptionalLong getMaxStoreFileAge(); /** * @return Min age of store files in this store */ - long getMinStoreFileAge(); + OptionalLong getMinStoreFileAge(); /** - * @return Average age of store files in this store, 0 if no store files + * @return Average age of store files in this store */ - long getAvgStoreFileAge(); + OptionalDouble getAvgStoreFileAge(); /** * @return Number of reference files in this store @@ -350,18 +199,10 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf */ long getTotalStaticBloomSize(); - // Test-helper methods - - /** - * Used for tests. - * @return cache configuration for this Store. - */ - CacheConfig getCacheConfig(); - /** * @return the parent region info hosting this store */ - HRegionInfo getRegionInfo(); + RegionInfo getRegionInfo(); RegionCoprocessorHost getCoprocessorHost(); @@ -413,26 +254,15 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf */ long getMajorCompactedCellsSize(); - /* - * @param o Observer who wants to know about changes in set of Readers - */ - void addChangedReaderObserver(ChangedReadersObserver o); - - /* - * @param o Observer no longer interested in changes in set of Readers. - */ - void deleteChangedReaderObserver(ChangedReadersObserver o); - /** * @return Whether this store has too many store files. */ boolean hasTooManyStoreFiles(); /** - * Checks the underlying store files, and opens the files that have not - * been opened, and removes the store file readers for store files no longer - * available. 
Mainly used by secondary region replicas to keep up to date with - * the primary region files. + * Checks the underlying store files, and opens the files that have not been opened, and removes + * the store file readers for store files no longer available. Mainly used by secondary region + * replicas to keep up to date with the primary region files. * @throws IOException */ void refreshStoreFiles() throws IOException; @@ -454,22 +284,9 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf */ double getCompactionPressure(); - /** - * Replaces the store files that the store has with the given files. Mainly used by - * secondary region replicas to keep up to date with - * the primary region files. - * @throws IOException - */ - void refreshStoreFiles(Collection<String> newFiles) throws IOException; - boolean isPrimaryReplicaStore(); /** - * Closes and archives the compacted files under this store - */ - void closeAndArchiveCompactedFiles() throws IOException; - - /** * @return true if the memstore may need some extra memory space */ boolean isSloppyMemstore(); http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java index 67ef4de..5911825 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java @@ -163,7 +163,7 @@ public interface StoreFileManager { /** * @return the compaction pressure used for compaction throughput tuning. 
- * @see Store#getCompactionPressure() + * @see HStore#getCompactionPressure() */ double getCompactionPressure(); http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index f21b30b..f52eb39 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -474,7 +474,7 @@ public class StoreFileScanner implements KeyValueScanner { } @Override - public boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) { + public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) { // if the file has no entries, no need to validate or create a scanner. byte[] cf = store.getColumnFamilyDescriptor().getName(); TimeRange timeRange = scan.getColumnFamilyTimeRange().get(cf); http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index dd68d28..588211c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -418,7 +418,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner * Will be overridden by testcase so declared as protected. 
*/ @VisibleForTesting - protected List<KeyValueScanner> selectScannersFrom(Store store, + protected List<KeyValueScanner> selectScannersFrom(HStore store, List<? extends KeyValueScanner> allScanners) { boolean memOnly; boolean filesOnly; http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 0abaffd..354b056 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.Collection; import java.util.Optional; import java.util.OptionalInt; +import java.util.OptionalLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -79,22 +80,17 @@ public class StoreUtils { * Return the largest memstoreTS found across all storefiles in the given list. Store files that * were created by a mapreduce bulk load are ignored, as they do not correspond to any specific * put operation, and thus do not have a memstoreTS associated with them. - * @return 0 if no non-bulk-load files are provided or, this is Store that does not yet have any - * store files. */ - public static long getMaxMemstoreTSInList(Collection<HStoreFile> sfs) { + public static OptionalLong getMaxMemstoreTSInList(Collection<HStoreFile> sfs) { return sfs.stream().filter(sf -> !sf.isBulkLoadResult()).mapToLong(HStoreFile::getMaxMemstoreTS) - .max().orElse(0L); + .max(); } /** * Return the highest sequence ID found across all storefiles in the given list. 
- * @param sfs - * @return 0 if no non-bulk-load files are provided or, this is Store that does not yet have any - * store files. */ - public static long getMaxSequenceIdInList(Collection<HStoreFile> sfs) { - return sfs.stream().mapToLong(HStoreFile::getMaxSequenceId).max().orElse(0L); + public static OptionalLong getMaxSequenceIdInList(Collection<HStoreFile> sfs) { + return sfs.stream().mapToLong(HStoreFile::getMaxSequenceId).max(); } /** http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java index da35bfc..1e2f18a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java @@ -18,11 +18,12 @@ */ package org.apache.hadoop.hbase.regionserver.compactions; +import static org.apache.hadoop.hbase.regionserver.Store.NO_PRIORITY; + import java.util.Collection; import java.util.stream.Collectors; import org.apache.hadoop.hbase.regionserver.HStoreFile; -import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; import org.apache.yetus.audience.InterfaceAudience; @@ -39,7 +40,7 @@ public class CompactionRequest { private boolean isOffPeak = false; private enum DisplayCompactionType { MINOR, ALL_FILES, MAJOR } private DisplayCompactionType isMajor = DisplayCompactionType.MINOR; - private int priority = Store.NO_PRIORITY; + private int priority = NO_PRIORITY; private Collection<HStoreFile> filesToCompact; // CompactRequest 
object creation time. http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java index ba0caa4..d0beed0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; import java.util.List; +import java.util.OptionalLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -47,7 +48,10 @@ public class DateTieredCompactor extends AbstractMultiOutputCompactor<DateTiered private boolean needEmptyFile(CompactionRequest request) { // if we are going to compact the last N files, then we need to emit an empty file to retain the // maxSeqId if we haven't written out anything. 
- return StoreUtils.getMaxSequenceIdInList(request.getFiles()) == store.getMaxSequenceId(); + OptionalLong maxSeqId = StoreUtils.getMaxSequenceIdInList(request.getFiles()); + OptionalLong storeMaxSeqId = store.getMaxSequenceId(); + return maxSeqId.isPresent() && storeMaxSeqId.isPresent() && + maxSeqId.getAsLong() == storeMaxSeqId.getAsLong(); } public List<Path> compact(final CompactionRequest request, final List<Long> lowerBoundaries, http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java index 803ca54..c56b474 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareCompactionThroughputController.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; * {@value #HBASE_HSTORE_COMPACTION_MAX_THROUGHPUT_HIGHER_BOUND}, using the formula "lower + * (higher - lower) * compactionPressure", where compactionPressure is in range [0.0, 1.0]</li> * </ul> - * @see org.apache.hadoop.hbase.regionserver.Store#getCompactionPressure() + * @see org.apache.hadoop.hbase.regionserver.HStore#getCompactionPressure() */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class PressureAwareCompactionThroughputController extends PressureAwareThroughputController { http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java
---------------------------------------------------------------------- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java index 21b729a..764c065 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java @@ -19,8 +19,8 @@ package org.apache.hadoop.hbase.regionserver.throttle; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.regionserver.Store; /** * Helper methods for throttling @@ -40,7 +40,7 @@ public final class ThroughputControlUtil { * @param opName Name of the IO operation, e.g. "flush", "compaction", etc. * @return The name for throttling */ - public static String getNameForThrottling(final Store store, final String opName) { + public static String getNameForThrottling(HStore store, String opName) { int counter; for (;;) { counter = NAME_COUNTER.get(); @@ -49,8 +49,8 @@ public final class ThroughputControlUtil { break; } } - return store.getRegionInfo().getRegionNameAsString() + NAME_DELIMITER - + store.getColumnFamilyDescriptor().getNameAsString() - + NAME_DELIMITER + opName + NAME_DELIMITER + counter; + return store.getRegionInfo().getRegionNameAsString() + NAME_DELIMITER + + store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName + + NAME_DELIMITER + counter; } } http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp ---------------------------------------------------------------------- diff --git a/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp 
b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp index 5c84e98..097b8a9 100644 --- a/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/region.jsp @@ -99,7 +99,7 @@ <h3>Column Family: <%= cf %></h2> - <h4>Memstore size (MB): <%= (int) (store.getMemStoreSize() / 1024 / 1024) %></h3> + <h4>Memstore size (MB): <%= (int) (store.getMemStoreSize().getHeapSize() / 1024 / 1024) %></h3> <h4>Store Files</h3> http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java index 6e7bf7d..c9881c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java @@ -145,7 +145,7 @@ public class TestIOFencing { public int countStoreFiles() { int count = 0; - for (Store store : stores.values()) { + for (HStore store : stores.values()) { count += store.getStorefilesCount(); } return count; http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java index f7bedef..3e8d42e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java @@ -36,19 +36,20 @@ 
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; -import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; @@ -176,7 +177,7 @@ public class TestZooKeeperTableArchiveClient { final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0); // create the region - HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM); + ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM); HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd); List<Region> regions = new ArrayList<>(); regions.add(region); @@ -229,7 +230,7 @@ public class TestZooKeeperTableArchiveClient { List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner); final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0); // create the region - HColumnDescriptor hcd = new 
HColumnDescriptor(TEST_FAM); + ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM); HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd); List<Region> regions = new ArrayList<>(); regions.add(region); @@ -239,7 +240,7 @@ public class TestZooKeeperTableArchiveClient { loadFlushAndCompact(region, TEST_FAM); compactionCleaner.chore(); // create the another table that we don't archive - hcd = new HColumnDescriptor(TEST_FAM); + hcd = ColumnFamilyDescriptorBuilder.of(TEST_FAM); HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd); regions = new ArrayList<>(); regions.add(otherRegion); @@ -400,12 +401,12 @@ public class TestZooKeeperTableArchiveClient { return allFiles; } - private void loadFlushAndCompact(Region region, byte[] family) throws IOException { + private void loadFlushAndCompact(HRegion region, byte[] family) throws IOException { // create two hfiles in the region createHFileInRegion(region, family); createHFileInRegion(region, family); - Store s = region.getStore(family); + HStore s = region.getStore(family); int count = s.getStorefilesCount(); assertTrue("Don't have the expected store files, wanted >= 2 store files, but was:" + count, count >= 2); http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java ---------------------------------------------------------------------- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java index 30fe731..2d99e00 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAvoidCellReferencesIntoShippedBlocks.java @@ -46,10 +46,10 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache; import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.CachedBlock; +import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; -import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.ScanInfo; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.ScannerContext; @@ -134,9 +134,9 @@ public class TestAvoidCellReferencesIntoShippedBlocks { // get the block cache and region RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); - Region region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); - Store store = region.getStores().iterator().next(); + HRegion region = + (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); + HStore store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); cacheConf.setEvictOnClose(true); @@ -313,9 +313,9 @@ public class TestAvoidCellReferencesIntoShippedBlocks { // get the block cache and region RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName); String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName(); - Region region = - TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName); - Store store = region.getStores().iterator().next(); + HRegion region = (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName) + .getRegion(regionName); + HStore store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); 
cacheConf.setEvictOnClose(true);