HBASE-18825 Use HStoreFile instead of StoreFile in our own code base and remove unnecessary methods in StoreFile interface


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a5f84430
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a5f84430
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a5f84430

Branch: refs/heads/master
Commit: a5f84430a305db6a1eddd45f82d19babf43a8d01
Parents: 1540483
Author: zhangduo <zhang...@apache.org>
Authored: Sun Sep 24 19:22:16 2017 +0800
Committer: zhangduo <zhang...@apache.org>
Committed: Mon Sep 25 09:35:39 2017 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/backup/util/RestoreTool.java   |    4 +-
 .../example/ZooKeeperScanPolicyObserver.java    |   68 +-
 .../hbase/mapreduce/HFileOutputFormat2.java     |   30 +-
 .../hbase/regionserver/CompactionTool.java      |    6 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java |    4 +-
 .../hadoop/hbase/backup/HFileArchiver.java      |   30 +-
 .../hbase/coprocessor/RegionObserver.java       |    4 +-
 .../hadoop/hbase/io/HalfStoreFileReader.java    |   23 +-
 .../org/apache/hadoop/hbase/io/hfile/HFile.java |   11 +-
 .../hbase/io/hfile/HFilePrettyPrinter.java      |    2 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java  |   36 +-
 .../assignment/SplitTableRegionProcedure.java   |   31 +-
 .../master/balancer/StochasticLoadBalancer.java |    5 +-
 .../apache/hadoop/hbase/mob/CachedMobFile.java  |    7 +-
 .../hbase/mob/DefaultMobStoreCompactor.java     |    2 +-
 .../hbase/mob/DefaultMobStoreFlusher.java       |    9 +-
 .../org/apache/hadoop/hbase/mob/MobFile.java    |   15 +-
 .../apache/hadoop/hbase/mob/MobStoreEngine.java |    8 +-
 .../org/apache/hadoop/hbase/mob/MobUtils.java   |   11 +-
 .../PartitionedMobCompactionRequest.java        |   13 +-
 .../compactions/PartitionedMobCompactor.java    |   58 +-
 .../regionserver/ChangedReadersObserver.java    |    4 +-
 .../hbase/regionserver/CompactingMemStore.java  |    4 +-
 .../regionserver/DateTieredStoreEngine.java     |    8 +-
 .../hbase/regionserver/DefaultStoreEngine.java  |   20 +-
 .../regionserver/DefaultStoreFileManager.java   |  155 +-
 .../hbase/regionserver/DefaultStoreFlusher.java |    2 +-
 .../hadoop/hbase/regionserver/HMobStore.java    |    8 +-
 .../hadoop/hbase/regionserver/HRegion.java      |   90 +-
 .../hbase/regionserver/HRegionFileSystem.java   |   44 +-
 .../hbase/regionserver/HRegionServer.java       |   78 +-
 .../hadoop/hbase/regionserver/HStore.java       |  524 +++---
 .../hadoop/hbase/regionserver/HStoreFile.java   |  170 +-
 .../hbase/regionserver/MemStoreCompactor.java   |   31 +-
 .../MemStoreCompactorSegmentsIterator.java      |    8 +-
 .../hbase/regionserver/MobStoreScanner.java     |    4 +-
 .../regionserver/RegionCoprocessorHost.java     |   54 +-
 .../hbase/regionserver/RegionSplitPolicy.java   |   15 +-
 .../regionserver/ReversedMobStoreScanner.java   |    4 +-
 .../regionserver/ReversedStoreScanner.java      |    4 +-
 .../apache/hadoop/hbase/regionserver/Store.java |  121 +-
 .../hadoop/hbase/regionserver/StoreEngine.java  |   16 +-
 .../hadoop/hbase/regionserver/StoreFile.java    |  126 +-
 .../regionserver/StoreFileComparators.java      |   28 +-
 .../hbase/regionserver/StoreFileManager.java    |   42 +-
 .../hbase/regionserver/StoreFileReader.java     |   46 +-
 .../hbase/regionserver/StoreFileScanner.java    |   24 +-
 .../hbase/regionserver/StoreFileWriter.java     |   42 +-
 .../hadoop/hbase/regionserver/StoreFlusher.java |    6 +-
 .../hadoop/hbase/regionserver/StoreScanner.java |   29 +-
 .../hadoop/hbase/regionserver/StoreUtils.java   |   86 +-
 .../hbase/regionserver/StripeStoreEngine.java   |    8 +-
 .../regionserver/StripeStoreFileManager.java    |  225 +--
 .../hbase/regionserver/StripeStoreFlusher.java  |    6 +-
 .../AbstractMultiOutputCompactor.java           |    6 +-
 .../compactions/CompactionContext.java          |   11 +-
 .../compactions/CompactionPolicy.java           |    8 +-
 .../compactions/CompactionRequest.java          |   12 +-
 .../regionserver/compactions/Compactor.java     |   26 +-
 .../compactions/DateTieredCompactionPolicy.java |   38 +-
 .../DateTieredCompactionRequest.java            |    4 +-
 .../compactions/DateTieredCompactor.java        |    6 +-
 .../compactions/DefaultCompactor.java           |   10 +-
 .../compactions/ExploringCompactionPolicy.java  |   40 +-
 .../compactions/FIFOCompactionPolicy.java       |   29 +-
 .../compactions/RatioBasedCompactionPolicy.java |   18 +-
 .../compactions/SortedCompactionPolicy.java     |   60 +-
 .../compactions/StripeCompactionPolicy.java     |   92 +-
 .../compactions/StripeCompactor.java            |    6 +-
 .../hadoop/hbase/snapshot/SnapshotManifest.java |   21 +-
 .../hbase/tool/LoadIncrementalHFiles.java       |   25 +-
 .../hadoop/hbase/util/BloomFilterFactory.java   |   14 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   17 +-
 .../hadoop/hbase/util/RowBloomContext.java      |    7 +-
 .../hadoop/hbase/util/RowColBloomContext.java   |    7 +-
 .../hbase-webapps/regionserver/region.jsp       |    2 +-
 .../org/apache/hadoop/hbase/TestIOFencing.java  |   12 +-
 .../apache/hadoop/hbase/client/TestAdmin1.java  |   17 +-
 ...estAvoidCellReferencesIntoShippedBlocks.java |   13 +-
 .../hadoop/hbase/client/TestFromClientSide.java |    7 +-
 .../hbase/coprocessor/SimpleRegionObserver.java |   15 +-
 .../TestRegionObserverScannerOpenHook.java      |    3 +-
 .../hbase/io/TestHalfStoreFileReader.java       |    4 +-
 .../apache/hadoop/hbase/io/hfile/TestHFile.java |    4 +-
 .../hbase/io/hfile/TestHFileBlockIndex.java     |    8 +-
 .../hadoop/hbase/io/hfile/TestHFileSeek.java    |    4 +-
 .../hbase/mob/compactions/TestMobCompactor.java |    6 +-
 .../TestPartitionedMobCompactor.java            |   74 +-
 .../hbase/quotas/SpaceQuotaHelperForTests.java  |   12 +-
 .../AbstractTestDateTieredCompactionPolicy.java |   24 +-
 .../regionserver/DataBlockEncodingTool.java     |    2 +-
 .../EncodedSeekPerformanceTest.java             |    8 +-
 .../hbase/regionserver/MockHStoreFile.java      |  229 +++
 .../hbase/regionserver/MockStoreFile.java       |  220 ---
 .../regionserver/NoOpScanPolicyObserver.java    |   15 +-
 .../regionserver/TestCacheOnWriteInSchema.java  |    2 +-
 .../hbase/regionserver/TestCompaction.java      |   41 +-
 .../TestCompactionArchiveConcurrentClose.java   |   15 +-
 .../TestCompactionArchiveIOException.java       |   18 +-
 .../regionserver/TestCompactionPolicy.java      |   26 +-
 .../regionserver/TestCompoundBloomFilter.java   |    2 +-
 .../TestDefaultCompactSelection.java            |   10 +-
 .../regionserver/TestDefaultStoreEngine.java    |   10 +-
 .../regionserver/TestEncryptionKeyRotation.java |   24 +-
 .../TestEncryptionRandomKeying.java             |    4 +-
 .../hbase/regionserver/TestFSErrorsExposed.java |    6 +-
 .../hbase/regionserver/TestHMobStore.java       |    6 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  135 +-
 .../regionserver/TestHRegionReplayEvents.java   |    8 +-
 .../regionserver/TestHRegionServerBulkLoad.java |   16 +-
 .../hadoop/hbase/regionserver/TestHStore.java   | 1743 ++++++++++++++++++
 .../hbase/regionserver/TestHStoreFile.java      |   15 +-
 .../hbase/regionserver/TestMajorCompaction.java |    8 +-
 .../regionserver/TestMobStoreCompaction.java    |   16 +-
 .../hbase/regionserver/TestRegionReplicas.java  |   15 +-
 .../regionserver/TestRegionSplitPolicy.java     |   13 +-
 .../regionserver/TestReversibleScanners.java    |   45 +-
 .../regionserver/TestScannerWithBulkload.java   |    9 +-
 .../TestSplitTransactionOnCluster.java          |   17 +-
 .../hadoop/hbase/regionserver/TestStore.java    | 1740 -----------------
 .../regionserver/TestStripeStoreEngine.java     |   12 +-
 .../TestStripeStoreFileManager.java             |  155 +-
 .../regionserver/TestSwitchToStreamRead.java    |   11 +-
 .../ConstantSizeFileListGenerator.java          |   11 +-
 .../compactions/EverythingPolicy.java           |   12 +-
 .../compactions/ExplicitFileListGenerator.java  |   11 +-
 .../compactions/GaussianFileListGenerator.java  |   11 +-
 .../compactions/MockStoreFileGenerator.java     |   24 +-
 .../compactions/PerfTestCompactionPolicies.java |   38 +-
 .../SemiConstantSizeFileListGenerator.java      |   11 +-
 .../SinusoidalFileListGenerator.java            |   13 +-
 .../compactions/SpikyFileListGenerator.java     |   11 +-
 .../compactions/StoreFileListGenerator.java     |    9 +-
 .../TestCompactedHFilesDischarger.java          |   63 +-
 .../regionserver/compactions/TestCompactor.java |    6 +-
 .../compactions/TestDateTieredCompactor.java    |   16 +-
 .../compactions/TestStripeCompactionPolicy.java |   89 +-
 .../compactions/TestStripeCompactor.java        |    8 +-
 .../regionserver/wal/AbstractTestWALReplay.java |   17 +-
 .../visibility/TestVisibilityLabels.java        |   10 +-
 .../apache/hadoop/hbase/util/HFileTestUtil.java |    4 +-
 .../hbase/util/TestCoprocessorScanPolicy.java   |   13 +-
 .../hbase/util/TestHBaseFsckEncryption.java     |   13 +-
 .../hadoop/hbase/spark/HBaseContext.scala       |   10 +-
 144 files changed, 3920 insertions(+), 4147 deletions(-)
----------------------------------------------------------------------
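Note: the hunks below change several HFile.Reader accessors to return java.util.Optional instead of nullable values, and rename midkey() to midKey(). A minimal caller-side sketch of the new API (not from the patch itself; the class and method names of the sketch are illustrative only), assuming an already-opened reader:

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;

public class OptionalReaderSketch {
  static void printBoundaries(HFile.Reader reader) throws IOException {
    // getFirstRowKey()/getLastRowKey() now return Optional<byte[]> rather than a nullable array.
    Optional<byte[]> first = reader.getFirstRowKey();
    Optional<byte[]> last = reader.getLastRowKey();
    System.out.println("first=" + first.map(Bytes::toStringBinary).orElse("<empty file>"));
    System.out.println("last=" + last.map(Bytes::toStringBinary).orElse("<empty file>"));
    // midkey() is renamed to midKey() and returns Optional<Cell>.
    Optional<Cell> mid = reader.midKey();
    System.out.println("mid=" + mid.map(CellUtil::getCellKeyAsString).orElse("<not splitable>"));
  }
}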


http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
index ca0d026..b00351b 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -440,8 +440,8 @@ public class RestoreTool {
           final byte[] first, last;
           try {
             reader.loadFileInfo();
-            first = reader.getFirstRowKey();
-            last = reader.getLastRowKey();
+            first = reader.getFirstRowKey().get();
+            last = reader.getLastRowKey().get();
             LOG.debug("Trying to figure out region boundaries hfile=" + hfile 
+ " first="
                 + Bytes.toStringBinary(first) + " last=" + 
Bytes.toStringBinary(last));
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
index 6b31664..a730403 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
@@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.zookeeper.KeeperException;
@@ -130,32 +129,32 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver {
 
     @Override
     public void process(WatchedEvent event) {
-      switch(event.getType()) {
-      case NodeDataChanged:
-      case NodeCreated:
-      try {
-        // get data and re-watch
-        data = zk.getData(node, this, null);
-        LOG.debug("Read asynchronously: "+(data == null ? "null" : Bytes.toLong(data)));
-      } catch (InterruptedException ix) {
-      } catch (KeeperException kx) {
-        needSetup = true;
-      }
-      break;
+      switch (event.getType()) {
+        case NodeDataChanged:
+        case NodeCreated:
+          try {
+            // get data and re-watch
+            data = zk.getData(node, this, null);
+            LOG.debug("Read asynchronously: " + (data == null ? "null" : Bytes.toLong(data)));
+          } catch (InterruptedException ix) {
+          } catch (KeeperException kx) {
+            needSetup = true;
+          }
+          break;
 
-      case NodeDeleted:
-      try {
-        // just re-watch
-        zk.exists(node, this);
-        data = null;
-      } catch (InterruptedException ix) {
-      } catch (KeeperException kx) {
-        needSetup = true;
-      }
-      break;
+        case NodeDeleted:
+          try {
+            // just re-watch
+            zk.exists(node, this);
+            data = null;
+          } catch (InterruptedException ix) {
+          } catch (KeeperException kx) {
+            needSetup = true;
+          }
+          break;
 
-      default:
-        // ignore
+        default:
+          // ignore
       }
     }
   }
@@ -166,15 +165,13 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver {
     if (!re.getSharedData().containsKey(zkkey)) {
       // there is a short race here
       // in the worst case we create a watcher that will be notified once
-      re.getSharedData().putIfAbsent(
-          zkkey,
-          new ZKWatcher(re.getRegionServerServices().getZooKeeper()
-              .getRecoverableZooKeeper().getZooKeeper()));
+      re.getSharedData().putIfAbsent(zkkey, new ZKWatcher(
+          re.getRegionServerServices().getZooKeeper().getRecoverableZooKeeper().getZooKeeper()));
     }
   }
 
   protected ScanInfo getScanInfo(Store store, RegionCoprocessorEnvironment e) {
-    byte[] data = ((ZKWatcher)e.getSharedData().get(zkkey)).getData();
+    byte[] data = ((ZKWatcher) e.getSharedData().get(zkkey)).getData();
     if (data == null) {
       return null;
     }
@@ -182,8 +179,7 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver {
     if (oldSI.getTtl() == Long.MAX_VALUE) {
       return null;
     }
-    long ttl = Math.max(EnvironmentEdgeManager.currentTime() -
-        Bytes.toLong(data), oldSI.getTtl());
+    long ttl = Math.max(EnvironmentEdgeManager.currentTime() - Bytes.toLong(data), oldSI.getTtl());
     return new ScanInfo(oldSI.getConfiguration(), store.getColumnFamilyDescriptor(), ttl,
         oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
   }
@@ -197,7 +193,7 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver {
       // take default action
       return null;
     }
-    return new StoreScanner(store, scanInfo, OptionalInt.empty(), scanners,
+    return new StoreScanner((HStore) store, scanInfo, OptionalInt.empty(), scanners,
         ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
   }
 
@@ -210,7 +206,7 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver {
       // take default action
       return null;
     }
-    return new StoreScanner(store, scanInfo, OptionalInt.empty(), scanners, scanType,
+    return new StoreScanner((HStore) store, scanInfo, OptionalInt.empty(), scanners, scanType,
         store.getSmallestReadPoint(), earliestPutTs);
   }
 
@@ -223,7 +219,7 @@ public class ZooKeeperScanPolicyObserver implements RegionObserver {
       // take default action
       return null;
     }
-    return new StoreScanner(store, scanInfo, scan, targetCols,
-      ((HStore)store).getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED));
+    return new StoreScanner((HStore) store, scanInfo, scan, targetCols,
+        ((HStore) store).getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED));
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 41a9839..e8b7d11 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -17,6 +17,11 @@
  */
 package org.apache.hadoop.hbase.mapreduce;
 
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TASK_KEY;
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TIME_KEY;
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY;
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.MAJOR_COMPACTION_KEY;
+
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
@@ -43,8 +48,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
@@ -52,10 +61,8 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
@@ -65,13 +72,9 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -85,6 +88,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 
@@ -413,13 +417,13 @@ public class HFileOutputFormat2
 
       private void close(final StoreFileWriter w) throws IOException {
         if (w != null) {
-          w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY,
+          w.appendFileInfo(BULKLOAD_TIME_KEY,
               Bytes.toBytes(System.currentTimeMillis()));
-          w.appendFileInfo(StoreFile.BULKLOAD_TASK_KEY,
+          w.appendFileInfo(BULKLOAD_TASK_KEY,
               Bytes.toBytes(context.getTaskAttemptID().toString()));
-          w.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY,
+          w.appendFileInfo(MAJOR_COMPACTION_KEY,
               Bytes.toBytes(true));
-          w.appendFileInfo(StoreFile.EXCLUDE_FROM_MINOR_COMPACTION_KEY,
+          w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY,
               Bytes.toBytes(compactionExclude));
           w.appendTrackedTimestampsToMetadata();
           w.close();

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index bb01459..7912340 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.mapreduce.JobUtil;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
@@ -62,6 +61,7 @@ import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
 import org.apache.hadoop.util.LineReader;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /*
  * The CompactionTool allows to execute a compaction specifying a:
@@ -165,11 +165,11 @@ public class CompactionTool extends Configured implements Tool {
         if (!compaction.isPresent()) {
           break;
         }
-        List<StoreFile> storeFiles =
+        List<HStoreFile> storeFiles =
             store.compact(compaction.get(), NoLimitThroughputController.INSTANCE);
         if (storeFiles != null && !storeFiles.isEmpty()) {
           if (keepCompactedFiles && deleteCompacted) {
-            for (StoreFile storeFile: storeFiles) {
+            for (HStoreFile storeFile: storeFiles) {
               fs.delete(storeFile.getPath(), false);
             }
           }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index cbff2de..0b5a929 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.mapreduce;
 
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.BLOOM_FILTER_TYPE_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -85,7 +86,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.TestHRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -1162,7 +1162,7 @@ public class TestHFileOutputFormat2  {
        Reader reader = HFile.createReader(fs, dataFilePath, new CacheConfig(conf), true, conf);
         Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
 
-        byte[] bloomFilter = fileInfo.get(StoreFile.BLOOM_FILTER_TYPE_KEY);
+        byte[] bloomFilter = fileInfo.get(BLOOM_FILTER_TYPE_KEY);
         if (bloomFilter == null) bloomFilter = Bytes.toBytes("NONE");
         assertEquals("Incorrect bloom filter used for column family " + 
familyStr +
           "(reader: " + reader + ")",

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index d17546e..4321dc8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -33,14 +33,14 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.io.MultipleIOException;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Function;
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
@@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.Collections2;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 
 /**
- * Utility class to handle the removal of HFiles (or the respective {@link StoreFile StoreFiles})
+ * Utility class to handle the removal of HFiles (or the respective {@link HStoreFile StoreFiles})
  * for a HRegion from the {@link FileSystem}. The hfiles will be archived or deleted, depending on
  * the state of the system.
  */
@@ -226,7 +226,7 @@ public class HFileArchiver {
    * @throws IOException if the files could not be correctly disposed.
    */
   public static void archiveStoreFiles(Configuration conf, FileSystem fs, HRegionInfo regionInfo,
-      Path tableDir, byte[] family, Collection<StoreFile> compactedFiles)
+      Path tableDir, byte[] family, Collection<HStoreFile> compactedFiles)
       throws IOException, FailedArchiveException {
 
     // sometimes in testing, we don't have rss, so we need to check for that
@@ -479,13 +479,13 @@ public class HFileArchiver {
    * @throws IOException if a file cannot be deleted. All files will be attempted to deleted before
    *           throwing the exception, rather than failing at the first file.
    */
-  private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
+  private static void deleteStoreFilesWithoutArchiving(Collection<HStoreFile> compactedFiles)
       throws IOException {
     LOG.debug("Deleting store files without archiving.");
     List<IOException> errors = new ArrayList<>(0);
-    for (StoreFile hsf : compactedFiles) {
+    for (HStoreFile hsf : compactedFiles) {
       try {
-        hsf.deleteReader();
+        hsf.deleteStoreFile();
       } catch (IOException e) {
         LOG.error("Failed to delete store file:" + hsf.getPath());
         errors.add(e);
@@ -524,16 +524,16 @@ public class HFileArchiver {
   }
 
   /**
-   * Convert the {@link StoreFile} into something we can manage in the archive
+   * Convert the {@link HStoreFile} into something we can manage in the archive
    * methods
    */
-  private static class StoreToFile extends FileConverter<StoreFile> {
+  private static class StoreToFile extends FileConverter<HStoreFile> {
     public StoreToFile(FileSystem fs) {
       super(fs);
     }
 
     @Override
-    public File apply(StoreFile input) {
+    public File apply(HStoreFile input) {
       return new FileableStoreFile(fs, input);
     }
   }
@@ -656,20 +656,20 @@ public class HFileArchiver {
   }
 
   /**
-   * {@link File} adapter for a {@link StoreFile} living on a {@link FileSystem}
+   * {@link File} adapter for a {@link HStoreFile} living on a {@link FileSystem}
    * .
    */
   private static class FileableStoreFile extends File {
-    StoreFile file;
+    HStoreFile file;
 
-    public FileableStoreFile(FileSystem fs, StoreFile store) {
+    public FileableStoreFile(FileSystem fs, HStoreFile store) {
       super(fs);
       this.file = store;
     }
 
     @Override
     public void delete() throws IOException {
-      file.deleteReader();
+      file.deleteStoreFile();
     }
 
     @Override
@@ -690,7 +690,7 @@ public class HFileArchiver {
 
     @Override
     public void close() throws IOException {
-      file.closeReader(true);
+      file.closeStoreFile(true);
     }
 
     @Override

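Note: as the HFileArchiver hunks above show, callers now work with the concrete HStoreFile, and the reader-lifecycle methods are renamed: closeReader(boolean) becomes closeStoreFile(boolean), deleteReader() becomes deleteStoreFile(). A minimal sketch of a caller after this change (not from the patch itself), assuming the FileSystem, Path, Configuration and CacheConfig are supplied elsewhere:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HStoreFile;

public class StoreFileLifecycleSketch {
  static void dropFile(FileSystem fs, Path path, Configuration conf, CacheConfig cacheConf)
      throws IOException {
    // Construct the concrete HStoreFile directly, as the mob code further below now does.
    HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
    try {
      // ... read from the file ...
    } finally {
      sf.closeStoreFile(true);  // was sf.closeReader(true) before this commit
    }
    sf.deleteStoreFile();       // was sf.deleteReader(); removes the underlying hfile
  }
}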
http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index 9318b9a..60e5f40 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -189,7 +189,7 @@ public interface RegionObserver extends Coprocessor {
    * @param tracker tracker used to track the life cycle of a compaction
    */
   default void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
-      List<StoreFile> candidates, CompactionLifeCycleTracker tracker) throws IOException {}
+      List<? extends StoreFile> candidates, CompactionLifeCycleTracker tracker) throws IOException {}
 
   /**
   * Called after the {@link StoreFile}s to compact have been selected from the available
@@ -200,7 +200,7 @@ public interface RegionObserver extends Coprocessor {
    * @param tracker tracker used to track the life cycle of a compaction
    */
   default void postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
-      ImmutableList<StoreFile> selected, CompactionLifeCycleTracker tracker) {}
+      ImmutableList<? extends StoreFile> selected, CompactionLifeCycleTracker tracker) {}
 
   /**
   * Called prior to writing the {@link StoreFile}s selected for compaction into a new

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
index 0c915d1..18ddb6a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.Optional;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
@@ -48,7 +49,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  * <p>This type works in tandem with the {@link Reference} type.  This class
  * is used reading while Reference is used writing.
  *
- * <p>This file is not splitable.  Calls to {@link #midkey()} return null.
+ * <p>This file is not splitable.  Calls to {@link #midKey()} return null.
  */
 @InterfaceAudience.Private
 public class HalfStoreFileReader extends StoreFileReader {
@@ -60,7 +61,7 @@ public class HalfStoreFileReader extends StoreFileReader {
 
   protected final Cell splitCell;
 
-  private Cell firstKey = null;
+  private Optional<Cell> firstKey = null;
 
   private boolean firstKeySeeked = false;
 
@@ -258,8 +259,8 @@ public class HalfStoreFileReader extends StoreFileReader {
       @Override
       public boolean seekBefore(Cell key) throws IOException {
         if (top) {
-          Cell fk = getFirstKey();
-          if (getComparator().compareKeyIgnoresMvcc(key, fk) <= 0) {
+          Optional<Cell> fk = getFirstKey();
+          if (getComparator().compareKeyIgnoresMvcc(key, fk.get()) <= 0) {
             return false;
           }
         } else {
@@ -303,7 +304,7 @@ public class HalfStoreFileReader extends StoreFileReader {
   }
   
   @Override
-  public Cell getLastKey() {
+  public Optional<Cell> getLastKey() {
     if (top) {
       return super.getLastKey();
     }
@@ -311,7 +312,7 @@ public class HalfStoreFileReader extends StoreFileReader {
     HFileScanner scanner = getScanner(true, true);
     try {
       if (scanner.seekBefore(this.splitCell)) {
-        return scanner.getKey();
+        return Optional.ofNullable(scanner.getKey());
       }
     } catch (IOException e) {
       LOG.warn("Failed seekBefore " + Bytes.toStringBinary(this.splitkey), e);
@@ -320,22 +321,22 @@ public class HalfStoreFileReader extends StoreFileReader {
         scanner.close();
       }
     }
-    return null;
+    return Optional.empty();
   }
 
   @Override
-  public Cell midkey() throws IOException {
+  public Optional<Cell> midKey() throws IOException {
     // Returns null to indicate file is not splitable.
-    return null;
+    return Optional.empty();
   }
 
   @Override
-  public Cell getFirstKey() {
+  public Optional<Cell> getFirstKey() {
     if (!firstKeySeeked) {
       HFileScanner scanner = getScanner(true, true, false);
       try {
         if (scanner.seekTo()) {
-          this.firstKey = scanner.getKey();
+          this.firstKey = Optional.ofNullable(scanner.getKey());
         }
         firstKeySeeked = true;
       } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 3d6cdaf..9cf00b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -31,6 +31,7 @@ import java.util.Collection;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
@@ -434,21 +435,21 @@ public class HFile {
 
     Map<byte[], byte[]> loadFileInfo() throws IOException;
 
-    Cell getLastKey();
+    Optional<Cell> getLastKey();
 
-    Cell midkey() throws IOException;
+    Optional<Cell> midKey() throws IOException;
 
     long length();
 
     long getEntries();
 
-    Cell getFirstKey();
+    Optional<Cell> getFirstKey();
 
     long indexSize();
 
-    byte[] getFirstRowKey();
+    Optional<byte[]> getFirstRowKey();
 
-    byte[] getLastRowKey();
+    Optional<byte[]> getLastRowKey();
 
     FixedFileTrailer getTrailer();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index dcfffb8..4524350 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -544,7 +544,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
     }
 
     try {
-      out.println("Mid-key: " + 
(CellUtil.getCellKeyAsString(reader.midkey())));
+      out.println("Mid-key: " + 
reader.midKey().map(CellUtil::getCellKeyAsString));
     } catch (Exception e) {
       out.println ("Unable to retrieve the midkey");
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index bf722de..381279a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -23,6 +23,7 @@ import java.nio.ByteBuffer;
 import java.security.Key;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
@@ -338,14 +339,12 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
     }
   }
 
-  private String toStringFirstKey() {
-    if(getFirstKey() == null)
-      return null;
-    return CellUtil.getCellKeyAsString(getFirstKey());
+  private Optional<String> toStringFirstKey() {
+    return getFirstKey().map(CellUtil::getCellKeyAsString);
   }
 
-  private String toStringLastKey() {
-    return CellUtil.toString(getLastKey(), false);
+  private Optional<String> toStringLastKey() {
+    return getLastKey().map(CellUtil::getCellKeyAsString);
   }
 
   @Override
@@ -382,12 +381,12 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
    *         first KeyValue.
    */
   @Override
-  public Cell getFirstKey() {
+  public Optional<Cell> getFirstKey() {
     if (dataBlockIndexReader == null) {
       throw new BlockIndexNotLoadedException();
     }
-    return dataBlockIndexReader.isEmpty() ? null
-        : dataBlockIndexReader.getRootBlockKey(0);
+    return dataBlockIndexReader.isEmpty() ? Optional.empty()
+        : Optional.of(dataBlockIndexReader.getRootBlockKey(0));
   }
 
   /**
@@ -397,10 +396,9 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
    * @return the first row key, or null if the file is empty.
    */
   @Override
-  public byte[] getFirstRowKey() {
-    Cell firstKey = getFirstKey();
+  public Optional<byte[]> getFirstRowKey() {
     // We have to copy the row part to form the row key alone
-    return firstKey == null? null: CellUtil.cloneRow(firstKey);
+    return getFirstKey().map(CellUtil::cloneRow);
   }
 
   /**
@@ -410,9 +408,9 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
    * @return the last row key, or null if the file is empty.
    */
   @Override
-  public byte[] getLastRowKey() {
-    Cell lastKey = getLastKey();
-    return lastKey == null? null: CellUtil.cloneRow(lastKey);
+  public Optional<byte[]> getLastRowKey() {
+    // We have to copy the row part to form the row key alone
+    return getLastKey().map(CellUtil::cloneRow);
   }
 
   /** @return number of KV entries in this HFile */
@@ -1550,8 +1548,8 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
    *         key
    */
   @Override
-  public Cell getLastKey() {
-    return dataBlockIndexReader.isEmpty() ? null : lastKeyCell;
+  public Optional<Cell> getLastKey() {
+    return dataBlockIndexReader.isEmpty() ? Optional.empty() : Optional.of(lastKeyCell);
   }
 
   /**
@@ -1560,8 +1558,8 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
    * @throws IOException
    */
   @Override
-  public Cell midkey() throws IOException {
-    return dataBlockIndexReader.midkey();
+  public Optional<Cell> midKey() throws IOException {
+    return Optional.ofNullable(dataBlockIndexReader.midkey());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 98cd16d..f67aa5b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -18,12 +18,14 @@
 
 package org.apache.hadoop.hbase.master.assignment;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
@@ -41,7 +43,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
@@ -58,19 +60,21 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
 import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.quotas.QuotaExceededException;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState;
 
 /**
  * The procedure to split a region in a table.
@@ -636,8 +640,8 @@ public class SplitTableRegionProcedure
     }
   }
 
-  private Pair<Path, Path> splitStoreFile(final HRegionFileSystem regionFs,
-      final byte[] family, final StoreFile sf) throws IOException {
+  private Pair<Path, Path> splitStoreFile(HRegionFileSystem regionFs, byte[] 
family, HStoreFile sf)
+      throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("pid=" + getProcId() + " splitting started for store file: " +
           sf.getPath() + " for region: " + 
getParentRegion().getShortNameToLog());
@@ -663,7 +667,7 @@ public class SplitTableRegionProcedure
   private class StoreFileSplitter implements Callable<Pair<Path,Path>> {
     private final HRegionFileSystem regionFs;
     private final byte[] family;
-    private final StoreFile sf;
+    private final HStoreFile sf;
 
     /**
      * Constructor that takes what it needs to split
@@ -671,8 +675,7 @@ public class SplitTableRegionProcedure
      * @param family Family that contains the store file
      * @param sf which file
      */
-    public StoreFileSplitter(final HRegionFileSystem regionFs, final byte[] family,
-        final StoreFile sf) {
+    public StoreFileSplitter(HRegionFileSystem regionFs, byte[] family, HStoreFile sf) {
       this.regionFs = regionFs;
       this.sf = sf;
       this.family = family;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index c1fef92..957c182 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.master.balancer;
 
 import com.google.common.annotations.VisibleForTesting;
+
 import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -42,7 +43,6 @@ import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegi
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Optional;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
@@ -1241,7 +1242,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
 
   /**
    * Compute a cost of a potential cluster configuration based upon where
-   * {@link org.apache.hadoop.hbase.regionserver.StoreFile}s are located.
+   * {@link org.apache.hadoop.hbase.regionserver.HStoreFile}s are located.
    */
   static abstract class LocalityBasedCostFunction extends CostFunction {
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java
index 9dc32be..397570c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/CachedMobFile.java
@@ -24,11 +24,10 @@ import java.util.concurrent.atomic.AtomicLong;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Cached mob file.
@@ -39,7 +38,7 @@ public class CachedMobFile extends MobFile implements Comparable<CachedMobFile>
   private long accessCount;
   private AtomicLong referenceCount = new AtomicLong(0);
 
-  public CachedMobFile(StoreFile sf) {
+  public CachedMobFile(HStoreFile sf) {
     super(sf);
   }
 
@@ -47,7 +46,7 @@ public class CachedMobFile extends MobFile implements Comparable<CachedMobFile>
       CacheConfig cacheConf) throws IOException {
     // XXX: primaryReplica is only used for constructing the key of block cache so it is not a
     // critical problem if we pass the wrong value, so here we always pass true. Need to fix later.
-    StoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
+    HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
     return new CachedMobFile(sf);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
index 1badeb2..e8ada97 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
@@ -91,7 +91,7 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {
         }
       };
 
-  public DefaultMobStoreCompactor(Configuration conf, Store store) {
+  public DefaultMobStoreCompactor(Configuration conf, HStore store) {
     super(conf, store);
     // The mob cells reside in the mob-enabled column family which is held by HMobStore.
     // During the compaction, the compactor reads the cells from the mob files and

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
index bb0301e..bef73f2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
@@ -32,19 +32,19 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
 import org.apache.hadoop.hbase.regionserver.HMobStore;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputControlUtil;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * An implementation of the StoreFlusher. It extends the DefaultStoreFlusher.
@@ -70,8 +70,11 @@ public class DefaultMobStoreFlusher extends DefaultStoreFlusher {
   private Path targetPath;
   private HMobStore mobStore;
 
-  public DefaultMobStoreFlusher(Configuration conf, Store store) throws IOException {
+  public DefaultMobStoreFlusher(Configuration conf, HStore store) throws IOException {
     super(conf, store);
+    if (!(store instanceof HMobStore)) {
+      throw new IllegalArgumentException("The store " + store + " is not a HMobStore");
+    }
     mobCellValueSizeThreshold = store.getColumnFamilyDescriptor().getMobThreshold();
     this.targetPath = MobUtils.getMobFamilyPath(conf, store.getTableName(),
         store.getColumnFamilyName());

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java
index 929bfd2..91702c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobFile.java
@@ -26,12 +26,11 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * The mob file.
@@ -39,13 +38,13 @@ import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
 @InterfaceAudience.Private
 public class MobFile {
 
-  private StoreFile sf;
+  private HStoreFile sf;
 
   // internal use only for sub classes
   protected MobFile() {
   }
 
-  protected MobFile(StoreFile sf) {
+  protected MobFile(HStoreFile sf) {
     this.sf = sf;
   }
 
@@ -56,7 +55,7 @@ public class MobFile {
    * @throws IOException
    */
   public StoreFileScanner getScanner() throws IOException {
-    List<StoreFile> sfs = new ArrayList<>();
+    List<HStoreFile> sfs = new ArrayList<>();
     sfs.add(sf);
     List<StoreFileScanner> sfScanners = 
StoreFileScanner.getScannersForStoreFiles(sfs, false, true,
         false, false, sf.getMaxMemstoreTS());
@@ -86,7 +85,7 @@ public class MobFile {
   public Cell readCell(Cell search, boolean cacheMobBlocks, long readPt) throws IOException {
     Cell result = null;
     StoreFileScanner scanner = null;
-    List<StoreFile> sfs = new ArrayList<>();
+    List<HStoreFile> sfs = new ArrayList<>();
     sfs.add(sf);
     try {
       List<StoreFileScanner> sfScanners = 
StoreFileScanner.getScannersForStoreFiles(sfs,
@@ -129,7 +128,7 @@ public class MobFile {
    */
   public void close() throws IOException {
     if (sf != null) {
-      sf.closeReader(false);
+      sf.closeStoreFile(false);
       sf = null;
     }
   }
@@ -147,7 +146,7 @@ public class MobFile {
       throws IOException {
     // XXX: primaryReplica is only used for constructing the key of block cache so it is not a
     // critical problem if we pass the wrong value, so here we always pass true. Need to fix later.
-    StoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
+    HStoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
     return new MobFile(sf);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobStoreEngine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobStoreEngine.java
index bdd336f..ee1fe7d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobStoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobStoreEngine.java
@@ -20,9 +20,9 @@ package org.apache.hadoop.hbase.mob;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * MobStoreEngine creates the mob specific compactor, and store flusher.
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.regionserver.Store;
 public class MobStoreEngine extends DefaultStoreEngine {
 
   @Override
-  protected void createStoreFlusher(Configuration conf, Store store) throws IOException {
+  protected void createStoreFlusher(Configuration conf, HStore store) throws IOException {
     // When using MOB, we use DefaultMobStoreFlusher always
     // Just use the compactor and compaction policy as that in DefaultStoreEngine. We can have MOB
     // specific compactor and policy when that is implemented.
@@ -42,7 +42,7 @@ public class MobStoreEngine extends DefaultStoreEngine {
    * Creates the DefaultMobCompactor.
    */
   @Override
-  protected void createCompactor(Configuration conf, Store store) throws IOException {
+  protected void createCompactor(Configuration conf, HStore store) throws IOException {
     compactor = new DefaultMobStoreCompactor(conf, store);
   }
 }
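
As a sketch of the knock-on effect for subclasses (not part of this commit): engines overriding these hooks must now accept the concrete HStore type. MyStoreEngine below is a hypothetical name:

    public class MyStoreEngine extends DefaultStoreEngine {
      @Override
      protected void createCompactor(Configuration conf, HStore store) throws IOException {
        // The hook now receives HStore rather than the Store interface.
        compactor = new DefaultMobStoreCompactor(conf, store);
      }
    }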

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index 2592b72..54f1373 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
 import org.apache.hadoop.hbase.client.Scan;
@@ -70,7 +69,6 @@ import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactor;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
@@ -78,6 +76,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * The mob utilities
@@ -315,7 +314,7 @@ public final class MobUtils {
       // no file found
       return;
     }
-    List<StoreFile> filesToClean = new ArrayList<>();
+    List<HStoreFile> filesToClean = new ArrayList<>();
     int deletedFileCount = 0;
     for (FileStatus file : stats) {
       String fileName = file.getPath().getName();
@@ -467,7 +466,7 @@ public final class MobUtils {
    * @throws IOException
    */
   public static void removeMobFiles(Configuration conf, FileSystem fs, TableName tableName,
-      Path tableDir, byte[] family, Collection<StoreFile> storeFiles) throws IOException {
+      Path tableDir, byte[] family, Collection<HStoreFile> storeFiles) throws IOException {
     HFileArchiver.archiveStoreFiles(conf, fs, getMobRegionInfo(tableName), tableDir, family,
         storeFiles);
   }
@@ -721,7 +720,7 @@ public final class MobUtils {
    */
   private static void validateMobFile(Configuration conf, FileSystem fs, Path path,
       CacheConfig cacheConfig, boolean primaryReplica) throws IOException {
-    StoreFile storeFile = null;
+    HStoreFile storeFile = null;
     try {
       storeFile = new HStoreFile(fs, path, conf, cacheConfig, BloomType.NONE, primaryReplica);
       storeFile.initReader();
@@ -730,7 +729,7 @@ public final class MobUtils {
       throw e;
     } finally {
       if (storeFile != null) {
-        storeFile.closeReader(false);
+        storeFile.closeStoreFile(false);
       }
     }
   }
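
Callers of removeMobFiles now have to hand over HStoreFile instances rather than the StoreFile interface. A small sketch (not part of this commit), where mobFilePaths is a hypothetical list of mob file paths and the other arguments are assumed to be in scope:

    List<HStoreFile> storeFiles = new ArrayList<>();
    for (Path p : mobFilePaths) {
      storeFiles.add(new HStoreFile(fs, p, conf, cacheConfig, BloomType.NONE, true));
    }
    // Archives the given mob files for the table's mob region.
    MobUtils.removeMobFiles(conf, fs, tableName, tableDir, family, storeFiles);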

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java
index 7916779..aaf545b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactionRequest.java
@@ -25,10 +25,10 @@ import java.util.List;
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * An implementation of {@link MobCompactionRequest} that is used in
@@ -106,8 +106,7 @@ public class PartitionedMobCompactionRequest extends MobCompactionRequest {
      * Set start key of this partition, only if the input startKey is less than
      * the current start key.
      */
-    public void setStartKey(final byte[] startKey)
-    {
+    public void setStartKey(final byte[] startKey) {
       if ((this.startKey == null) || (Bytes.compareTo(startKey, this.startKey) < 0)) {
         this.startKey = startKey;
       }
@@ -227,7 +226,7 @@ public class PartitionedMobCompactionRequest extends MobCompactionRequest {
    */
   protected static class CompactionDelPartition {
     private List<Path> delFiles = new ArrayList<Path>();
-    private List<StoreFile> storeFiles = new ArrayList<>();
+    private List<HStoreFile> storeFiles = new ArrayList<>();
     private CompactionDelPartitionId id;
 
     public CompactionDelPartition(CompactionDelPartitionId id) {
@@ -241,11 +240,11 @@ public class PartitionedMobCompactionRequest extends MobCompactionRequest {
     void addDelFile(FileStatus file) {
       delFiles.add(file.getPath());
     }
-    public void addStoreFile(final StoreFile file) {
+    public void addStoreFile(HStoreFile file) {
       storeFiles.add(file);
     }
 
-    public List<StoreFile> getStoreFiles() {
+    public List<HStoreFile> getStoreFiles() {
       return storeFiles;
     }
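
A small usage sketch of the narrowed del-partition API (not part of this commit); delId and sf are assumed to exist as a CompactionDelPartitionId and an HStoreFile:

    CompactionDelPartition delPartition = new CompactionDelPartition(delId);
    delPartition.addStoreFile(sf);                        // now takes HStoreFile
    List<HStoreFile> delStoreFiles = delPartition.getStoreFiles();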
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
index e40f3a7..1fc2902 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
@@ -18,6 +18,10 @@
  */
 package org.apache.hadoop.hbase.mob.compactions;
 
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.BULKLOAD_TIME_KEY;
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.MOB_CELLS_COUNT;
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.SKIP_RESET_SEQ_ID;
+
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -50,7 +54,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.TagUtil;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -75,7 +78,6 @@ import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
@@ -85,6 +87,7 @@ import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 
@@ -225,8 +228,8 @@ public class PartitionedMobCompactor extends MobCompactor {
 
         // Get delId from the file
         try (Reader reader = HFile.createReader(fs, linkedFile.getPath(), conf)) {
-          delId.setStartKey(reader.getFirstRowKey());
-          delId.setEndKey(reader.getLastRowKey());
+          delId.setStartKey(reader.getFirstRowKey().get());
+          delId.setEndKey(reader.getLastRowKey().get());
         }
         CompactionDelPartition delPartition = delFilesToCompact.get(delId);
         if (delPartition == null) {
@@ -266,8 +269,8 @@ public class PartitionedMobCompactor extends MobCompactor {
             // get startKey and endKey from the file and update partition
             // TODO: is it possible to skip read of most hfiles?
             try (Reader reader = HFile.createReader(fs, linkedFile.getPath(), conf)) {
-              compactionPartition.setStartKey(reader.getFirstRowKey());
-              compactionPartition.setEndKey(reader.getLastRowKey());
+              compactionPartition.setStartKey(reader.getFirstRowKey().get());
+              compactionPartition.setEndKey(reader.getLastRowKey().get());
             }
           }
 
@@ -335,7 +338,7 @@ public class PartitionedMobCompactor extends MobCompactor {
     try {
       for (CompactionDelPartition delPartition : request.getDelPartitions()) {
         for (Path newDelPath : delPartition.listDelFiles()) {
-          StoreFile sf =
+          HStoreFile sf =
              new HStoreFile(fs, newDelPath, conf, compactionCacheConfig, BloomType.NONE, true);
           // pre-create reader of a del file to avoid race condition when opening the reader in each
           // partition.
@@ -361,7 +364,8 @@ public class PartitionedMobCompactor extends MobCompactor {
       for (CompactionDelPartition delPartition : request.getDelPartitions()) {
         LOG.info(delPartition.listDelFiles());
         try {
-          MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(), delPartition.getStoreFiles());
+          MobUtils.removeMobFiles(conf, fs, tableName, mobTableDir, column.getName(),
+            delPartition.getStoreFiles());
         } catch (IOException e) {
           LOG.error("Failed to archive the del files " + 
delPartition.getStoreFiles(), e);
         }
@@ -397,11 +401,11 @@ public class PartitionedMobCompactor extends MobCompactor {
   }
 
   @VisibleForTesting
-  List<StoreFile> getListOfDelFilesForPartition(final CompactionPartition partition,
+  List<HStoreFile> getListOfDelFilesForPartition(final CompactionPartition partition,
       final List<CompactionDelPartition> delPartitions) {
     // Binary search for startKey and endKey
 
-    List<StoreFile> result = new ArrayList<>();
+    List<HStoreFile> result = new ArrayList<>();
 
     DelPartitionComparator comparator = new DelPartitionComparator(false);
     CompactionDelPartitionId id = new CompactionDelPartitionId(null, partition.getStartKey());
@@ -473,7 +477,7 @@ public class PartitionedMobCompactor extends MobCompactor {
         // Search the delPartitions and collect all the delFiles for the partition
         // One optimization can do is that if there is no del file, we do not need to
         // come up with startKey/endKey.
-        List<StoreFile> delFiles = getListOfDelFilesForPartition(partition,
+        List<HStoreFile> delFiles = getListOfDelFilesForPartition(partition,
             request.getDelPartitions());
 
         results.put(partition.getPartitionId(), pool.submit(new Callable<List<Path>>() {
@@ -521,7 +525,7 @@ public class PartitionedMobCompactor extends MobCompactor {
    */
   private List<Path> compactMobFilePartition(PartitionedMobCompactionRequest request,
                                              CompactionPartition partition,
-                                             List<StoreFile> delFiles,
+                                             List<HStoreFile> delFiles,
                                              Connection connection,
                                              Table table) throws IOException {
     if (MobUtils.isMobFileExpired(column, EnvironmentEdgeManager.currentTime(),
@@ -550,9 +554,9 @@ public class PartitionedMobCompactor extends MobCompactor {
       // clean the bulkload directory to avoid loading old files.
       fs.delete(bulkloadPathOfPartition, true);
       // add the selected mob files and del files into filesToCompact
-      List<StoreFile> filesToCompact = new ArrayList<>();
+      List<HStoreFile> filesToCompact = new ArrayList<>();
       for (int i = offset; i < batch + offset; i++) {
-        StoreFile sf = new HStoreFile(fs, files.get(i).getPath(), conf, compactionCacheConfig,
+        HStoreFile sf = new HStoreFile(fs, files.get(i).getPath(), conf, compactionCacheConfig,
             BloomType.NONE, true);
         filesToCompact.add(sf);
       }
@@ -572,10 +576,10 @@ public class PartitionedMobCompactor extends MobCompactor {
    * Closes the readers of store files.
    * @param storeFiles The store files to be closed.
    */
-  private void closeStoreFileReaders(List<StoreFile> storeFiles) {
-    for (StoreFile storeFile : storeFiles) {
+  private void closeStoreFileReaders(List<HStoreFile> storeFiles) {
+    for (HStoreFile storeFile : storeFiles) {
       try {
-        storeFile.closeReader(true);
+        storeFile.closeStoreFile(true);
       } catch (IOException e) {
         LOG.warn("Failed to close the reader on store file " + 
storeFile.getPath(), e);
       }
@@ -600,14 +604,14 @@ public class PartitionedMobCompactor extends MobCompactor {
   private void compactMobFilesInBatch(PartitionedMobCompactionRequest request,
                                       CompactionPartition partition,
                                       Connection connection, Table table,
-                                      List<StoreFile> filesToCompact, int batch,
+                                      List<HStoreFile> filesToCompact, int batch,
                                       Path bulkloadPathOfPartition, Path bulkloadColumnPath,
                                       List<Path> newFiles)
       throws IOException {
     // open scanner to the selected mob files and del files.
     StoreScanner scanner = createScanner(filesToCompact, ScanType.COMPACT_DROP_DELETES);
     // the mob files to be compacted, not include the del files.
-    List<StoreFile> mobFilesToCompact = filesToCompact.subList(0, batch);
+    List<HStoreFile> mobFilesToCompact = filesToCompact.subList(0, batch);
     // Pair(maxSeqId, cellsCount)
     Pair<Long, Long> fileInfo = getFileInfo(mobFilesToCompact);
     // open writers for the mob files and new ref store files.
@@ -726,7 +730,7 @@ public class PartitionedMobCompactor extends MobCompactor {
       if (delFilePaths.size() - offset < compactionBatchSize) {
         batch = delFilePaths.size() - offset;
       }
-      List<StoreFile> batchedDelFiles = new ArrayList<>();
+      List<HStoreFile> batchedDelFiles = new ArrayList<>();
       if (batch == 1) {
         // only one file left, do not compact it, directly add it to the new files.
         paths.add(delFilePaths.get(offset));
@@ -753,7 +757,7 @@ public class PartitionedMobCompactor extends MobCompactor {
    * @throws IOException if IO failure is encountered
    */
   private Path compactDelFilesInBatch(PartitionedMobCompactionRequest request,
-    List<StoreFile> delFiles) throws IOException {
+    List<HStoreFile> delFiles) throws IOException {
     // create a scanner for the del files.
     StoreScanner scanner = createScanner(delFiles, ScanType.COMPACT_RETAIN_DELETES);
     StoreFileWriter writer = null;
@@ -803,7 +807,7 @@ public class PartitionedMobCompactor extends MobCompactor {
    * @return The store scanner.
    * @throws IOException if IO failure is encountered
    */
-  private StoreScanner createScanner(List<StoreFile> filesToCompact, ScanType scanType)
+  private StoreScanner createScanner(List<HStoreFile> filesToCompact, ScanType scanType)
       throws IOException {
     List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(filesToCompact,
       false, true, false, false, HConstants.LATEST_TIMESTAMP);
@@ -864,8 +868,8 @@ public class PartitionedMobCompactor extends MobCompactor {
     throws IOException {
     if (writer != null) {
       writer.appendMetadata(maxSeqId, false);
-      writer.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(bulkloadTime));
-      writer.appendFileInfo(StoreFile.SKIP_RESET_SEQ_ID, Bytes.toBytes(true));
+      writer.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(bulkloadTime));
+      writer.appendFileInfo(SKIP_RESET_SEQ_ID, Bytes.toBytes(true));
       try {
         writer.close();
       } catch (IOException e) {
@@ -880,14 +884,14 @@ public class PartitionedMobCompactor extends MobCompactor {
    * @return The pair of the max seqId and number of cells of the store files.
    * @throws IOException if IO failure is encountered
    */
-  private Pair<Long, Long> getFileInfo(List<StoreFile> storeFiles) throws IOException {
+  private Pair<Long, Long> getFileInfo(List<HStoreFile> storeFiles) throws IOException {
     long maxSeqId = 0;
     long maxKeyCount = 0;
-    for (StoreFile sf : storeFiles) {
+    for (HStoreFile sf : storeFiles) {
       // the readers will be closed later after the merge.
       maxSeqId = Math.max(maxSeqId, sf.getMaxSequenceId());
       sf.initReader();
-      byte[] count = sf.getReader().loadFileInfo().get(StoreFile.MOB_CELLS_COUNT);
+      byte[] count = sf.getReader().loadFileInfo().get(MOB_CELLS_COUNT);
       if (count != null) {
         maxKeyCount += Bytes.toLong(count);
       }
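
The added .get() calls above suggest that HFile.Reader#getFirstRowKey/getLastRowKey now return java.util.Optional. A sketch (not part of this commit) that handles the empty case explicitly instead of calling get() directly, assuming fs, path, conf and delId are in scope:

    try (Reader reader = HFile.createReader(fs, path, conf)) {
      Optional<byte[]> firstKey = reader.getFirstRowKey();
      Optional<byte[]> lastKey = reader.getLastRowKey();
      if (firstKey.isPresent() && lastKey.isPresent()) {
        delId.setStartKey(firstKey.get());
        delId.setEndKey(lastKey.get());
      }
    }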

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChangedReadersObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChangedReadersObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChangedReadersObserver.java
index 07b72e2..065fd37 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChangedReadersObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChangedReadersObserver.java
@@ -16,11 +16,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.util.List;
+
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -40,5 +40,5 @@ public interface ChangedReadersObserver {
    * @param memStoreScanners scanner of current memstore
    * @throws IOException e
    */
-  void updateReaders(List<StoreFile> sfs, List<KeyValueScanner> memStoreScanners) throws IOException;
+  void updateReaders(List<HStoreFile> sfs, List<KeyValueScanner> memStoreScanners) throws IOException;
 }
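
A minimal implementor sketch (not part of this commit), assuming updateReaders is the interface's only abstract method here; MyObserver is a hypothetical class:

    public class MyObserver implements ChangedReadersObserver {
      @Override
      public void updateReaders(List<HStoreFile> sfs, List<KeyValueScanner> memStoreScanners)
          throws IOException {
        // React to the store's reader set changing, e.g. after a flush or compaction.
      }
    }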

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index c31a1cf..da502c2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -69,7 +69,7 @@ public class CompactingMemStore extends AbstractMemStore {
   private static final double IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT = 0.25;
 
   private static final Log LOG = LogFactory.getLog(CompactingMemStore.class);
-  private Store store;
+  private HStore store;
   private RegionServicesForStores regionServices;
   private CompactionPipeline pipeline;
   private MemStoreCompactor compactor;
@@ -341,7 +341,7 @@ public class CompactingMemStore extends AbstractMemStore {
     return store.getSmallestReadPoint();
   }
 
-  public Store getStore() {
+  public HStore getStore() {
     return store;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
index 25e1609..f7c18f9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.security.User;
 public class DateTieredStoreEngine extends StoreEngine<DefaultStoreFlusher,
   DateTieredCompactionPolicy, DateTieredCompactor, DefaultStoreFileManager> {
   @Override
-  public boolean needsCompaction(List<StoreFile> filesCompacting) {
+  public boolean needsCompaction(List<HStoreFile> filesCompacting) {
     return compactionPolicy.needsCompaction(storeFileManager.getStorefiles(),
       filesCompacting);
   }
@@ -54,7 +54,7 @@ public class DateTieredStoreEngine extends StoreEngine<DefaultStoreFlusher,
   }
 
   @Override
-  protected void createComponents(Configuration conf, Store store, CellComparator kvComparator)
+  protected void createComponents(Configuration conf, HStore store, CellComparator kvComparator)
       throws IOException {
     this.compactionPolicy = new DateTieredCompactionPolicy(conf, store);
     this.storeFileManager =
@@ -67,13 +67,13 @@ public class DateTieredStoreEngine extends StoreEngine<DefaultStoreFlusher,
   private final class DateTieredCompactionContext extends CompactionContext {
 
     @Override
-    public List<StoreFile> preSelect(List<StoreFile> filesCompacting) {
+    public List<HStoreFile> preSelect(List<HStoreFile> filesCompacting) {
       return compactionPolicy.preSelectCompactionForCoprocessor(storeFileManager.getStorefiles(),
         filesCompacting);
     }
 
     @Override
-    public boolean select(List<StoreFile> filesCompacting, boolean isUserCompaction,
+    public boolean select(List<HStoreFile> filesCompacting, boolean isUserCompaction,
         boolean mayUseOffPeak, boolean forceMajor) throws IOException {
       request = compactionPolicy.selectCompaction(storeFileManager.getStorefiles(), filesCompacting,
         isUserCompaction, mayUseOffPeak, forceMajor);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a5f84430/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
index 70421e1..58f8bbb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreEngine.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
 import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
@@ -33,6 +32,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPoli
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Default StoreEngine creates the default compactor, policy, and store file manager, or
@@ -57,14 +57,14 @@ public class DefaultStoreEngine extends StoreEngine<
     DEFAULT_COMPACTION_POLICY_CLASS = ExploringCompactionPolicy.class;
 
   @Override
-  public boolean needsCompaction(List<StoreFile> filesCompacting) {
+  public boolean needsCompaction(List<HStoreFile> filesCompacting) {
     return compactionPolicy.needsCompaction(
         this.storeFileManager.getStorefiles(), filesCompacting);
   }
 
   @Override
   protected void createComponents(
-      Configuration conf, Store store, CellComparator kvComparator) throws IOException {
+      Configuration conf, HStore store, CellComparator kvComparator) throws IOException {
     createCompactor(conf, store);
     createCompactionPolicy(conf, store);
     createStoreFlusher(conf, store);
@@ -73,17 +73,17 @@ public class DefaultStoreEngine extends StoreEngine<
             compactionPolicy.getConf());
   }
 
-  protected void createCompactor(Configuration conf, Store store) throws IOException {
+  protected void createCompactor(Configuration conf, HStore store) throws IOException {
     String className = conf.get(DEFAULT_COMPACTOR_CLASS_KEY, DEFAULT_COMPACTOR_CLASS.getName());
     try {
       compactor = ReflectionUtils.instantiateWithCustomCtor(className,
-          new Class[] { Configuration.class, Store.class }, new Object[] { conf, store });
+          new Class[] { Configuration.class, HStore.class }, new Object[] { conf, store });
     } catch (Exception e) {
       throw new IOException("Unable to load configured compactor '" + 
className + "'", e);
     }
   }
 
-  protected void createCompactionPolicy(Configuration conf, Store store) throws IOException {
+  protected void createCompactionPolicy(Configuration conf, HStore store) throws IOException {
     String className = conf.get(
         DEFAULT_COMPACTION_POLICY_CLASS_KEY, DEFAULT_COMPACTION_POLICY_CLASS.getName());
     try {
@@ -95,12 +95,12 @@ public class DefaultStoreEngine extends StoreEngine<
     }
   }
 
-  protected void createStoreFlusher(Configuration conf, Store store) throws IOException {
+  protected void createStoreFlusher(Configuration conf, HStore store) throws IOException {
     String className = conf.get(
         DEFAULT_STORE_FLUSHER_CLASS_KEY, DEFAULT_STORE_FLUSHER_CLASS.getName());
     try {
       storeFlusher = ReflectionUtils.instantiateWithCustomCtor(className,
-          new Class[] { Configuration.class, Store.class }, new Object[] { conf, store });
+          new Class[] { Configuration.class, HStore.class }, new Object[] { conf, store });
     } catch (Exception e) {
       throw new IOException("Unable to load configured store flusher '" + 
className + "'", e);
     }
@@ -113,7 +113,7 @@ public class DefaultStoreEngine extends StoreEngine<
 
   private class DefaultCompactionContext extends CompactionContext {
     @Override
-    public boolean select(List<StoreFile> filesCompacting, boolean isUserCompaction,
+    public boolean select(List<HStoreFile> filesCompacting, boolean isUserCompaction,
         boolean mayUseOffPeak, boolean forceMajor) throws IOException {
       request = compactionPolicy.selectCompaction(storeFileManager.getStorefiles(),
           filesCompacting, isUserCompaction, mayUseOffPeak, forceMajor);
@@ -127,7 +127,7 @@ public class DefaultStoreEngine extends StoreEngine<
     }
 
     @Override
-    public List<StoreFile> preSelect(List<StoreFile> filesCompacting) {
+    public List<HStoreFile> preSelect(List<HStoreFile> filesCompacting) {
       return compactionPolicy.preSelectCompactionForCoprocessor(
           storeFileManager.getStorefiles(), filesCompacting);
     }
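
One practical consequence (sketch, not part of this commit): classes plugged in through DEFAULT_COMPACTOR_CLASS_KEY or DEFAULT_STORE_FLUSHER_CLASS_KEY are instantiated reflectively with a (Configuration, HStore) constructor, so a custom implementation must declare exactly that signature. MyCompactor is hypothetical, and DefaultCompactor is assumed to expose a matching constructor after this change:

    public class MyCompactor extends DefaultCompactor {
      public MyCompactor(Configuration conf, HStore store) {
        super(conf, store); // must match the Class[] { Configuration.class, HStore.class } lookup
      }
    }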
