http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileComparators.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileComparators.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileComparators.java
new file mode 100644
index 0000000..961e338
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileComparators.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import com.google.common.base.Function;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Ordering;
+
+import java.util.Comparator;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Useful comparators for comparing StoreFiles.
+ */
+@InterfaceAudience.Private
+final class StoreFileComparators {
+  /**
+   * Comparator that compares based on the Sequence Ids of the StoreFiles. Bulk loads that did
+   * not request a seq ID are given a seq id of -1; thus, they are placed before all non-bulk
+   * loads and bulk loads with a sequence id. Among these files, the size is used to determine the
+   * ordering, then bulkLoadTime. If there are ties, the path name is used as a tie-breaker.
+   */
+  public static final Comparator<StoreFile> SEQ_ID =
+      Ordering.compound(ImmutableList.of(Ordering.natural().onResultOf(new GetSeqId()),
+        Ordering.natural().onResultOf(new GetFileSize()).reverse(),
+        Ordering.natural().onResultOf(new GetBulkTime()),
+        Ordering.natural().onResultOf(new GetPathName())));
+
+  /**
+   * Comparator for time-aware compaction. SeqId is still the first ordering criterion to maintain
+   * MVCC.
+   */
+  public static final Comparator<StoreFile> SEQ_ID_MAX_TIMESTAMP =
+      Ordering.compound(ImmutableList.of(Ordering.natural().onResultOf(new GetSeqId()),
+        Ordering.natural().onResultOf(new GetMaxTimestamp()),
+        Ordering.natural().onResultOf(new GetFileSize()).reverse(),
+        Ordering.natural().onResultOf(new GetBulkTime()),
+        Ordering.natural().onResultOf(new GetPathName())));
+
+  private static class GetSeqId implements Function<StoreFile, Long> {
+    @Override
+    public Long apply(StoreFile sf) {
+      return sf.getMaxSequenceId();
+    }
+  }
+
+  private static class GetFileSize implements Function<StoreFile, Long> {
+    @Override
+    public Long apply(StoreFile sf) {
+      if (sf.getReader() != null) {
+        return sf.getReader().length();
+      } else {
+        // the reader may be null for the compacted files and if the archiving
+        // had failed.
+        return -1L;
+      }
+    }
+  }
+
+  private static class GetBulkTime implements Function<StoreFile, Long> {
+    @Override
+    public Long apply(StoreFile sf) {
+      return sf.getBulkLoadTimestamp().orElse(Long.MAX_VALUE);
+    }
+  }
+
+  private static class GetPathName implements Function<StoreFile, String> {
+    @Override
+    public String apply(StoreFile sf) {
+      return sf.getPath().getName();
+    }
+  }
+
+  private static class GetMaxTimestamp implements Function<StoreFile, Long> {
+    @Override
+    public Long apply(StoreFile sf) {
+      return sf.getMaximumTimestamp().orElse(Long.MAX_VALUE);
+    }
+  }
+}
\ No newline at end of file

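Note: the new class builds its comparators by composing Guava Orderings over small extractor Functions. As a rough illustration of that compound-ordering pattern (not part of the patch), the sketch below uses a hypothetical Item class in place of StoreFile, ordering by sequence id, then by size descending, then by name as the tie-breaker.

    import com.google.common.base.Function;
    import com.google.common.collect.ImmutableList;
    import com.google.common.collect.Ordering;

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    // Illustrative only: Item is a hypothetical stand-in for StoreFile.
    public class CompoundOrderingSketch {
      static class Item {
        final long seqId;
        final long size;
        final String name;
        Item(long seqId, long size, String name) {
          this.seqId = seqId;
          this.size = size;
          this.name = name;
        }
      }

      static final Function<Item, Long> GET_SEQ_ID = item -> item.seqId;
      static final Function<Item, Long> GET_SIZE = item -> item.size;
      static final Function<Item, String> GET_NAME = item -> item.name;

      // Same compound pattern as SEQ_ID above: seqId ascending, then size
      // descending, then name ascending as the tie-breaker.
      static final Comparator<Item> ORDER = Ordering.compound(ImmutableList.of(
          Ordering.natural().onResultOf(GET_SEQ_ID),
          Ordering.natural().onResultOf(GET_SIZE).reverse(),
          Ordering.natural().onResultOf(GET_NAME)));

      public static void main(String[] args) {
        List<Item> items = Arrays.asList(
            new Item(2, 10, "b"), new Item(1, 5, "a"), new Item(2, 30, "c"));
        items.sort(ORDER);
        // Prints: a (seq 1), then c (seq 2, larger size first), then b
        items.forEach(item -> System.out.println(item.name));
      }
    }
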
http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
index 0e99c74..c656183 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
@@ -283,7 +283,6 @@ public class StoreFileInfo {
    */
   public HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem fs)
       throws IOException {
-
     // guard against the case where we get the FileStatus from link, but by the time we
     // call compute the file is moved again
     if (this.link != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 42c2af2..13a5f01 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.PriorityQueue;
 import java.util.concurrent.atomic.LongAdder;
 
 import org.apache.hadoop.fs.Path;
@@ -120,13 +121,20 @@ public class StoreFileScanner implements KeyValueScanner {
   public static List<StoreFileScanner> getScannersForStoreFiles(Collection<StoreFile> files,
       boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean canUseDrop,
       ScanQueryMatcher matcher, long readPt) throws IOException {
+    if (files.isEmpty()) {
+      return Collections.emptyList();
+    }
     List<StoreFileScanner> scanners = new ArrayList<>(files.size());
-    List<StoreFile> sortedFiles = new ArrayList<>(files);
-    Collections.sort(sortedFiles, StoreFile.Comparators.SEQ_ID);
     boolean canOptimizeForNonNullColumn = matcher != null ? !matcher.hasNullColumnInQuery() : false;
-    for (int i = 0, n = sortedFiles.size(); i < n; i++) {
-      StoreFile sf = sortedFiles.get(i);
-      sf.initReader();
+    PriorityQueue<StoreFile> sortedFiles =
+        new PriorityQueue<>(files.size(), StoreFileComparators.SEQ_ID);
+    for (StoreFile file : files) {
+      // The sort function needs metadata so we need to open reader first before sorting the list.
+      file.initReader();
+      sortedFiles.add(file);
+    }
+    for (int i = 0, n = files.size(); i < n; i++) {
+      StoreFile sf = sortedFiles.remove();
       StoreFileScanner scanner;
       if (usePread) {
         scanner = sf.getPreadScanner(cacheBlocks, readPt, i, canOptimizeForNonNullColumn);
@@ -147,7 +155,7 @@ public class StoreFileScanner implements KeyValueScanner {
       boolean canUseDropBehind, long readPt) throws IOException {
     List<StoreFileScanner> scanners = new ArrayList<>(files.size());
     List<StoreFile> sortedFiles = new ArrayList<>(files);
-    Collections.sort(sortedFiles, StoreFile.Comparators.SEQ_ID);
+    Collections.sort(sortedFiles, StoreFileComparators.SEQ_ID);
     boolean succ = false;
     try {
       for (int i = 0, n = sortedFiles.size(); i < n; i++) {

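Note: the change above replaces sort-then-iterate with a PriorityQueue because the SEQ_ID comparator reads metadata (such as file length) that only exists after initReader(), so each file is opened before it is added and the queue is then drained in comparator order. A minimal stand-alone sketch of that add-then-drain pattern (not HBase API; string length stands in for the store-file metadata):

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;
    import java.util.PriorityQueue;

    // Elements go into the queue once they are "ready" (after initReader()
    // in the real code) and come out in comparator order.
    public class PriorityQueueDrainSketch {
      public static void main(String[] args) {
        List<String> files = Arrays.asList("file-ccc", "f", "file-bb");
        PriorityQueue<String> sortedFiles =
            new PriorityQueue<>(files.size(), Comparator.comparingInt(String::length));
        for (String file : files) {
          // the real code calls file.initReader() here before adding
          sortedFiles.add(file);
        }
        while (!sortedFiles.isEmpty()) {
          System.out.println(sortedFiles.remove()); // f, file-bb, file-ccc
        }
      }
    }
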
http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
index 88cba75..2e3b6f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
@@ -18,8 +18,11 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import com.google.common.base.Preconditions;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.UUID;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -33,7 +36,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -42,13 +44,10 @@ import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.BloomFilterWriter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.hbase.util.RowBloomContext;
 import org.apache.hadoop.hbase.util.RowColBloomContext;
 import org.apache.hadoop.io.WritableUtils;
 
-import com.google.common.base.Preconditions;
-
 /**
  * A StoreFile writer.  Use this to read/write HBase Store Files. It is package
  * local because it is an implementation detail of the HBase regionserver.
@@ -359,6 +358,18 @@ public class StoreFileWriter implements CellSink, ShipperListener {
     return writer;
   }
 
+  /**
+   * @param fs
+   * @param dir Directory to create file in.
+   * @return random filename inside passed <code>dir</code>
+   */
+  static Path getUniqueFile(final FileSystem fs, final Path dir) throws IOException {
+    if (!fs.getFileStatus(dir).isDirectory()) {
+      throw new IOException("Expecting " + dir.toString() + " to be a directory");
+    }
+    return new Path(dir, UUID.randomUUID().toString().replaceAll("-", ""));
+  }
+
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ICAST_INTEGER_MULTIPLY_CAST_TO_LONG",
       justification="Will not overflow")
   public static class Builder {
@@ -496,7 +507,7 @@ public class StoreFileWriter implements CellSink, ShipperListener {
       FSUtils.setStoragePolicy(this.fs, dir, policyName);
 
       if (filePath == null) {
-        filePath = StoreFile.getUniqueFile(fs, dir);
+        filePath = getUniqueFile(fs, dir);
         if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
           bloomType = BloomType.NONE;
         }

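Note: getUniqueFile, moved here from StoreFile, names the new file with a random UUID stripped of its dashes after checking that the target is a directory. A tiny sketch of just the name generation (the class name below is made up for illustration):

    import java.util.UUID;

    // Produces a 32-character hex name such as "3f2504e04f8911d39a0c0305e82c3301",
    // the same dash-stripped UUID scheme getUniqueFile uses for new store files.
    public class UniqueNameSketch {
      public static void main(String[] args) {
        String name = UUID.randomUUID().toString().replaceAll("-", "");
        System.out.println(name);
      }
    }
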
http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index c7bf78d..6990e91 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -19,8 +19,6 @@
 
 package org.apache.hadoop.hbase.regionserver;
 
-import com.google.common.annotations.VisibleForTesting;
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
@@ -55,6 +53,8 @@ import org.apache.hadoop.hbase.regionserver.querymatcher.UserScanQueryMatcher;
 import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Scanner scans both the memstore and the Store. Coalesce KeyValue stream into List&lt;KeyValue&gt;
  * for a single row.

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
index 196c889..5623adb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
@@ -20,7 +20,14 @@ package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.Optional;
+import java.util.OptionalInt;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
@@ -28,14 +35,14 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
  */
 @InterfaceAudience.Private
 public class StoreUtils {
+
+  private static final Log LOG = LogFactory.getLog(StoreUtils.class);
+
   /**
    * Creates a deterministic hash code for store file collection.
    */
-  public static Integer getDeterministicRandomSeed(final Collection<StoreFile> files) {
-    if (files != null && !files.isEmpty()) {
-      return files.iterator().next().getPath().getName().hashCode();
-    }
-    return null;
+  public static OptionalInt getDeterministicRandomSeed(Collection<StoreFile> files) {
+    return files.stream().mapToInt(f -> f.getPath().getName().hashCode()).findFirst();
   }
 
   /**
@@ -70,18 +77,73 @@ public class StoreUtils {
    * @param candidates The files to choose from.
    * @return The largest file; null if no file has a reader.
    */
-  static StoreFile getLargestFile(final Collection<StoreFile> candidates) {
-    long maxSize = -1L;
-    StoreFile largestSf = null;
-    for (StoreFile sf : candidates) {
-      StoreFileReader r = sf.getReader();
-      if (r == null) continue;
-      long size = r.length();
-      if (size > maxSize) {
-        maxSize = size;
-        largestSf = sf;
+  static Optional<StoreFile> getLargestFile(Collection<StoreFile> candidates) {
+    return candidates.stream().filter(f -> f.getReader() != null)
+        .max((f1, f2) -> Long.compare(f1.getReader().length(), f2.getReader().length()));
+  }
+
+  /**
+   * Return the largest memstoreTS found across all storefiles in the given list. Store files that
+   * were created by a mapreduce bulk load are ignored, as they do not correspond to any specific
+   * put operation, and thus do not have a memstoreTS associated with them.
+   * @return 0 if no non-bulk-load files are provided or if this is a Store that does not yet have
+   *         any store files.
+   */
+  public static long getMaxMemstoreTSInList(Collection<StoreFile> sfs) {
+    long max = 0;
+    for (StoreFile sf : sfs) {
+      if (!sf.isBulkLoadResult()) {
+        max = Math.max(max, sf.getMaxMemstoreTS());
+      }
+    }
+    return max;
+  }
+
+  /**
+   * Return the highest sequence ID found across all storefiles in
+   * the given list.
+   * @param sfs
+   * @return 0 if no non-bulk-load files are provided or if this is a Store that
+   * does not yet have any store files.
+   */
+  public static long getMaxSequenceIdInList(Collection<StoreFile> sfs) {
+    long max = 0;
+    for (StoreFile sf : sfs) {
+      max = Math.max(max, sf.getMaxSequenceId());
+    }
+    return max;
+  }
+
+  /**
+   * Gets the approximate mid-point of the given file that is optimal for use in splitting it.
+   * @param file the store file
+   * @param comparator Comparator used to compare KVs.
+   * @return The split point row, or null if splitting is not possible, or reader is null.
+   */
+  static Optional<byte[]> getFileSplitPoint(StoreFile file, CellComparator comparator)
+      throws IOException {
+    StoreFileReader reader = file.getReader();
+    if (reader == null) {
+      LOG.warn("Storefile " + file + " Reader is null; cannot get split point");
+      return Optional.empty();
+    }
+    // Get first, last, and mid keys. Midkey is the key that starts block
+    // in middle of hfile. Has column and timestamp. Need to return just
+    // the row we want to split on as midkey.
+    Cell midkey = reader.midkey();
+    if (midkey != null) {
+      Cell firstKey = reader.getFirstKey();
+      Cell lastKey = reader.getLastKey();
+      // if the midkey is the same as the first or last keys, we cannot (ever) split this region.
+      if (comparator.compareRows(midkey, firstKey) == 0 ||
+          comparator.compareRows(midkey, lastKey) == 0) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("cannot split because midkey is the same as first or last row");
+        }
+        return Optional.empty();
       }
+      return Optional.of(CellUtil.cloneRow(midkey));
     }
-    return largestSf;
+    return Optional.empty();
   }
 }

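Note: the StoreUtils rewrite swaps null-returning helpers for OptionalInt/Optional and expresses the searches as streams. A small sketch of the same idioms on plain strings (class and method names below are illustrative stand-ins, not HBase API):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.Optional;
    import java.util.OptionalInt;

    // Illustrative stand-ins for the StoreFile-based helpers above.
    public class OptionalHelpersSketch {
      // Mirrors getDeterministicRandomSeed: empty input yields OptionalInt.empty()
      // instead of a null Integer.
      static OptionalInt deterministicSeed(List<String> names) {
        return names.stream().mapToInt(String::hashCode).findFirst();
      }

      // Mirrors getLargestFile: no candidate yields Optional.empty() instead of null.
      static Optional<String> largest(List<String> names) {
        return names.stream().max((a, b) -> Integer.compare(a.length(), b.length()));
      }

      public static void main(String[] args) {
        System.out.println(deterministicSeed(Arrays.asList("f1", "f2"))); // hash of "f1"
        System.out.println(deterministicSeed(Collections.emptyList()));   // OptionalInt.empty
        System.out.println(largest(Arrays.asList("a", "bbb", "cc")));     // Optional[bbb]
      }
    }
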
http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
index ef86e2f..3c7469e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
@@ -28,6 +28,7 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.TreeMap;
 
 import org.apache.commons.logging.Log;
@@ -38,7 +39,6 @@ import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -216,7 +216,12 @@ public class StripeStoreFileManager
     return original;
   }
 
-  @Override
+  private byte[] getSplitPoint(Collection<StoreFile> sfs) throws IOException {
+    Optional<StoreFile> largestFile = StoreUtils.getLargestFile(sfs);
+    return largestFile.isPresent()
+        ? StoreUtils.getFileSplitPoint(largestFile.get(), cellComparator).orElse(null) : null;
+  }
+
   /**
    * Override of getSplitPoint that determines the split point as the boundary between two
    * stripes, unless it causes significant imbalance between split sides' sizes. In that
@@ -224,6 +229,7 @@ public class StripeStoreFileManager
    * minimize imbalance.
    * @return The split point, or null if no split is possible.
    */
+  @Override
   public byte[] getSplitPoint() throws IOException {
     if (this.getStorefileCount() == 0) return null;
     if (state.stripeFiles.size() <= 1) {
@@ -271,16 +277,14 @@ public class StripeStoreFileManager
     LOG.debug("Splitting the stripe - ratio w/o split " + ratio + ", ratio with split "
         + newRatio + " configured ratio " + config.getMaxSplitImbalance());
     // Ok, we may get better ratio, get it.
-    return StoreUtils.getLargestFile(state.stripeFiles.get(
-        isRightLarger ? rightIndex : leftIndex)).getFileSplitPoint(this.cellComparator);
+    return getSplitPoint(state.stripeFiles.get(isRightLarger ? rightIndex : leftIndex));
   }
 
   private byte[] getSplitPointFromAllFiles() throws IOException {
     ConcatenatedLists<StoreFile> sfs = new ConcatenatedLists<>();
     sfs.addSublist(state.level0Files);
     sfs.addAllSublists(state.stripeFiles);
-    if (sfs.isEmpty()) return null;
-    return StoreUtils.getLargestFile(sfs).getFileSplitPoint(this.cellComparator);
+    return getSplitPoint(sfs);
   }
 
   private double getMidStripeSplitRatio(long smallerSize, long largerSize, long lastLargerSize) {
@@ -639,7 +643,7 @@ public class StripeStoreFileManager
     // we will store the file in reverse order by seqNum from the outset.
     for (int insertBefore = 0; ; ++insertBefore) {
       if (insertBefore == stripe.size()
-          || (StoreFile.Comparators.SEQ_ID.compare(sf, stripe.get(insertBefore)) >= 0)) {
+          || (StoreFileComparators.SEQ_ID.compare(sf, stripe.get(insertBefore)) >= 0)) {
         stripe.add(insertBefore, sf);
         break;
       }
@@ -1071,6 +1075,6 @@ public class StripeStoreFileManager
 
   @Override
   public Comparator<StoreFile> getStoreFileComparator() {
-    return StoreFile.Comparators.SEQ_ID;
+    return StoreFileComparators.SEQ_ID;
   }
 }

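Note: in StripeStoreFileManager the new private getSplitPoint bridges the Optional-returning StoreUtils helpers back to the null-based getSplitPoint() contract. A compact sketch of that bridging, with string stand-ins and hypothetical names:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.Optional;

    // Optional-returning helper bridged back to a null-based contract, as in
    // StripeStoreFileManager.getSplitPoint above.
    public class OptionalBridgeSketch {
      static Optional<String> largest(List<String> names) {
        return names.stream().max((a, b) -> Integer.compare(a.length(), b.length()));
      }

      // Returns null when no split point can be computed, mirroring getSplitPoint().
      static String splitPoint(List<String> names) {
        Optional<String> largest = largest(names);
        return largest.isPresent() ? largest.get().substring(0, 1) : null;
      }

      public static void main(String[] args) {
        System.out.println(splitPoint(Arrays.asList("abc", "de"))); // a
        System.out.println(splitPoint(Collections.emptyList()));    // null
      }
    }
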
http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
index 3d4f9a1..716a820 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
@@ -18,24 +18,19 @@
  */
 package org.apache.hadoop.hbase.regionserver.compactions;
 
-import com.google.common.base.Function;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicate;
-import com.google.common.collect.Collections2;
-
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.stream.Collectors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileReader;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
+
+import com.google.common.base.Preconditions;
 
 /**
  * This class holds all logical details necessary to run a compaction.
@@ -43,7 +38,7 @@ import org.apache.hadoop.util.StringUtils;
 @InterfaceAudience.LimitedPrivate({ "coprocessor" })
 @InterfaceStability.Evolving
 public class CompactionRequest implements Comparable<CompactionRequest> {
-  private static final Log LOG = LogFactory.getLog(CompactionRequest.class);
+
   // was this compaction promoted to an off-peak
   private boolean isOffPeak = false;
   private enum DisplayCompactionType { MINOR, ALL_FILES, MAJOR }
@@ -207,27 +202,15 @@ public class CompactionRequest implements Comparable<CompactionRequest> {
 
   @Override
   public String toString() {
-    String fsList = Joiner.on(", ").join(
-        Collections2.transform(Collections2.filter(
-            this.getFiles(),
-            new Predicate<StoreFile>() {
-              @Override
-              public boolean apply(StoreFile sf) {
-                return sf.getReader() != null;
-              }
-          }), new Function<StoreFile, String>() {
-            @Override
-            public String apply(StoreFile sf) {
-              return StringUtils.humanReadableInt(
-                (sf.getReader() == null) ? 0 : sf.getReader().length());
-            }
-          }));
-
-    return "regionName=" + regionName + ", storeName=" + storeName +
-      ", fileCount=" + this.getFiles().size() +
-      ", fileSize=" + StringUtils.humanReadableInt(totalSize) +
-        ((fsList.isEmpty()) ? "" : " (" + fsList + ")") +
-      ", priority=" + priority + ", time=" + timeInNanos;
+    String fsList = filesToCompact.stream().filter(f -> f.getReader() != null)
+        .map(f -> TraditionalBinaryPrefix.long2String(f.getReader().length(), "", 1))
+        .collect(Collectors.joining(", "));
+
+    return "regionName=" + regionName + ", storeName=" + storeName + ", fileCount=" +
+        this.getFiles().size() + ", fileSize=" +
+        TraditionalBinaryPrefix.long2String(totalSize, "", 1) +
+        ((fsList.isEmpty()) ? "" : " (" + fsList + ")") + ", priority=" + priority + ", time=" +
+        timeInNanos;
   }
 
   /**

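Note: the rewritten toString builds the per-file size list with a stream filter/map/Collectors.joining pipeline instead of Guava's Collections2 and Joiner, formatting sizes with Hadoop's TraditionalBinaryPrefix. A self-contained sketch of just the stream pipeline (a plain " B" suffix stands in for the Hadoop formatter):

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    // Filter out entries with no size (no reader in the real code), format each,
    // and join with ", " - the shape of the new toString above.
    public class JoiningSketch {
      public static void main(String[] args) {
        List<Long> sizes = Arrays.asList(1024L, null, 2048L);
        String fsList = sizes.stream()
            .filter(size -> size != null)
            .map(size -> size + " B")
            .collect(Collectors.joining(", "));
        System.out.println(fsList); // 1024 B, 2048 B
      }
    }
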
http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 0ba500a..463ed86 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.regionserver.compactions;
 
-import com.google.common.io.Closeables;
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
@@ -60,6 +58,8 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 
+import com.google.common.io.Closeables;
+
 /**
  * A compactor is a compaction algorithm associated with a given policy. Base class also contains
  * reusable parts for implementing compactors (what is common and what isn't is evolving).

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
index 6413ee6..de461e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
@@ -18,17 +18,12 @@
  */
 package org.apache.hadoop.hbase.regionserver.compactions;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
-import com.google.common.collect.PeekingIterator;
-import com.google.common.math.LongMath;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.OptionalLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -44,6 +39,12 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.collect.PeekingIterator;
+import com.google.common.math.LongMath;
+
 /**
  * HBASE-15181 This is a simple implementation of date-based tiered compaction similar to
  * Cassandra's for the following benefits:
@@ -135,24 +136,24 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
     boolean[] filesInWindow = new boolean[boundaries.size()];
 
     for (StoreFile file: filesToCompact) {
-      Long minTimestamp = file.getMinimumTimestamp();
-      long oldest = (minTimestamp == null) ? Long.MIN_VALUE : now - minTimestamp.longValue();
+      OptionalLong minTimestamp = file.getMinimumTimestamp();
+      long oldest = minTimestamp.isPresent() ? now - minTimestamp.getAsLong() : Long.MIN_VALUE;
       if (cfTTL != Long.MAX_VALUE && oldest >= cfTTL) {
         LOG.debug("Major compaction triggered on store " + this
           + "; for TTL maintenance");
         return true;
       }
-      if (!file.isMajorCompaction() || file.isBulkLoadResult()) {
+      if (!file.isMajorCompactionResult() || file.isBulkLoadResult()) {
         LOG.debug("Major compaction triggered on store " + this
           + ", because there are new files and time since last major compaction "
           + (now - lowTimestamp) + "ms");
         return true;
       }
 
-      int lowerWindowIndex = Collections.binarySearch(boundaries,
-        minTimestamp == null ? (Long)Long.MAX_VALUE : minTimestamp);
-      int upperWindowIndex = Collections.binarySearch(boundaries,
-        file.getMaximumTimestamp() == null ? (Long)Long.MAX_VALUE : file.getMaximumTimestamp());
+      int lowerWindowIndex =
+          Collections.binarySearch(boundaries, minTimestamp.orElse(Long.MAX_VALUE));
+      int upperWindowIndex =
+          Collections.binarySearch(boundaries, file.getMaximumTimestamp().orElse(Long.MAX_VALUE));
       // Handle boundary conditions and negative values of binarySearch
       lowerWindowIndex = (lowerWindowIndex < 0) ? Math.abs(lowerWindowIndex + 2) : lowerWindowIndex;
       upperWindowIndex = (upperWindowIndex < 0) ? Math.abs(upperWindowIndex + 2) : upperWindowIndex;
@@ -220,8 +221,8 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
     for (StoreFile storeFile : candidateSelection) {
       // if there is out-of-order data,
       // we put them in the same window as the last file in increasing order
-      maxTimestampSeen = Math.max(maxTimestampSeen,
-        storeFile.getMaximumTimestamp() == null? Long.MIN_VALUE : storeFile.getMaximumTimestamp());
+      maxTimestampSeen =
+          Math.max(maxTimestampSeen, storeFile.getMaximumTimestamp().orElse(Long.MIN_VALUE));
       storefileMaxTimestampPairs.add(new Pair<>(storeFile, maxTimestampSeen));
     }
     Collections.reverse(storefileMaxTimestampPairs);
@@ -288,23 +289,18 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
   }
 
   /**
-   * Return a list of boundaries for multiple compaction output
-   *   in ascending order.
+   * Return a list of boundaries for multiple compaction output in ascending order.
    */
   private List<Long> getCompactBoundariesForMajor(Collection<StoreFile> filesToCompact, long now) {
-    long minTimestamp = Long.MAX_VALUE;
-    for (StoreFile file : filesToCompact) {
-      minTimestamp =
-        Math.min(minTimestamp,
-          file.getMinimumTimestamp() == null ? Long.MAX_VALUE : file.getMinimumTimestamp());
-    }
+    long minTimestamp =
+        filesToCompact.stream().mapToLong(f -> f.getMinimumTimestamp().orElse(Long.MAX_VALUE)).min()
+            .orElse(Long.MAX_VALUE);
 
     List<Long> boundaries = new ArrayList<>();
 
     // Add startMillis of all windows between now and min timestamp
-    for (CompactionWindow window = getIncomingWindow(now);
-        window.compareToTimestamp(minTimestamp) > 0;
-        window = window.nextEarlierWindow()) {
+    for (CompactionWindow window = getIncomingWindow(now); window
+        .compareToTimestamp(minTimestamp) > 0; window = window.nextEarlierWindow()) {
       boundaries.add(window.startMillis());
     }
     boundaries.add(Long.MIN_VALUE);

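Note: DateTieredCompactionPolicy now receives OptionalLong from getMinimumTimestamp()/getMaximumTimestamp() instead of a nullable Long, so the null checks become isPresent()/getAsLong() or orElse() with a sentinel. A short sketch of that migration pattern:

    import java.util.OptionalLong;

    // Nullable-Long style vs. OptionalLong style for a possibly missing timestamp.
    public class OptionalLongSketch {
      public static void main(String[] args) {
        long now = 1_000_000L;
        OptionalLong minTimestamp = OptionalLong.empty(); // file without timestamp metadata

        // Replaces: (minTimestamp == null) ? Long.MIN_VALUE : now - minTimestamp.longValue()
        long oldest = minTimestamp.isPresent() ? now - minTimestamp.getAsLong() : Long.MIN_VALUE;
        System.out.println(oldest); // Long.MIN_VALUE

        // orElse() supplies the sentinel directly, e.g. for Collections.binarySearch
        System.out.println(minTimestamp.orElse(Long.MAX_VALUE)); // Long.MAX_VALUE
      }
    }
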
http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java
index b1203c5..0eb9433 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.DateTieredMultiFileWriter;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreUtils;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.security.User;
 
@@ -47,7 +47,7 @@ public class DateTieredCompactor extends AbstractMultiOutputCompactor<DateTiered
   private boolean needEmptyFile(CompactionRequest request) {
     // if we are going to compact the last N files, then we need to emit an empty file to retain the
     // maxSeqId if we haven't written out anything.
-    return StoreFile.getMaxSequenceIdInList(request.getFiles()) == store.getMaxSequenceId();
+    return StoreUtils.getMaxSequenceIdInList(request.getFiles()) == store.getMaxSequenceId();
   }
 
   public List<Path> compact(final CompactionRequest request, final List<Long> lowerBoundaries,

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
index 2ee051b..28f669b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+import java.util.OptionalLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -79,9 +80,9 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy {
       if (filesToCompact.size() == 1) {
         // Single file
         StoreFile sf = filesToCompact.iterator().next();
-        Long minTimestamp = sf.getMinimumTimestamp();
-        long oldest = (minTimestamp == null) ? Long.MIN_VALUE : now - minTimestamp.longValue();
-        if (sf.isMajorCompaction() && (cfTTL == Long.MAX_VALUE || oldest < cfTTL)) {
+        OptionalLong minTimestamp = sf.getMinimumTimestamp();
+        long oldest = minTimestamp.isPresent() ? now - minTimestamp.getAsLong() : Long.MIN_VALUE;
+        if (sf.isMajorCompactionResult() && (cfTTL == Long.MAX_VALUE || oldest < cfTTL)) {
           float blockLocalityIndex =
             sf.getHDFSBlockDistribution().getBlockLocalityIndex(
             RSRpcServices.getHostname(comConf.conf, false));

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
index 42b57a4..232e552 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
@@ -10,15 +10,11 @@
  */
 package org.apache.hadoop.hbase.regionserver.compactions;
 
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicate;
-import com.google.common.collect.Collections2;
-import com.google.common.collect.Lists;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+import java.util.OptionalInt;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
@@ -29,6 +25,11 @@ import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
 
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Lists;
+
 /**
  * An abstract compaction policy that select files on seq id order.
  */
@@ -118,30 +119,31 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy {
    * @param filesToCompact
    * @return When to run next major compaction
    */
-  public long getNextMajorCompactTime(final Collection<StoreFile> filesToCompact) {
+  public long getNextMajorCompactTime(Collection<StoreFile> filesToCompact) {
     // default = 24hrs
-    long ret = comConf.getMajorCompactionPeriod();
-    if (ret > 0) {
-      // default = 20% = +/- 4.8 hrs
-      double jitterPct = comConf.getMajorCompactionJitter();
-      if (jitterPct > 0) {
-        long jitter = Math.round(ret * jitterPct);
-        // deterministic jitter avoids a major compaction storm on restart
-        Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
-        if (seed != null) {
-          // Synchronized to ensure one user of random instance at a time.
-          double rnd = -1;
-          synchronized (this) {
-            this.random.setSeed(seed);
-            rnd = this.random.nextDouble();
-          }
-          ret += jitter - Math.round(2L * jitter * rnd);
-        } else {
-          ret = 0; // If seed is null, then no storefiles == no major compaction
-        }
+    long period = comConf.getMajorCompactionPeriod();
+    if (period <= 0) {
+      return period;
+    }
+    // default = 20% = +/- 4.8 hrs
+    double jitterPct = comConf.getMajorCompactionJitter();
+    if (jitterPct <= 0) {
+      return period;
+    }
+    // deterministic jitter avoids a major compaction storm on restart
+    OptionalInt seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
+    if (seed.isPresent()) {
+      // Synchronized to ensure one user of random instance at a time.
+      double rnd;
+      synchronized (this) {
+        this.random.setSeed(seed.getAsInt());
+        rnd = this.random.nextDouble();
       }
+      long jitter = Math.round(period * jitterPct);
+      return period + jitter - Math.round(2L * jitter * rnd);
+    } else {
+      return 0L;
     }
-    return ret;
   }
 
   /**

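Note: the refactored getNextMajorCompactTime keeps the same arithmetic: the next major compaction is the configured period plus a jitter drawn from a Random seeded deterministically from the store files, so every server computes the same offset and restarts do not trigger a compaction storm. A worked sketch of that calculation with example values (the constants below are illustrative, not read from configuration):

    import java.util.Random;

    // period +/- jitter, with the jitter made deterministic by seeding Random
    // from a value derived from the store files (a file-name hash here).
    public class JitterSketch {
      public static void main(String[] args) {
        long period = 24L * 60 * 60 * 1000; // e.g. 24h major compaction period
        double jitterPct = 0.20;            // e.g. +/- 20%
        int seed = "example-storefile-name".hashCode();

        Random random = new Random();
        random.setSeed(seed);
        double rnd = random.nextDouble();

        long jitter = Math.round(period * jitterPct);
        long next = period + jitter - Math.round(2L * jitter * rnd);
        // next lies in [period - jitter, period + jitter]
        System.out.println(next);
      }
    }
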
http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index c5b24e9..7ba5312 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -18,9 +18,6 @@
 
 package org.apache.hadoop.hbase.snapshot;
 
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -45,15 +42,17 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.mob.MobUtils;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
index 22d6fe1..48b98df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
@@ -24,9 +24,9 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilter;
 import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterBase;

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
index 24b5051..c508b02 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
@@ -54,6 +53,7 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.Leases;

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 274a76e..3533f8a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -22,15 +22,14 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.lang.reflect.Field;
-import java.util.Arrays;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -81,7 +80,6 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
-import org.apache.hadoop.hbase.mapreduce.TestImportTSVWithTTLs.TTLCheckingObserver;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -93,7 +91,6 @@ import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
index 6647ffe..9b69411 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -61,7 +61,7 @@ public class TestMobFile extends TestCase {
     MobTestUtil.writeStoreFile(writer, caseName);
 
     MobFile mobFile =
-        new MobFile(new StoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true));
+        new MobFile(new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true));
     byte[] family = Bytes.toBytes(caseName);
     byte[] qualify = Bytes.toBytes(caseName);
 
@@ -113,7 +113,7 @@ public class TestMobFile extends TestCase {
     MobTestUtil.writeStoreFile(writer, getName());
 
     MobFile mobFile =
-        new MobFile(new StoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true));
+        new MobFile(new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true));
     assertNotNull(mobFile.getScanner());
     assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
index 47a1c24..398d14d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestMobCompactor.java
@@ -82,6 +82,7 @@ import org.apache.hadoop.hbase.mob.MobFileName;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -814,7 +815,7 @@ public class TestMobCompactor {
       Assert.assertTrue(hasFiles);
       Path path = files[0].getPath();
       CacheConfig cacheConf = new CacheConfig(conf);
-      StoreFile sf = new StoreFile(TEST_UTIL.getTestFileSystem(), path, conf, cacheConf,
+      StoreFile sf = new HStoreFile(TEST_UTIL.getTestFileSystem(), path, conf, cacheConf,
         BloomType.NONE, true);
       sf.initReader();
       HFile.Reader reader = sf.getReader().getHFileReader();

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
index f65e224..f93ce98 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java
@@ -18,9 +18,20 @@
  */
 package org.apache.hadoop.hbase.mob.compactions;
 
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.text.ParseException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.UUID;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.RejectedExecutionHandler;
@@ -34,11 +45,16 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
-import org.apache.hadoop.hbase.regionserver.*;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
@@ -49,13 +65,23 @@ import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.mob.compactions.MobCompactionRequest.CompactionType;
 import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionDelPartition;
 import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactionRequest.CompactionPartition;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
+import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
+import org.apache.hadoop.hbase.regionserver.ScanInfo;
+import org.apache.hadoop.hbase.regionserver.ScanType;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
+import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
+import org.apache.hadoop.hbase.regionserver.StoreScanner;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.junit.AfterClass;
 import org.junit.Assert;
-import static org.junit.Assert.assertTrue;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -516,7 +542,7 @@ public class TestPartitionedMobCompactor {
         for (CompactionDelPartition delPartition : request.getDelPartitions()) {
           for (Path newDelPath : delPartition.listDelFiles()) {
             StoreFile sf =
-                new StoreFile(fs, newDelPath, conf, this.cacheConfig, BloomType.NONE, true);
+                new HStoreFile(fs, newDelPath, conf, this.cacheConfig, BloomType.NONE, true);
             // pre-create reader of a del file to avoid race condition when opening the reader in
             // each partition.
             sf.initReader();
@@ -851,7 +877,7 @@ public class TestPartitionedMobCompactor {
     List<StoreFile> sfs = new ArrayList<>();
     int size = 0;
     for (Path path : paths) {
-      StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
+      StoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
       sfs.add(sf);
     }
     List<KeyValueScanner> scanners = new ArrayList<>(StoreFileScanner.getScannersForStoreFiles(sfs,

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
index bc7f32a..7bc5002 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java
@@ -25,6 +25,8 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import com.google.common.collect.Sets;
+
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java
index 7229c40..efbac63 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/AbstractTestDateTieredCompactionPolicy.java
@@ -17,19 +17,20 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.junit.Assert.assertEquals;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
 import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionRequest;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
-import static org.junit.Assert.*;
 
 public class AbstractTestDateTieredCompactionPolicy extends TestCompactionPolicy {
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
index e36d16f..2635e2d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java
@@ -592,7 +592,7 @@ public class DataBlockEncodingTool {
     Path path = new Path(hfilePath);
     CacheConfig cacheConf = new CacheConfig(conf);
     FileSystem fs = FileSystem.get(conf);
-    StoreFile hsf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
+    StoreFile hsf = new HStoreFile(fs, path, conf, cacheConf, BloomType.NONE, true);
     hsf.initReader();
     StoreFileReader reader = hsf.getReader();
     reader.loadFileInfo();

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
index f47fc4e..bde0934 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java
@@ -59,7 +59,7 @@ public class EncodedSeekPerformanceTest {
     List<Cell> allKeyValues = new ArrayList<>();
 
     // read all of the key values
-    StoreFile storeFile = new StoreFile(testingUtility.getTestFileSystem(),
+    StoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(),
         path, configuration, cacheConf, BloomType.NONE, true);
     storeFile.initReader();
     StoreFileReader reader = storeFile.getReader();
@@ -89,7 +89,7 @@ public class EncodedSeekPerformanceTest {
   private void runTest(Path path, DataBlockEncoding blockEncoding,
       List<Cell> seeks) throws IOException {
     // read all of the key values
-    StoreFile storeFile = new StoreFile(testingUtility.getTestFileSystem(),
+    StoreFile storeFile = new HStoreFile(testingUtility.getTestFileSystem(),
       path, configuration, cacheConf, BloomType.NONE, true);
     storeFile.initReader();
     long totalSize = 0;

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java
index 91b85d3..6fa951e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java
@@ -19,11 +19,14 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.Map;
+import java.util.OptionalLong;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -31,7 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /** A mock used so our tests don't deal with actual StoreFiles */
-public class MockStoreFile extends StoreFile {
+public class MockStoreFile extends HStoreFile {
   long length = 0;
   boolean isRef = false;
   long ageInDisk;
@@ -65,17 +68,12 @@ public class MockStoreFile extends StoreFile {
   }
 
   @Override
-  byte[] getFileSplitPoint(CellComparator comparator) throws IOException {
-    return this.splitPoint;
-  }
-
-  @Override
   public long getMaxSequenceId() {
     return sequenceid;
   }
 
   @Override
-  public boolean isMajorCompaction() {
+  public boolean isMajorCompactionResult() {
     return isMajor;
   }
 
@@ -110,14 +108,14 @@ public class MockStoreFile extends StoreFile {
     this.entryCount = entryCount;
   }
 
-  public Long getMinimumTimestamp() {
-    return (timeRangeTracker == null) ?
-      null : timeRangeTracker.getMin();
+  public OptionalLong getMinimumTimestamp() {
+    return timeRangeTracker == null ? OptionalLong.empty()
+        : OptionalLong.of(timeRangeTracker.getMin());
   }
 
-  public Long getMaximumTimestamp() {
-    return (timeRangeTracker == null) ?
-      null : timeRangeTracker.getMax();
+  public OptionalLong getMaximumTimestamp() {
+    return timeRangeTracker == null ? OptionalLong.empty()
+        : OptionalLong.of(timeRangeTracker.getMax());
   }
 
   @Override
@@ -184,6 +182,39 @@ public class MockStoreFile extends StoreFile {
       public void close(boolean evictOnClose) throws IOException {
         // no-op
       }
+
+      @Override
+      public Cell getLastKey() {
+        if (splitPoint != null) {
+          return CellUtil.createCell(Arrays.copyOf(splitPoint, splitPoint.length + 1));
+        } else {
+          return null;
+        }
+      }
+
+      @Override
+      public Cell midkey() throws IOException {
+        if (splitPoint != null) {
+          return CellUtil.createCell(splitPoint);
+        } else {
+          return null;
+        }
+      }
+
+      @Override
+      public Cell getFirstKey() {
+        if (splitPoint != null) {
+          return CellUtil.createCell(Arrays.copyOf(splitPoint, splitPoint.length - 1));
+        } else {
+          return null;
+        }
+      }
     };
   }
+
+  @Override
+  public OptionalLong getBulkLoadTimestamp() {
+    // we always return false for isBulkLoadResult so we do not have a bulk load timestamp
+    return OptionalLong.empty();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
index efe0605..8fad157 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
@@ -218,7 +218,7 @@ public class TestCacheOnWriteInSchema {
   private void readStoreFile(Path path) throws IOException {
     CacheConfig cacheConf = store.getCacheConfig();
     BlockCache cache = cacheConf.getBlockCache();
-    StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
+    StoreFile sf = new HStoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL, true);
     sf.initReader();
     HFile.Reader reader = sf.getReader().getHFileReader();
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
index e320368..d68f07e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
@@ -23,6 +23,14 @@ import static org.junit.Assert.assertFalse;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -45,14 +53,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-
 /**
  * Tests a race condition between archiving of compacted files in CompactedHFilesDischarger chore
  * and HRegion.close();

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
index e7fcf18..7fb7f21 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
@@ -30,6 +30,12 @@ import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
 import com.google.common.collect.ImmutableList;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -53,11 +59,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
 /**
  * Tests that archiving compacted files behaves correctly when encountering exceptions.
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
index 57a5f59..384608c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
@@ -43,9 +43,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -54,9 +51,12 @@ import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.io.hfile.RandomKeyValueUtil;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.BloomFilterUtil;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -200,7 +200,7 @@ public class TestCompoundBloomFilter {
 
   private void readStoreFile(int t, BloomType bt, List<KeyValue> kvs,
       Path sfPath) throws IOException {
-    StoreFile sf = new StoreFile(fs, sfPath, conf, cacheConf, bt, true);
+    StoreFile sf = new HStoreFile(fs, sfPath, conf, cacheConf, bt, true);
     sf.initReader();
     StoreFileReader r = sf.getReader();
     final boolean pread = true; // does not really matter

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
index 4f6d4c2..9acf244 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.security.Key;
@@ -36,8 +37,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.client.Put;
@@ -49,8 +48,9 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.security.EncryptionUtil;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Rule;

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
index cd1f1e7..3837e94 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 import java.security.Key;
 import java.util.ArrayList;
@@ -29,8 +30,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
@@ -38,8 +37,9 @@ import org.apache.hadoop.hbase.io.crypto.Encryption;
 import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
index e231b60..daddb5c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
@@ -91,10 +91,10 @@ public class TestFSErrorsExposed {
             .withOutputDir(hfilePath)
             .withFileContext(meta)
             .build();
-    TestStoreFile.writeStoreFile(
+    TestHStoreFile.writeStoreFile(
         writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 
-    StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf,
+    StoreFile sf = new HStoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf,
         BloomType.NONE, true);
     sf.initReader();
     StoreFileReader reader = sf.getReader();
@@ -141,10 +141,10 @@ public class TestFSErrorsExposed {
             .withOutputDir(hfilePath)
             .withFileContext(meta)
             .build();
-    TestStoreFile.writeStoreFile(
+    TestHStoreFile.writeStoreFile(
         writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 
-    StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf,
+    StoreFile sf = new HStoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf,
         BloomType.NONE, true);
 
     List<StoreFileScanner> scanners = StoreFileScanner.getScannersForStoreFiles(

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
index 0f24a24..2eb88f4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
@@ -18,11 +18,26 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import java.io.IOException;
+import java.security.Key;
+import java.security.SecureRandom;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NavigableSet;
+import java.util.concurrent.ConcurrentSkipListSet;
+
+import javax.crypto.spec.SecretKeySpec;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
@@ -35,7 +50,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Scan;
@@ -61,19 +75,6 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.mockito.Mockito;
 
-import javax.crypto.spec.SecretKeySpec;
-import java.io.IOException;
-import java.security.Key;
-import java.security.SecureRandom;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NavigableSet;
-import java.util.concurrent.ConcurrentSkipListSet;
-
 @Category(MediumTests.class)
 public class TestHMobStore {
   public static final Log LOG = LogFactory.getLog(TestHMobStore.class);

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index ed0b9dd..4f46c88 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -116,7 +116,6 @@ import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.FilterAllFilter;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.NullComparator;

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index 6eed7df..5467c3f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -35,6 +35,8 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
+import com.google.common.collect.Lists;
+
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -64,6 +66,10 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
+import org.apache.hadoop.hbase.regionserver.HRegion.FlushResultImpl;
+import org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult;
+import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
@@ -75,10 +81,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescript
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
-import org.apache.hadoop.hbase.regionserver.HRegion.FlushResultImpl;
-import org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult;
-import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -98,8 +100,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-import com.google.common.collect.Lists;
-
 /**
  * Tests of HRegion methods for replaying flush, compaction, region open, etc events for secondary
  * region replicas
@@ -1350,7 +1350,7 @@ public class TestHRegionReplayEvents {
   }
 
   @Test
-  public void testRefreshStoreFiles() throws IOException {
+  public void testRefresStoreFiles() throws IOException {
     assertEquals(0, primaryRegion.getStoreFileList(families).size());
     assertEquals(0, secondaryRegion.getStoreFileList(families).size());
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ee0f148c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
index 0ac5153..61fe2cc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.regionserver;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertThat;
 
+import com.google.common.collect.Lists;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
@@ -63,11 +65,11 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-import org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -81,8 +83,6 @@ import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
-import com.google.common.collect.Lists;
-
 /**
  * Tests bulk loading of HFiles and shows the atomicity or lack of atomicity of
  * the region server's bullkLoad functionality.
