http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
index d558307..dec28f3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
@@ -53,11 +53,11 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.CachedBlock;
 import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -65,9 +65,9 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
-import org.junit.Ignore;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
@@ -184,8 +184,9 @@ public class TestBlockEvictionFromClient {
       // get the block cache and region
      RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
-      Store store = region.getStores().iterator().next();
+      HRegion region = (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName)
+          .getRegion(regionName);
+      HStore store = region.getStores().iterator().next();
       CacheConfig cacheConf = store.getCacheConfig();
       cacheConf.setCacheDataOnWrite(true);
       cacheConf.setEvictOnClose(true);
@@ -274,8 +275,9 @@ public class TestBlockEvictionFromClient {
       // get the block cache and region
      RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
-      Store store = region.getStores().iterator().next();
+      HRegion region =
+          (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+      HStore store = region.getStores().iterator().next();
       CacheConfig cacheConf = store.getCacheConfig();
       cacheConf.setCacheDataOnWrite(true);
       cacheConf.setEvictOnClose(true);
@@ -332,8 +334,9 @@ public class TestBlockEvictionFromClient {
       // get the block cache and region
      RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
-      Store store = region.getStores().iterator().next();
+      HRegion region =
+          (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+      HStore store = region.getStores().iterator().next();
       CacheConfig cacheConf = store.getCacheConfig();
       cacheConf.setCacheDataOnWrite(true);
       cacheConf.setEvictOnClose(true);
@@ -393,7 +396,8 @@ public class TestBlockEvictionFromClient {
       // get the block cache and region
      RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+      HRegion region =
+          (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
       BlockCache cache = setCacheProperties(region);
       Put put = new Put(ROW);
       put.addColumn(FAMILY, QUALIFIER, data);
@@ -485,7 +489,8 @@ public class TestBlockEvictionFromClient {
       // get the block cache and region
      RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+      HRegion region =
+          (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
       BlockCache cache = setCacheProperties(region);
 
       Put put = new Put(ROW);
@@ -568,8 +573,9 @@ public class TestBlockEvictionFromClient {
       // get the block cache and region
      RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
-      Store store = region.getStores().iterator().next();
+      HRegion region =
+          (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+      HStore store = region.getStores().iterator().next();
       CacheConfig cacheConf = store.getCacheConfig();
       cacheConf.setEvictOnClose(true);
       BlockCache cache = cacheConf.getBlockCache();
@@ -626,8 +632,9 @@ public class TestBlockEvictionFromClient {
       // get the block cache and region
      RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
-      Store store = region.getStores().iterator().next();
+      HRegion region =
+          (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+      HStore store = region.getStores().iterator().next();
       CacheConfig cacheConf = store.getCacheConfig();
       cacheConf.setCacheDataOnWrite(true);
       cacheConf.setEvictOnClose(true);
@@ -708,7 +715,8 @@ public class TestBlockEvictionFromClient {
       // get the block cache and region
      RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+      HRegion region =
+          (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
       BlockCache cache = setCacheProperties(region);
 
       Put put = new Put(ROW);
@@ -777,11 +785,11 @@ public class TestBlockEvictionFromClient {
     }
   }
 
-  private BlockCache setCacheProperties(Region region) {
-    Iterator<? extends Store> strItr = region.getStores().iterator();
+  private BlockCache setCacheProperties(HRegion region) {
+    Iterator<HStore> strItr = region.getStores().iterator();
     BlockCache cache = null;
     while (strItr.hasNext()) {
-      Store store = strItr.next();
+      HStore store = strItr.next();
       CacheConfig cacheConf = store.getCacheConfig();
       cacheConf.setCacheDataOnWrite(true);
       cacheConf.setEvictOnClose(true);
@@ -807,8 +815,9 @@ public class TestBlockEvictionFromClient {
       // get the block cache and region
      RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
-      Store store = region.getStores().iterator().next();
+      HRegion region =
+          (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+      HStore store = region.getStores().iterator().next();
       CacheConfig cacheConf = store.getCacheConfig();
       cacheConf.setCacheDataOnWrite(true);
       cacheConf.setEvictOnClose(true);
@@ -872,8 +881,9 @@ public class TestBlockEvictionFromClient {
       // get the block cache and region
      RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
-      Store store = region.getStores().iterator().next();
+      HRegion region =
+          (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+      HStore store = region.getStores().iterator().next();
       CacheConfig cacheConf = store.getCacheConfig();
       cacheConf.setCacheDataOnWrite(true);
       cacheConf.setEvictOnClose(true);
@@ -989,8 +999,9 @@ public class TestBlockEvictionFromClient {
       // get the block cache and region
      RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
-      Store store = region.getStores().iterator().next();
+      HRegion region =
+          (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+      HStore store = region.getStores().iterator().next();
       CacheConfig cacheConf = store.getCacheConfig();
       cacheConf.setCacheDataOnWrite(true);
       cacheConf.setEvictOnClose(true);
@@ -1118,8 +1129,9 @@ public class TestBlockEvictionFromClient {
       // get the block cache and region
      RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
-      Store store = region.getStores().iterator().next();
+      HRegion region =
+          (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
+      HStore store = region.getStores().iterator().next();
       CacheConfig cacheConf = store.getCacheConfig();
       cacheConf.setCacheDataOnWrite(true);
       cacheConf.setEvictOnClose(true);
@@ -1446,14 +1458,14 @@ public class TestBlockEvictionFromClient {
     }
   }
 
-  private void waitForStoreFileCount(Store store, int count, int timeout)
+  private void waitForStoreFileCount(HStore store, int count, int timeout)
       throws InterruptedException {
     long start = System.currentTimeMillis();
    while (start + timeout > System.currentTimeMillis() && store.getStorefilesCount() != count) {
      Thread.sleep(100);
    }
-    System.out.println("start=" + start + ", now=" + System.currentTimeMillis() + ", cur="
-        + store.getStorefilesCount());
+    System.out.println("start=" + start + ", now=" + System.currentTimeMillis() + ", cur=" +
+        store.getStorefilesCount());
     assertEquals(count, store.getStorefilesCount());
   }
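Note on the pattern above: the test's handles are narrowed from the Region and Store interfaces to the server-side HRegion and HStore classes so that cache plumbing such as getCacheConfig() stays reachable. A minimal sketch of the resulting lookup, using only calls that already appear in this diff (TEST_UTIL, tableName and regionName come from the surrounding test):

    HRegion region = (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
    HStore store = region.getStores().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();  // reached via HStore in this patch
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    BlockCache cache = cacheConf.getBlockCache();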
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index a938db6..400e109 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -199,19 +199,19 @@ public class TestFromClientSide {
 
       try {
         Append append = new Append(ROW);
-        append.addColumn(TEST_UTIL.fam1, QUALIFIER, VALUE);
+        append.addColumn(HBaseTestingUtility.fam1, QUALIFIER, VALUE);
         Result result = table.append(append);
 
         // Verify expected result
         Cell[] cells = result.rawCells();
         assertEquals(1, cells.length);
-        assertKey(cells[0], ROW, TEST_UTIL.fam1, QUALIFIER, VALUE);
+        assertKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, VALUE);
 
         // Verify expected result again
         Result readResult = table.get(new Get(ROW));
         cells = readResult.rawCells();
         assertEquals(1, cells.length);
-        assertKey(cells[0], ROW, TEST_UTIL.fam1, QUALIFIER, VALUE);
+        assertKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, VALUE);
       } finally {
         table.close();
         connection.close();
@@ -568,7 +568,7 @@ public class TestFromClientSide {
       }
 
       @Override
-      protected List<KeyValueScanner> selectScannersFrom(Store store,
+      protected List<KeyValueScanner> selectScannersFrom(HStore store,
           List<? extends KeyValueScanner> allScanners) {
        List<KeyValueScanner> scanners = super.selectScannersFrom(store, allScanners);
         List<KeyValueScanner> newScanners = new ArrayList<>(scanners.size());
@@ -596,7 +596,8 @@ public class TestFromClientSide {
    public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s,
        final long readPt) throws IOException {
-      return new MyStoreScanner((HStore) store, store.getScanInfo(), scan, targetCols, readPt);
+      HStore hs = (HStore) store;
+      return new MyStoreScanner(hs, hs.getScanInfo(), scan, targetCols, readPt);
     }
   }
 
@@ -2043,7 +2044,6 @@ public class TestFromClientSide {
   public void testDeleteWithFailed() throws Exception {
     final TableName tableName = TableName.valueOf(name.getMethodName());
 
-    byte [][] ROWS = makeNAscii(ROW, 6);
     byte [][] FAMILIES = makeNAscii(FAMILY, 3);
     byte [][] VALUES = makeN(VALUE, 5);
     long [] ts = {1000, 2000, 3000, 4000, 5000};
@@ -2061,7 +2061,7 @@ public class TestFromClientSide {
 
     Get get = new Get(ROW);
     get.addFamily(FAMILIES[0]);
-    get.setMaxVersions(Integer.MAX_VALUE);
+    get.readAllVersions();
     Result result = ht.get(get);
    assertTrue(Bytes.equals(result.getValue(FAMILIES[0], QUALIFIER), VALUES[0]));
   }
@@ -5301,8 +5301,9 @@ public class TestFromClientSide {
       // get the block cache and region
      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();

-      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getRegion(regionName);
-      Store store = region.getStores().iterator().next();
+      HRegion region = (HRegion) TEST_UTIL.getRSForFirstRegionInTable(tableName)
+          .getRegion(regionName);
+      HStore store = region.getStores().iterator().next();
       CacheConfig cacheConf = store.getCacheConfig();
       cacheConf.setCacheDataOnWrite(true);
       cacheConf.setEvictOnClose(true);
@@ -5394,15 +5395,14 @@ public class TestFromClientSide {
     }
   }
 
-  private void waitForStoreFileCount(Store store, int count, int timeout)
-  throws InterruptedException {
+  private void waitForStoreFileCount(HStore store, int count, int timeout)
+      throws InterruptedException {
     long start = System.currentTimeMillis();
-    while (start + timeout > System.currentTimeMillis() &&
-        store.getStorefilesCount() != count) {
+    while (start + timeout > System.currentTimeMillis() && store.getStorefilesCount() != count) {
      Thread.sleep(100);
    }
-    System.out.println("start=" + start + ", now=" +
-        System.currentTimeMillis() + ", cur=" + store.getStorefilesCount());
+    System.out.println("start=" + start + ", now=" + System.currentTimeMillis() + ", cur=" +
+        store.getStorefilesCount());
     assertEquals(count, store.getStorefilesCount());
   }
 
@@ -5462,8 +5462,8 @@ public class TestFromClientSide {
     // Test Initialization.
     byte [] startKey = Bytes.toBytes("ddc");
     byte [] endKey = Bytes.toBytes("mmm");
-    final TableName tableName = TableName.valueOf(name.getMethodName());
-    Table t = TEST_UTIL.createMultiRegionTable(tableName, new byte[][] { FAMILY }, 10);
+    TableName tableName = TableName.valueOf(name.getMethodName());
+    TEST_UTIL.createMultiRegionTable(tableName, new byte[][] { FAMILY }, 10);
 
     int numOfRegions = -1;
    try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
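Besides the HStore narrowing, this file swaps the deprecated Get.setMaxVersions(Integer.MAX_VALUE) for Get.readAllVersions(). A short sketch of the updated read, reusing the constants from the test above:

    Get get = new Get(ROW);
    get.addFamily(FAMILIES[0]);
    get.readAllVersions();  // was: get.setMaxVersions(Integer.MAX_VALUE)
    Result result = ht.get(get);
    assertTrue(Bytes.equals(result.getValue(FAMILIES[0], QUALIFIER), VALUES[0]));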

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
index 4c8c4ce..0446f61 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java
@@ -125,7 +125,8 @@ public class TestRegionObserverScannerOpenHook {
        Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s, long readPt)
        throws IOException {
      scan.setFilter(new NoDataFilter());
-      return new StoreScanner((HStore) store, store.getScanInfo(), scan, targetCols, readPt);
+      HStore hs = (HStore) store;
+      return new StoreScanner(hs, hs.getScanInfo(), scan, targetCols, readPt);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
index ece658b..e94859a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
@@ -25,14 +25,15 @@ import java.util.Collection;
 
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.testclassification.IOTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.testclassification.IOTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Before;
 import org.junit.Test;
@@ -109,7 +110,7 @@ public class TestForceCacheImportantBlocks {
       setBloomFilterType(BLOOM_TYPE);
     hcd.setBlocksize(BLOCK_SIZE);
     hcd.setBlockCacheEnabled(cfCacheEnabled);
-    Region region = TEST_UTIL.createTestRegion(TABLE, hcd);
+    HRegion region = TEST_UTIL.createTestRegion(TABLE, hcd);
    BlockCache cache = region.getStore(hcd.getName()).getCacheConfig().getBlockCache();
     CacheStats stats = cache.getStats();
     writeTestData(region);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
index fae7247..6c7d686 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
@@ -30,21 +30,21 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ByteBufferKeyValue;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.ByteBufferKeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.ChunkCreator;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -307,7 +307,7 @@ public class TestScannerFromBucketCache {
     put.add(kv21);
     region.put(put);
     region.flush(true);
-    Store store = region.getStore(fam1);
+    HStore store = region.getStore(fam1);
     while (store.getStorefilesCount() <= 0) {
       try {
         Thread.sleep(20);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java
index be28d17..da71c46 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java
@@ -69,7 +69,7 @@ public class DelegatingKeyValueScanner implements KeyValueScanner {
   }
 
   @Override
-  public boolean shouldUseScanner(Scan scan, Store store, long oldestUnexpiredTS) {
+  public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) {
     return delegate.shouldUseScanner(scan, store, oldestUnexpiredTS);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
index 2046816..bde28a2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
@@ -54,10 +54,11 @@ public class NoOpScanPolicyObserver implements RegionCoprocessor, RegionObserver
   public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
       Store store, List<KeyValueScanner> scanners, InternalScanner s, long readPoint)
       throws IOException {
-    ScanInfo oldSI = store.getScanInfo();
+    HStore hs = (HStore) store;
+    ScanInfo oldSI = hs.getScanInfo();
     ScanInfo scanInfo = new ScanInfo(oldSI.getConfiguration(), store.getColumnFamilyDescriptor(),
         oldSI.getTtl(), oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
-    return new StoreScanner((HStore) store, scanInfo, OptionalInt.empty(), scanners,
+    return new StoreScanner(hs, scanInfo, OptionalInt.empty(), scanners,
         ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
   }
 
@@ -69,11 +70,12 @@ public class NoOpScanPolicyObserver implements RegionCoprocessor, RegionObserver
       final ObserverContext<RegionCoprocessorEnvironment> c, Store store,
       List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
       InternalScanner s, CompactionLifeCycleTracker tracker, long readPoint) throws IOException {
+    HStore hs = (HStore) store;
     // this demonstrates how to override the scanners default behavior
-    ScanInfo oldSI = store.getScanInfo();
+    ScanInfo oldSI = hs.getScanInfo();
     ScanInfo scanInfo = new ScanInfo(oldSI.getConfiguration(), store.getColumnFamilyDescriptor(),
         oldSI.getTtl(), oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
-    return new StoreScanner((HStore) store, scanInfo, OptionalInt.empty(), scanners, scanType,
+    return new StoreScanner(hs, scanInfo, OptionalInt.empty(), scanners, scanType,
         store.getSmallestReadPoint(), earliestPutTs);
   }
 
@@ -81,11 +83,12 @@ public class NoOpScanPolicyObserver implements RegionCoprocessor, RegionObserver
   public KeyValueScanner preStoreScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
       Store store, Scan scan, NavigableSet<byte[]> targetCols, KeyValueScanner s, long readPoint)
       throws IOException {
+    HStore hs = (HStore) store;
     Region r = c.getEnvironment().getRegion();
     return scan.isReversed()
-        ? new ReversedStoreScanner((HStore) store, store.getScanInfo(), scan, targetCols,
+        ? new ReversedStoreScanner(hs, hs.getScanInfo(), scan, targetCols,
             r.getReadPoint(scan.getIsolationLevel()))
-        : new StoreScanner((HStore) store, store.getScanInfo(), scan, targetCols,
+        : new StoreScanner(hs, hs.getScanInfo(), scan, targetCols,
             r.getReadPoint(scan.getIsolationLevel()));
   }
 }
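The observer keeps its Store-typed hook signatures but casts once to HStore so that getScanInfo() and the StoreScanner constructors remain usable. Sketched from the flush hook above, using nothing beyond what the diff itself shows:

    HStore hs = (HStore) store;
    ScanInfo oldSI = hs.getScanInfo();
    ScanInfo scanInfo = new ScanInfo(oldSI.getConfiguration(), store.getColumnFamilyDescriptor(),
        oldSI.getTtl(), oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
    return new StoreScanner(hs, scanInfo, OptionalInt.empty(), scanners,
        ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);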

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
index 2427cad..6a7e98b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
@@ -17,6 +17,21 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
+import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -60,21 +75,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 /**
  * Testing of HRegion.incrementColumnValue, HRegion.increment,
  * and HRegion.append
@@ -84,7 +84,7 @@ public class TestAtomicOperation {
   private static final Log LOG = LogFactory.getLog(TestAtomicOperation.class);
   @Rule public TestName name = new TestName();
 
-  Region region = null;
+  HRegion region = null;
   private HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();
 
   // Test names

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
index 2d08e50..feb456e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
@@ -208,7 +208,7 @@ public class TestCacheOnWriteInSchema {
   public void testCacheOnWriteInSchema() throws IOException {
     // Write some random data into the store
     StoreFileWriter writer = store.createWriterInTmp(Integer.MAX_VALUE,
-        HFile.DEFAULT_COMPRESSION_ALGORITHM, false, true, false);
+        HFile.DEFAULT_COMPRESSION_ALGORITHM, false, true, false, false);
     writeStoreFile(writer);
     writer.close();
     // Verify the block types of interest were cached on write

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index 356054e..349815f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY_BYTES;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
+import static org.apache.hadoop.hbase.regionserver.Store.PRIORITY_USER;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -182,7 +183,7 @@ public class TestCompaction {
       spyR.compactStores();
 
       // ensure that the compaction stopped, all old files are intact,
-      Store s = r.stores.get(COLUMN_FAMILY);
+      HStore s = r.getStore(COLUMN_FAMILY);
       assertEquals(compactionThreshold, s.getStorefilesCount());
       assertTrue(s.getStorefilesSize() > 15*1000);
       // and no new store files persisted past compactStores()
@@ -210,8 +211,7 @@ public class TestCompaction {
       // Multiple versions allowed for an entry, so the delete isn't enough
       // Lower TTL and expire to ensure that all our entries have been wiped
       final int ttl = 1000;
-      for (Store hstore: this.r.stores.values()) {
-        HStore store = (HStore)hstore;
+      for (HStore store: this.r.stores.values()) {
         ScanInfo old = store.getScanInfo();
        ScanInfo si = new ScanInfo(old.getConfiguration(), old.getFamily(), old.getMinVersions(),
            old.getMaxVersions(), ttl, old.getKeepDeletedCells(), HConstants.DEFAULT_BLOCKSIZE, 0,
@@ -307,7 +307,7 @@ public class TestCompaction {
 
     CountDownLatch latch = new CountDownLatch(1);
     Tracker tracker = new Tracker(latch);
-    thread.requestCompaction(r, store, "test custom comapction", Store.PRIORITY_USER, tracker,
+    thread.requestCompaction(r, store, "test custom comapction", PRIORITY_USER, tracker,
       null);
     // wait for the latch to complete.
     latch.await();
@@ -340,7 +340,7 @@ public class TestCompaction {
 
     CountDownLatch latch = new CountDownLatch(1);
     Tracker tracker = new Tracker(latch);
-    thread.requestCompaction(mockRegion, store, "test custom comapction", Store.PRIORITY_USER,
+    thread.requestCompaction(mockRegion, store, "test custom comapction", PRIORITY_USER,
       tracker, null);
     // wait for the latch to complete.
     latch.await(120, TimeUnit.SECONDS);
@@ -380,7 +380,7 @@ public class TestCompaction {
       createStoreFile(r, store.getColumnFamilyName());
       createStoreFile(r, store.getColumnFamilyName());
       createStoreFile(r, store.getColumnFamilyName());
-      thread.requestCompaction(r, store, "test mulitple custom comapctions", Store.PRIORITY_USER,
+      thread.requestCompaction(r, store, "test mulitple custom comapctions", PRIORITY_USER,
         tracker, null);
     }
     // wait for the latch to complete.

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionFileNotFound.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionFileNotFound.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionFileNotFound.java
index a3184b4..5735e88 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionFileNotFound.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionFileNotFound.java
@@ -180,7 +180,7 @@ public class TestCompactionFileNotFound {
      // Refresh store files post compaction, this should not open already compacted files
      hr1.refreshStoreFiles(true);
      // Archive the store files and try another compaction to see if all is good
-      for (Store store : hr1.getStores()) {
+      for (HStore store : hr1.getStores()) {
         store.closeAndArchiveCompactedFiles();
       }
       try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
index 9c33d28..d8d38fa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
@@ -40,9 +40,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -288,10 +288,9 @@ public class TestCompoundBloomFilter {
       byte[] qualifier) {
     Scan scan = new Scan().withStartRow(row).withStopRow(row, true);
    scan.addColumn(Bytes.toBytes(RandomKeyValueUtil.COLUMN_FAMILY_NAME), qualifier);
-    Store store = mock(Store.class);
-    HColumnDescriptor hcd = mock(HColumnDescriptor.class);
-    when(hcd.getName()).thenReturn(Bytes.toBytes(RandomKeyValueUtil.COLUMN_FAMILY_NAME));
-    when(store.getColumnFamilyDescriptor()).thenReturn(hcd);
+    HStore store = mock(HStore.class);
+    when(store.getColumnFamilyDescriptor())
+        .thenReturn(ColumnFamilyDescriptorBuilder.of(RandomKeyValueUtil.COLUMN_FAMILY_NAME));
     return scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
   }
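Instead of mocking HColumnDescriptor, the test now mocks the concrete HStore and answers getColumnFamilyDescriptor() with an immutable descriptor from ColumnFamilyDescriptorBuilder.of. Condensed sketch (Mockito's mock/when are statically imported in this test):

    HStore store = mock(HStore.class);
    when(store.getColumnFamilyDescriptor())
        .thenReturn(ColumnFamilyDescriptorBuilder.of(RandomKeyValueUtil.COLUMN_FAMILY_NAME));
    boolean useScanner = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);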
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
index fc0659f..066e686 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java
@@ -18,6 +18,8 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.apache.hadoop.hbase.regionserver.Store.PRIORITY_USER;
+
 import java.io.IOException;
 import java.security.Key;
 import java.security.SecureRandom;
@@ -539,7 +541,7 @@ public class TestHMobStore {
     // Trigger major compaction
     this.store.triggerMajorCompaction();
     Optional<CompactionContext> requestCompaction =
-        this.store.requestCompaction(Store.PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null);
+        this.store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null);
     this.store.compact(requestCompaction.get(), NoLimitThroughputController.INSTANCE, null);
     Assert.assertEquals(1, this.store.getStorefiles().size());
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 0b0d651..9fcdf56 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -284,7 +284,7 @@ public class TestHRegion {
   @Test
   public void testCloseCarryingSnapshot() throws IOException {
     HRegion region = initHRegion(tableName, method, CONF, COLUMN_FAMILY_BYTES);
-    Store store = region.getStore(COLUMN_FAMILY_BYTES);
+    HStore store = region.getStore(COLUMN_FAMILY_BYTES);
     // Get some random bytes.
     byte [] value = Bytes.toBytes(method);
     // Make a random put against our cf.
@@ -333,7 +333,7 @@ public class TestHRegion {
    HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, faultyLog,
         COLUMN_FAMILY_BYTES);
 
-    Store store = region.getStore(COLUMN_FAMILY_BYTES);
+    HStore store = region.getStore(COLUMN_FAMILY_BYTES);
     // Get some random bytes.
     byte [] value = Bytes.toBytes(method);
     faultyLog.setStoreFlushCtx(store.createFlushContext(12345));
@@ -350,7 +350,7 @@ public class TestHRegion {
     } finally {
       assertTrue("The regionserver should have thrown an exception", threwIOE);
     }
-    long sz = store.getSizeToFlush().getDataSize();
+    long sz = store.getFlushableSize().getDataSize();
     assertTrue("flushable size should be zero, but it is " + sz, sz == 0);
     HBaseTestingUtility.closeRegionAndWAL(region);
   }
@@ -382,7 +382,7 @@ public class TestHRegion {
    FSHLog hLog = new FSHLog(fs, rootDir, "testMemstoreSizeWithFlushCanceling", CONF);
    HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog,
         COLUMN_FAMILY_BYTES);
-    Store store = region.getStore(COLUMN_FAMILY_BYTES);
+    HStore store = region.getStore(COLUMN_FAMILY_BYTES);
     assertEquals(0, region.getMemstoreSize());
 
     // Put some value and make sure flush could be completed normally
@@ -394,7 +394,7 @@ public class TestHRegion {
     assertTrue(onePutSize > 0);
     region.flush(true);
     assertEquals("memstoreSize should be zero", 0, region.getMemstoreSize());
-    assertEquals("flushable size should be zero", 0, store.getSizeToFlush().getDataSize());
+    assertEquals("flushable size should be zero", 0, store.getFlushableSize().getDataSize());
 
    // save normalCPHost and replaced by mockedCPHost, which will cancel flush requests
     RegionCoprocessorHost normalCPHost = region.getCoprocessorHost();
@@ -406,13 +406,13 @@ public class TestHRegion {
     region.flush(true);
    assertEquals("memstoreSize should NOT be zero", onePutSize, region.getMemstoreSize());
     assertEquals("flushable size should NOT be zero", onePutSize,
-        store.getSizeToFlush().getDataSize());
+        store.getFlushableSize().getDataSize());
 
     // set normalCPHost and flush again, the snapshot will be flushed
     region.setCoprocessorHost(normalCPHost);
     region.flush(true);
     assertEquals("memstoreSize should be zero", 0, region.getMemstoreSize());
-    assertEquals("flushable size should be zero", 0, store.getSizeToFlush().getDataSize());
+    assertEquals("flushable size should be zero", 0, store.getFlushableSize().getDataSize());
     HBaseTestingUtility.closeRegionAndWAL(region);
   }
 
@@ -424,7 +424,7 @@ public class TestHRegion {
     FSHLog hLog = new FSHLog(fs, rootDir, testName, CONF);
    HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog,
         COLUMN_FAMILY_BYTES);
-    Store store = region.getStore(COLUMN_FAMILY_BYTES);
+    HStore store = region.getStore(COLUMN_FAMILY_BYTES);
     assertEquals(0, region.getMemstoreSize());
 
     // Put one value
@@ -450,7 +450,7 @@ public class TestHRegion {
     long expectedSize = onePutSize * 2;
    assertEquals("memstoreSize should be incremented", expectedSize, region.getMemstoreSize());
     assertEquals("flushable size should be incremented", expectedSize,
-        store.getSizeToFlush().getDataSize());
+        store.getFlushableSize().getDataSize());
 
     region.setCoprocessorHost(null);
     HBaseTestingUtility.closeRegionAndWAL(region);
@@ -565,7 +565,7 @@ public class TestHRegion {
          p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[])null));
          region.put(p1);
          // Manufacture an outstanding snapshot -- fake a failed flush by doing prepare step only.
-          Store store = region.getStore(COLUMN_FAMILY_BYTES);
+          HStore store = region.getStore(COLUMN_FAMILY_BYTES);
           StoreFlushContext storeFlushCtx = store.createFlushContext(12345);
           storeFlushCtx.prepare();
           // Now add two entries to the foreground memstore.
@@ -699,7 +699,7 @@ public class TestHRegion {
       }
       MonitoredTask status = TaskMonitor.get().createStatus(method);
      Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-      for (Store store : region.getStores()) {
+      for (HStore store : region.getStores()) {
        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId - 1);
      }
      long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
@@ -751,7 +751,7 @@ public class TestHRegion {
       long recoverSeqId = 1030;
       MonitoredTask status = TaskMonitor.get().createStatus(method);
      Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-      for (Store store : region.getStores()) {
+      for (HStore store : region.getStores()) {
        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1);
      }
      long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
@@ -796,7 +796,7 @@ public class TestHRegion {
       dos.close();
 
      Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-      for (Store store : region.getStores()) {
+      for (HStore store : region.getStores()) {
        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId);
      }
      long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, null);
@@ -854,7 +854,7 @@ public class TestHRegion {
       long recoverSeqId = 1030;
      Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
      MonitoredTask status = TaskMonitor.get().createStatus(method);
-      for (Store store : region.getStores()) {
+      for (HStore store : region.getStores()) {
        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1);
      }
      long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
@@ -3713,7 +3713,7 @@ public class TestHRegion {
 
         if (i != 0 && i % compactInterval == 0) {
           region.compact(true);
-          for (Store store : region.getStores()) {
+          for (HStore store : region.getStores()) {
             store.closeAndArchiveCompactedFiles();
           }
         }
@@ -3893,7 +3893,7 @@ public class TestHRegion {
           // Compact regularly to avoid creating too many files and exceeding
           // the ulimit.
           region.compact(false);
-          for (Store store : region.getStores()) {
+          for (HStore store : region.getStores()) {
             store.closeAndArchiveCompactedFiles();
           }
         }
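Across this file the flushable-size checks move from getSizeToFlush() to getFlushableSize(), which in this patch is queried through getDataSize() (getHeapSize() is used in the replay tests further down). A compact sketch of the assertion as it now reads:

    HStore store = region.getStore(COLUMN_FAMILY_BYTES);
    region.flush(true);
    assertEquals("memstoreSize should be zero", 0, region.getMemstoreSize());
    assertEquals("flushable size should be zero", 0, store.getFlushableSize().getDataSize());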

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index 243468f..1dc943f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -35,8 +35,6 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -53,34 +51,23 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.regionserver.HRegion.FlushResultImpl;
 import org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult;
 import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -89,6 +76,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
@@ -100,6 +88,19 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
+
 /**
  * Tests of HRegion methods for replaying flush, compaction, region open, etc events for secondary
  * region replicas
@@ -127,7 +128,7 @@ public class TestHRegionReplayEvents {
 
   // per test fields
   private Path rootDir;
-  private HTableDescriptor htd;
+  private TableDescriptor htd;
   private long time;
   private RegionServerServices rss;
   private HRegionInfo primaryHri, secondaryHri;
@@ -146,11 +147,11 @@ public class TestHRegionReplayEvents {
     rootDir = new Path(dir + method);
     TEST_UTIL.getConfiguration().set(HConstants.HBASE_DIR, rootDir.toString());
     method = name.getMethodName();
-
-    htd = new HTableDescriptor(TableName.valueOf(method));
+    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(method));
     for (byte[] family : families) {
-      htd.addFamily(new HColumnDescriptor(family));
+      builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
     }
+    htd = builder.build();
 
     time = System.currentTimeMillis();
    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
@@ -338,10 +339,10 @@ public class TestHRegionReplayEvents {
       if (flushDesc != null) {
        // first verify that everything is replayed and visible before flush event replay
         verifyData(secondaryRegion, 0, lastReplayed, cq, families);
-        Store store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
-        long storeMemstoreSize = store.getMemStoreSize();
+        HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
+        long storeMemstoreSize = store.getMemStoreSize().getHeapSize();
         long regionMemstoreSize = secondaryRegion.getMemstoreSize();
-        long storeFlushableSize = store.getFlushableSize();
+        long storeFlushableSize = store.getFlushableSize().getHeapSize();
         long storeSize = store.getSize();
         long storeSizeUncompressed = store.getStoreSizeUncompressed();
         if (flushDesc.getAction() == FlushAction.START_FLUSH) {
@@ -351,7 +352,7 @@ public class TestHRegionReplayEvents {
          assertEquals(result.flushOpSeqId, flushDesc.getFlushSequenceNumber());
 
           // assert that the store memstore is smaller now
-          long newStoreMemstoreSize = store.getMemStoreSize();
+          long newStoreMemstoreSize = store.getMemStoreSize().getHeapSize();
           LOG.info("Memstore size reduced by:"
              + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize));
           assertTrue(storeMemstoreSize > newStoreMemstoreSize);
@@ -362,10 +363,10 @@ public class TestHRegionReplayEvents {
 
           // assert that the flush files are picked
           expectedStoreFileCount++;
-          for (Store s : secondaryRegion.getStores()) {
+          for (HStore s : secondaryRegion.getStores()) {
             assertEquals(expectedStoreFileCount, s.getStorefilesCount());
           }
-          long newFlushableSize = store.getFlushableSize();
+          long newFlushableSize = store.getFlushableSize().getHeapSize();
           assertTrue(storeFlushableSize > newFlushableSize);
 
           // assert that the region memstore is smaller now
@@ -383,7 +384,7 @@ public class TestHRegionReplayEvents {
        secondaryRegion.replayWALCompactionMarker(compactionDesc, true, false, Long.MAX_VALUE);
 
         // assert that the compaction is applied
-        for (Store store : secondaryRegion.getStores()) {
+        for (HStore store : secondaryRegion.getStores()) {
           if (store.getColumnFamilyName().equals("cf1")) {
             assertEquals(1, store.getStorefilesCount());
           } else {
@@ -401,7 +402,7 @@ public class TestHRegionReplayEvents {
 
    LOG.info("-- Verifying edits from primary. Ensuring that files are not deleted");
     verifyData(primaryRegion, 0, lastReplayed, cq, families);
-    for (Store store : primaryRegion.getStores()) {
+    for (HStore store : primaryRegion.getStores()) {
       if (store.getColumnFamilyName().equals("cf1")) {
         assertEquals(1, store.getStorefilesCount());
       } else {
@@ -437,10 +438,10 @@ public class TestHRegionReplayEvents {
       = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
       if (flushDesc != null) {
        // first verify that everything is replayed and visible before flush event replay
-        Store store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
-        long storeMemstoreSize = store.getMemStoreSize();
+        HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
+        long storeMemstoreSize = store.getMemStoreSize().getHeapSize();
         long regionMemstoreSize = secondaryRegion.getMemstoreSize();
-        long storeFlushableSize = store.getFlushableSize();
+        long storeFlushableSize = store.getFlushableSize().getHeapSize();
 
         if (flushDesc.getAction() == FlushAction.START_FLUSH) {
           startFlushDesc = flushDesc;
@@ -452,7 +453,7 @@ public class TestHRegionReplayEvents {
           assertTrue(storeFlushableSize > 0);
 
           // assert that the store memstore is smaller now
-          long newStoreMemstoreSize = store.getMemStoreSize();
+          long newStoreMemstoreSize = store.getMemStoreSize().getHeapSize();
           LOG.info("Memstore size reduced by:"
              + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize));
           assertTrue(storeMemstoreSize > newStoreMemstoreSize);
@@ -571,7 +572,7 @@ public class TestHRegionReplayEvents {
 
     // no store files in the region
     int expectedStoreFileCount = 0;
-    for (Store s : secondaryRegion.getStores()) {
+    for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
     long regionMemstoreSize = secondaryRegion.getMemstoreSize();
@@ -586,11 +587,11 @@ public class TestHRegionReplayEvents {
 
     // assert that the flush files are picked
     expectedStoreFileCount++;
-    for (Store s : secondaryRegion.getStores()) {
+    for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
-    Store store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
-    long newFlushableSize = store.getFlushableSize();
+    HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
+    long newFlushableSize = store.getFlushableSize().getHeapSize();
    assertTrue(newFlushableSize > 0); // assert that the memstore is not dropped
 
     // assert that the region memstore is same as before
@@ -661,7 +662,7 @@ public class TestHRegionReplayEvents {
 
     // no store files in the region
     int expectedStoreFileCount = 0;
-    for (Store s : secondaryRegion.getStores()) {
+    for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
     long regionMemstoreSize = secondaryRegion.getMemstoreSize();
@@ -676,11 +677,11 @@ public class TestHRegionReplayEvents {
 
     // assert that the flush files are picked
     expectedStoreFileCount++;
-    for (Store s : secondaryRegion.getStores()) {
+    for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
-    Store store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
-    long newFlushableSize = store.getFlushableSize();
+    HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
+    long newFlushableSize = store.getFlushableSize().getHeapSize();
    assertTrue(newFlushableSize > 0); // assert that the memstore is not dropped
 
     // assert that the region memstore is smaller than before, but not empty
@@ -762,7 +763,7 @@ public class TestHRegionReplayEvents {
 
     // no store files in the region
     int expectedStoreFileCount = 0;
-    for (Store s : secondaryRegion.getStores()) {
+    for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
     long regionMemstoreSize = secondaryRegion.getMemstoreSize();
@@ -772,8 +773,8 @@ public class TestHRegionReplayEvents {
     assertTrue(commitFlushDesc.getFlushSequenceNumber() > 0);
 
     // ensure all files are visible in secondary
-    for (Store store : secondaryRegion.getStores()) {
-      assertTrue(store.getMaxSequenceId() <= secondaryRegion.getReadPoint(null));
+    for (HStore store : secondaryRegion.getStores()) {
+      assertTrue(store.getMaxSequenceId().orElse(0L) <= secondaryRegion.getReadPoint(null));
     }
 
     LOG.info("-- Replaying flush commit in secondary" + commitFlushDesc);
@@ -781,11 +782,11 @@ public class TestHRegionReplayEvents {
 
     // assert that the flush files are picked
     expectedStoreFileCount++;
-    for (Store s : secondaryRegion.getStores()) {
+    for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
-    Store store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
-    long newFlushableSize = store.getFlushableSize();
+    HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
+    long newFlushableSize = store.getFlushableSize().getHeapSize();
     if (droppableMemstore) {
       assertTrue(newFlushableSize == 0); // assert that the memstore is dropped
     } else {
@@ -860,7 +861,7 @@ public class TestHRegionReplayEvents {
 
     // no store files in the region
     int expectedStoreFileCount = 0;
-    for (Store s : secondaryRegion.getStores()) {
+    for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
     long regionMemstoreSize = secondaryRegion.getMemstoreSize();
@@ -872,11 +873,11 @@ public class TestHRegionReplayEvents {
 
     // assert that the flush files are picked
     expectedStoreFileCount++;
-    for (Store s : secondaryRegion.getStores()) {
+    for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
-    Store store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
-    long newFlushableSize = store.getFlushableSize();
+    HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
+    long newFlushableSize = store.getFlushableSize().getHeapSize();
     assertTrue(newFlushableSize == 0);
 
     // assert that the region memstore is empty
@@ -941,7 +942,7 @@ public class TestHRegionReplayEvents {
 
     // no store files in the region
     int expectedStoreFileCount = 0;
-    for (Store s : secondaryRegion.getStores()) {
+    for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
 
@@ -951,11 +952,11 @@ public class TestHRegionReplayEvents {
 
     // assert that the flush files are picked
     expectedStoreFileCount = 2; // two flushes happened
-    for (Store s : secondaryRegion.getStores()) {
+    for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
-    Store store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
-    MemstoreSize newSnapshotSize = store.getSizeOfSnapshot();
+    HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
+    MemstoreSize newSnapshotSize = store.getSnapshotSize();
     assertTrue(newSnapshotSize.getDataSize() == 0);
 
     // assert that the region memstore is empty
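
A minimal sketch (not part of the patch) of how the replay assertions above now read
store sizes: getMemStoreSize() and getFlushableSize() return a MemstoreSize that is
unwrapped with getHeapSize(), and getMaxSequenceId() returns an optional. The class
name StoreSizeProbe and the hard-coded "cf1" family are illustrative only.

import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreSizeProbe {
  // Mirrors the TestHRegionReplayEvents assertions: heap-backed sizes and the
  // optional max sequence id, read from the "cf1" store of a region.
  static void logSizes(HRegion region) {
    HStore store = region.getStore(Bytes.toBytes("cf1"));
    long memstoreHeap = store.getMemStoreSize().getHeapSize();    // previously a bare long
    long flushableHeap = store.getFlushableSize().getHeapSize();  // previously a bare long
    long maxSeqId = store.getMaxSequenceId().orElse(0L);          // previously a bare long
    System.out.println("memstore=" + memstoreHeap + " flushable=" + flushableHeap
        + " maxSeqId=" + maxSeqId);
  }
}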

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
index 7a78dbb..9b78f41 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,18 +15,18 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.regionserver;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.lang.ref.SoftReference;
@@ -60,19 +59,22 @@ import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellBuilderFactory;
+import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MemoryCompactionPolicy;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterBase;
@@ -95,10 +97,12 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
+import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.util.Progressable;
 import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -109,7 +113,7 @@ import org.mockito.Mockito;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 
 /**
- * Test class for the Store
+ * Test class for the HStore
  */
 @Category({ RegionServerTests.class, MediumTests.class })
 public class TestHStore {
@@ -117,6 +121,7 @@ public class TestHStore {
   @Rule
   public TestName name = new TestName();
 
+  HRegion region;
   HStore store;
   byte [] table = Bytes.toBytes("table");
   byte [] family = Bytes.toBytes("family");
@@ -138,8 +143,8 @@ public class TestHStore {
   long id = System.currentTimeMillis();
   Get get = new Get(row);
 
-  private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private final String DIR = TEST_UTIL.getDataTestDir("TestStore").toString();
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final String DIR = TEST_UTIL.getDataTestDir("TestStore").toString();
 
 
   /**
@@ -164,55 +169,51 @@ public class TestHStore {
     init(methodName, TEST_UTIL.getConfiguration());
   }
 
-  private Store init(String methodName, Configuration conf) throws IOException {
-    HColumnDescriptor hcd = new HColumnDescriptor(family);
+  private HStore init(String methodName, Configuration conf) throws IOException {
     // some of the tests write 4 versions and then flush
     // (with HBASE-4241, lower versions are collected on flush)
-    hcd.setMaxVersions(4);
-    return init(methodName, conf, hcd);
+    return init(methodName, conf,
+      ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(4).build());
   }
 
-  private HStore init(String methodName, Configuration conf, HColumnDescriptor hcd)
+  private HStore init(String methodName, Configuration conf, ColumnFamilyDescriptor hcd)
       throws IOException {
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
-    return init(methodName, conf, htd, hcd);
+    return init(methodName, conf, TableDescriptorBuilder.newBuilder(TableName.valueOf(table)), hcd);
   }
 
-  private HStore init(String methodName, Configuration conf, HTableDescriptor htd,
-      HColumnDescriptor hcd) throws IOException {
-    return init(methodName, conf, htd, hcd, null);
+  private HStore init(String methodName, Configuration conf, TableDescriptorBuilder builder,
+      ColumnFamilyDescriptor hcd) throws IOException {
+    return init(methodName, conf, builder, hcd, null);
   }
 
-  @SuppressWarnings("deprecation")
-  private HStore init(String methodName, Configuration conf, HTableDescriptor htd,
-      HColumnDescriptor hcd, MyStoreHook hook) throws IOException {
-    return init(methodName, conf, htd, hcd, hook, false);
+  private HStore init(String methodName, Configuration conf, TableDescriptorBuilder builder,
+      ColumnFamilyDescriptor hcd, MyStoreHook hook) throws IOException {
+    return init(methodName, conf, builder, hcd, hook, false);
   }
-  @SuppressWarnings("deprecation")
-  private HStore init(String methodName, Configuration conf, HTableDescriptor htd,
-      HColumnDescriptor hcd, MyStoreHook hook, boolean switchToPread) throws IOException {
-    //Setting up a Store
-    Path basedir = new Path(DIR+methodName);
+
+  private void initHRegion(String methodName, Configuration conf, TableDescriptorBuilder builder,
+      ColumnFamilyDescriptor hcd, MyStoreHook hook, boolean switchToPread) throws IOException {
+    TableDescriptor htd = builder.addColumnFamily(hcd).build();
+    Path basedir = new Path(DIR + methodName);
     Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
    final Path logdir = new Path(basedir, AbstractFSWALProvider.getWALDirectoryName(methodName));
 
     FileSystem fs = FileSystem.get(conf);
 
     fs.delete(logdir, true);
-
-    if (htd.hasFamily(hcd.getName())) {
-      htd.modifyFamily(hcd);
-    } else {
-      htd.addFamily(hcd);
-    }
     ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false,
       MemStoreLABImpl.CHUNK_SIZE_DEFAULT, 1, 0, null);
     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
-    final Configuration walConf = new Configuration(conf);
+    Configuration walConf = new Configuration(conf);
     FSUtils.setRootDir(walConf, basedir);
-    final WALFactory wals = new WALFactory(walConf, null, methodName);
-    HRegion region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes(),
-            info.getTable().getNamespace()), fs, conf, info, htd, null);
+    WALFactory wals = new WALFactory(walConf, null, methodName);
+    region = new HRegion(new HRegionFileSystem(conf, fs, tableDir, info),
+        wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()), conf, htd, null);
+  }
+
+  private HStore init(String methodName, Configuration conf, TableDescriptorBuilder builder,
+      ColumnFamilyDescriptor hcd, MyStoreHook hook, boolean switchToPread) throws IOException {
+    initHRegion(methodName, conf, builder, hcd, hook, switchToPread);
     if (hook == null) {
       store = new HStore(region, hcd, conf);
     } else {
@@ -293,13 +294,14 @@ public class TestHStore {
     Configuration conf = HBaseConfiguration.create();
     FileSystem fs = FileSystem.get(conf);
 
-    HColumnDescriptor hcd = new HColumnDescriptor(family);
-    hcd.setCompressionType(Compression.Algorithm.GZ);
-    hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
+    ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(family)
+        .setCompressionType(Compression.Algorithm.GZ).setDataBlockEncoding(DataBlockEncoding.DIFF)
+        .build();
     init(name.getMethodName(), conf, hcd);
 
     // Test createWriterInTmp()
-    StoreFileWriter writer = store.createWriterInTmp(4, hcd.getCompressionType(), false, true, false);
+    StoreFileWriter writer =
+        store.createWriterInTmp(4, hcd.getCompressionType(), false, true, false, false);
     Path path = writer.getPath();
     writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
     writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
@@ -335,10 +337,8 @@ public class TestHStore {
     // Set the compaction threshold higher to avoid normal compactions.
     conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 5);
 
-    HColumnDescriptor hcd = new HColumnDescriptor(family);
-    hcd.setMinVersions(minVersions);
-    hcd.setTimeToLive(ttl);
-    init(name.getMethodName() + "-" + minVersions, conf, hcd);
+    init(name.getMethodName() + "-" + minVersions, conf, ColumnFamilyDescriptorBuilder
+        .newBuilder(family).setMinVersions(minVersions).setTimeToLive(ttl).build());
 
     long storeTtl = this.store.getScanInfo().getTtl();
     long sleepTime = storeTtl / storeFileNum;
@@ -599,6 +599,22 @@ public class TestHStore {
   @After
   public void tearDown() throws Exception {
     EnvironmentEdgeManagerTestHelper.reset();
+    if (store != null) {
+      try {
+        store.close();
+      } catch (IOException e) {
+      }
+      store = null;
+    }
+    if (region != null) {
+      region.close();
+      region = null;
+    }
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws IOException {
+    TEST_UTIL.cleanupTestDir();
   }
 
   @Test
@@ -824,17 +840,19 @@ public class TestHStore {
 
     // HTD overrides XML.
     --anyValue;
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
-    HColumnDescriptor hcd = new HColumnDescriptor(family);
-    htd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
-    init(name.getMethodName() + "-htd", conf, htd, hcd);
+    init(name.getMethodName() + "-htd", conf, TableDescriptorBuilder
+        .newBuilder(TableName.valueOf(table)).setValue(CONFIG_KEY, Long.toString(anyValue)),
+      ColumnFamilyDescriptorBuilder.of(family));
     assertTrue(store.throttleCompaction(anyValue + 1));
     assertFalse(store.throttleCompaction(anyValue));
 
     // HCD overrides them both.
     --anyValue;
-    hcd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
-    init(name.getMethodName() + "-hcd", conf, htd, hcd);
+    init(name.getMethodName() + "-hcd", conf,
+      TableDescriptorBuilder.newBuilder(TableName.valueOf(table)).setValue(CONFIG_KEY,
+        Long.toString(anyValue)),
+      ColumnFamilyDescriptorBuilder.newBuilder(family).setValue(CONFIG_KEY, Long.toString(anyValue))
+          .build());
     assertTrue(store.throttleCompaction(anyValue + 1));
     assertFalse(store.throttleCompaction(anyValue));
   }
@@ -862,7 +880,7 @@ public class TestHStore {
   private void addStoreFile() throws IOException {
     HStoreFile f = this.store.getStorefiles().iterator().next();
     Path storedir = f.getPath().getParent();
-    long seqid = this.store.getMaxSequenceId();
+    long seqid = this.store.getMaxSequenceId().orElse(0L);
     Configuration c = TEST_UTIL.getConfiguration();
     FileSystem fs = FileSystem.get(c);
     HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
@@ -989,20 +1007,23 @@ public class TestHStore {
   public void testNumberOfMemStoreScannersAfterFlush() throws IOException {
     long seqId = 100;
     long timestamp = System.currentTimeMillis();
-    Cell cell0 = CellUtil.createCell(row, family, qf1, timestamp,
-            KeyValue.Type.Put.getCode(), qf1);
+    Cell cell0 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family)
+        .setQualifier(qf1).setTimestamp(timestamp).setType(KeyValue.Type.Put.getCode())
+        .setValue(qf1).build();
     CellUtil.setSequenceId(cell0, seqId);
-    testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Collections.EMPTY_LIST);
+    testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Collections.emptyList());
 
-    Cell cell1 = CellUtil.createCell(row, family, qf2, timestamp,
-            KeyValue.Type.Put.getCode(), qf1);
+    Cell cell1 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family)
+        .setQualifier(qf2).setTimestamp(timestamp).setType(KeyValue.Type.Put.getCode())
+        .setValue(qf1).build();
     CellUtil.setSequenceId(cell1, seqId);
     testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Arrays.asList(cell1));
 
     seqId = 101;
     timestamp = System.currentTimeMillis();
-    Cell cell2 = CellUtil.createCell(row2, family, qf2, timestamp,
-            KeyValue.Type.Put.getCode(), qf1);
+    Cell cell2 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row2).setFamily(family)
+        .setQualifier(qf2).setTimestamp(timestamp).setType(KeyValue.Type.Put.getCode())
+        .setValue(qf1).build();
      CellUtil.setSequenceId(cell2, seqId);
     testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Arrays.asList(cell1, cell2));
   }
@@ -1046,15 +1067,16 @@ public class TestHStore {
     }
   }
 
-  private Cell createCell(byte[] qualifier, long ts, long sequenceId, byte[] value) throws IOException {
-    Cell c = CellUtil.createCell(row, family, qualifier, ts, KeyValue.Type.Put.getCode(), value);
-    CellUtil.setSequenceId(c, sequenceId);
-    return c;
+  private Cell createCell(byte[] qualifier, long ts, long sequenceId, byte[] value)
+      throws IOException {
+    return createCell(row, qualifier, ts, sequenceId, value);
   }
 
   private Cell createCell(byte[] row, byte[] qualifier, long ts, long sequenceId, byte[] value)
       throws IOException {
-    Cell c = CellUtil.createCell(row, family, qualifier, ts, KeyValue.Type.Put.getCode(), value);
+    Cell c = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family)
+        .setQualifier(qualifier).setTimestamp(ts).setType(KeyValue.Type.Put.getCode())
+        .setValue(value).build();
     CellUtil.setSequenceId(c, sequenceId);
     return c;
   }
@@ -1148,8 +1170,6 @@ public class TestHStore {
   private void testFlushBeforeCompletingScan(MyListHook hook, Filter filter, int expectedSize)
           throws IOException, InterruptedException {
     Configuration conf = HBaseConfiguration.create();
-    HColumnDescriptor hcd = new HColumnDescriptor(family);
-    hcd.setMaxVersions(1);
     byte[] r0 = Bytes.toBytes("row0");
     byte[] r1 = Bytes.toBytes("row1");
     byte[] r2 = Bytes.toBytes("row2");
@@ -1159,12 +1179,14 @@ public class TestHStore {
     MemstoreSize memStoreSize = new MemstoreSize();
     long ts = EnvironmentEdgeManager.currentTime();
     long seqId = 100;
-    init(name.getMethodName(), conf, new HTableDescriptor(TableName.valueOf(table)), hcd, new MyStoreHook() {
-      @Override
-      public long getSmallestReadPoint(HStore store) {
-        return seqId + 3;
-      }
-    });
+    init(name.getMethodName(), conf, TableDescriptorBuilder.newBuilder(TableName.valueOf(table)),
+      ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(1).build(),
+      new MyStoreHook() {
+        @Override
+        public long getSmallestReadPoint(HStore store) {
+          return seqId + 3;
+        }
+      });
     // The cells having the value0 won't be flushed to disk because the value of max version is 1
     store.add(createCell(r0, qf1, ts, seqId, value0), memStoreSize);
     store.add(createCell(r0, qf2, ts, seqId, value0), memStoreSize);
@@ -1210,9 +1232,8 @@ public class TestHStore {
   public void testCreateScannerAndSnapshotConcurrently() throws IOException, InterruptedException {
     Configuration conf = HBaseConfiguration.create();
     conf.set(HStore.MEMSTORE_CLASS_NAME, MyCompactingMemStore.class.getName());
-    HColumnDescriptor hcd = new HColumnDescriptor(family);
-    hcd.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
-    init(name.getMethodName(), conf, hcd);
+    init(name.getMethodName(), conf, ColumnFamilyDescriptorBuilder.newBuilder(family)
+        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC).build());
     byte[] value = Bytes.toBytes("value");
     MemstoreSize memStoreSize = new MemstoreSize();
     long ts = EnvironmentEdgeManager.currentTime();
@@ -1402,9 +1423,8 @@ public class TestHStore {
     conf.set(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, String.valueOf(flushSize));
     // Set the lower threshold to invoke the "MERGE" policy
     conf.set(MemStoreCompactor.COMPACTING_MEMSTORE_THRESHOLD_KEY, String.valueOf(0));
-    HColumnDescriptor hcd = new HColumnDescriptor(family);
-    hcd.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
-    init(name.getMethodName(), conf, hcd);
+    init(name.getMethodName(), conf, ColumnFamilyDescriptorBuilder.newBuilder(family)
+        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC).build());
     byte[] value = Bytes.toBytes("thisisavarylargevalue");
     MemstoreSize memStoreSize = new MemstoreSize();
     long ts = EnvironmentEdgeManager.currentTime();
@@ -1439,18 +1459,57 @@ public class TestHStore {
     storeFlushCtx.commit(Mockito.mock(MonitoredTask.class));
   }
 
+  @Test
+  public void testAge() throws IOException {
+    long currentTime = System.currentTimeMillis();
+    ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
+    edge.setValue(currentTime);
+    EnvironmentEdgeManager.injectEdge(edge);
+    Configuration conf = TEST_UTIL.getConfiguration();
+    ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(family);
+    initHRegion(name.getMethodName(), conf,
+      TableDescriptorBuilder.newBuilder(TableName.valueOf(table)), hcd, null, false);
+    HStore store = new HStore(region, hcd, conf) {
+
+      @Override
+      protected StoreEngine<?, ?, ?, ?> createStoreEngine(HStore store, Configuration conf,
+          CellComparator kvComparator) throws IOException {
+        List<HStoreFile> storefiles =
+            Arrays.asList(mockStoreFile(currentTime - 10), mockStoreFile(currentTime - 100),
+              mockStoreFile(currentTime - 1000), mockStoreFile(currentTime - 10000));
+        StoreFileManager sfm = mock(StoreFileManager.class);
+        when(sfm.getStorefiles()).thenReturn(storefiles);
+        StoreEngine<?, ?, ?, ?> storeEngine = mock(StoreEngine.class);
+        when(storeEngine.getStoreFileManager()).thenReturn(sfm);
+        return storeEngine;
+      }
+    };
+    assertEquals(10L, store.getMinStoreFileAge().getAsLong());
+    assertEquals(10000L, store.getMaxStoreFileAge().getAsLong());
+    assertEquals((10 + 100 + 1000 + 10000) / 4.0, store.getAvgStoreFileAge().getAsDouble(), 1E-4);
+  }
+
+  private HStoreFile mockStoreFile(long createdTime) {
+    StoreFileInfo info = mock(StoreFileInfo.class);
+    when(info.getCreatedTimestamp()).thenReturn(createdTime);
+    HStoreFile sf = mock(HStoreFile.class);
+    when(sf.getReader()).thenReturn(mock(StoreFileReader.class));
+    when(sf.isHFile()).thenReturn(true);
+    when(sf.getFileInfo()).thenReturn(info);
+    return sf;
+  }
+
   private MyStore initMyStore(String methodName, Configuration conf, MyStoreHook hook)
       throws IOException {
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
-    HColumnDescriptor hcd = new HColumnDescriptor(family);
-    hcd.setMaxVersions(5);
-    return (MyStore) init(methodName, conf, htd, hcd, hook);
+    return (MyStore) init(methodName, conf,
+      TableDescriptorBuilder.newBuilder(TableName.valueOf(table)),
+      ColumnFamilyDescriptorBuilder.newBuilder(family).setMaxVersions(5).build(), hook);
   }
 
-  class MyStore extends HStore {
+  private class MyStore extends HStore {
     private final MyStoreHook hook;
 
-    MyStore(final HRegion region, final HColumnDescriptor family, final Configuration confParam,
+    MyStore(final HRegion region, final ColumnFamilyDescriptor family, final Configuration confParam,
         MyStoreHook hook, boolean switchToPread) throws IOException {
       super(region, family, confParam);
       this.hook = hook;
@@ -1473,8 +1532,10 @@ public class TestHStore {
   }
 
   private abstract class MyStoreHook {
+
     void getScanners(MyStore store) throws IOException {
     }
+
     long getSmallestReadPoint(HStore store) {
       return store.getHRegion().getSmallestReadPoint();
     }
@@ -1482,13 +1543,10 @@ public class TestHStore {
 
   @Test
   public void testSwitchingPreadtoStreamParallelyWithCompactionDischarger() throws Exception {
-    int flushSize = 500;
     Configuration conf = HBaseConfiguration.create();
     conf.set("hbase.hstore.engine.class", DummyStoreEngine.class.getName());
     conf.setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 0);
     // Set the lower threshold to invoke the "MERGE" policy
-    HColumnDescriptor hcd = new HColumnDescriptor(family);
-    hcd.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
     MyStore store = initMyStore(name.getMethodName(), conf, new MyStoreHook() {});
     MemstoreSize memStoreSize = new MemstoreSize();
     long ts = System.currentTimeMillis();
@@ -1514,7 +1572,6 @@ public class TestHStore {
     flushStore(store, seqID);
 
     assertEquals(3, store.getStorefilesCount());
-    ScanInfo scanInfo = store.getScanInfo();
     Scan scan = new Scan();
     scan.addFamily(family);
     Collection<HStoreFile> storefiles2 = store.getStorefiles();
@@ -1541,7 +1598,6 @@ public class TestHStore {
     ArrayList<HStoreFile> actualStorefiles1 = Lists.newArrayList(storefiles2);
     actualStorefiles1.removeAll(actualStorefiles);
     // Do compaction
-    List<Exception> exceptions = new ArrayList<Exception>();
     MyThread thread = new MyThread(storeScanner);
     thread.start();
     store.replaceStoreFiles(actualStorefiles, actualStorefiles1);
@@ -1678,7 +1734,7 @@ public class TestHStore {
     public Object[] toArray() {return delegatee.toArray();}
 
     @Override
-    public <T> T[] toArray(T[] a) {return delegatee.toArray(a);}
+    public <R> R[] toArray(R[] a) {return delegatee.toArray(a);}
 
     @Override
     public boolean add(T e) {
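
A minimal sketch (not part of the patch) of the CellBuilder pattern that replaces
CellUtil.createCell in the createCell helpers of this file; the class name
CellBuilderSketch and the literal row/family/qualifier values are illustrative only.

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class CellBuilderSketch {
  // Builds a Put-type cell with the builder API; the sequence id is still
  // applied separately through CellUtil, as in the test helpers above.
  static Cell putCell(byte[] row, byte[] family, byte[] qualifier, long ts, long seqId,
      byte[] value) throws IOException {
    Cell c = CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
        .setRow(row).setFamily(family).setQualifier(qualifier)
        .setTimestamp(ts).setType(KeyValue.Type.Put.getCode())
        .setValue(value).build();
    CellUtil.setSequenceId(c, seqId);
    return c;
  }

  public static void main(String[] args) throws IOException {
    Cell c = putCell(Bytes.toBytes("row"), Bytes.toBytes("family"), Bytes.toBytes("q"),
        System.currentTimeMillis(), 100L, Bytes.toBytes("v"));
    System.out.println(CellUtil.cloneValue(c).length);
  }
}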

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index b20cae8..2123a8a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -45,6 +45,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -205,11 +207,10 @@ public class TestHStoreFile extends HBaseTestCase {
   @Test
   public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
     StoreFileReader reader = mock(StoreFileReader.class);
-    Store store = mock(Store.class);
-    HColumnDescriptor hcd = mock(HColumnDescriptor.class);
+    HStore store = mock(HStore.class);
     byte[] cf = Bytes.toBytes("ty");
-    when(hcd.getName()).thenReturn(cf);
-    when(store.getColumnFamilyDescriptor()).thenReturn(hcd);
+    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(cf);
+    when(store.getColumnFamilyDescriptor()).thenReturn(cfd);
     StoreFileScanner scanner =
         new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, true);
     Scan scan = new Scan();
@@ -526,10 +527,8 @@ public class TestHStoreFile extends HBaseTestCase {
 
       Scan scan = new Scan(row.getBytes(),row.getBytes());
       scan.addColumn("family".getBytes(), "family:col".getBytes());
-      Store store = mock(Store.class);
-      HColumnDescriptor hcd = mock(HColumnDescriptor.class);
-      when(hcd.getName()).thenReturn(Bytes.toBytes("family"));
-      when(store.getColumnFamilyDescriptor()).thenReturn(hcd);
+      HStore store = mock(HStore.class);
+      when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of("family"));
       boolean exists = scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
       if (i % 2 == 0) {
         if (!exists) falseNeg++;
@@ -713,10 +712,8 @@ public class TestHStoreFile extends HBaseTestCase {
       StoreFileScanner scanner = getStoreFileScanner(reader, false, false);
       assertEquals(expKeys[x], reader.generalBloomFilter.getKeyCount());
 
-      Store store = mock(Store.class);
-      HColumnDescriptor hcd = mock(HColumnDescriptor.class);
-      when(hcd.getName()).thenReturn(Bytes.toBytes("family"));
-      when(store.getColumnFamilyDescriptor()).thenReturn(hcd);
+      HStore store = mock(HStore.class);
+      when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of("family"));
       // check false positives rate
       int falsePos = 0;
       int falseNeg = 0;
@@ -857,10 +854,8 @@ public class TestHStoreFile extends HBaseTestCase {
 
     HStoreFile hsf = new HStoreFile(this.fs, writer.getPath(), conf, cacheConf,
       BloomType.NONE, true);
-    Store store = mock(Store.class);
-    HColumnDescriptor hcd = mock(HColumnDescriptor.class);
-    when(hcd.getName()).thenReturn(family);
-    when(store.getColumnFamilyDescriptor()).thenReturn(hcd);
+    HStore store = mock(HStore.class);
+    when(store.getColumnFamilyDescriptor()).thenReturn(ColumnFamilyDescriptorBuilder.of(family));
     hsf.initReader();
     StoreFileReader reader = hsf.getReader();
     StoreFileScanner scanner = getStoreFileScanner(reader, false, false);
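
A minimal sketch (not part of the patch) of the mocking style used in the hunks above:
the mocked Store plus mocked HColumnDescriptor pair is replaced by a mocked HStore
stubbed with a real ColumnFamilyDescriptor. The helper name mockStoreFor is illustrative only.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HStore;

public class MockStoreSketch {
  // Stubs only the column family descriptor, which is all these scanner tests need.
  static HStore mockStoreFor(String family) {
    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(family);
    HStore store = mock(HStore.class);
    when(store.getColumnFamilyDescriptor()).thenReturn(cfd);
    return store;
  }
}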
