Repository: hbase
Updated Branches:
  refs/heads/branch-2 f73a3a6fb -> d26b8f8dd


http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
index d93152a..7edcf54 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
@@ -99,7 +99,7 @@ public class TestKeepDeletes {
     // keep 3 versions, rows do not expire
    HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
         HConstants.FOREVER, KeepDeletedCells.TRUE);
-    Region region = hbu.createLocalHRegion(htd, null, null);
+    HRegion region = hbu.createLocalHRegion(htd, null, null);
 
     long ts = EnvironmentEdgeManager.currentTime();
     Put p = new Put(T1, ts);
@@ -241,7 +241,7 @@ public class TestKeepDeletes {
     // KEEP_DELETED_CELLS is NOT enabled
    HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
         HConstants.FOREVER, KeepDeletedCells.FALSE);
-    Region region = hbu.createLocalHRegion(htd, null, null);
+    HRegion region = hbu.createLocalHRegion(htd, null, null);
 
     long ts = EnvironmentEdgeManager.currentTime();
     Put p = new Put(T1, ts);
@@ -408,7 +408,7 @@ public class TestKeepDeletes {
   public void testDeleteMarkerExpirationEmptyStore() throws Exception {
    HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1,
         HConstants.FOREVER, KeepDeletedCells.TRUE);
-    Region region = hbu.createLocalHRegion(htd, null, null);
+    HRegion region = hbu.createLocalHRegion(htd, null, null);
 
     long ts = EnvironmentEdgeManager.currentTime();
 
@@ -451,7 +451,7 @@ public class TestKeepDeletes {
   public void testDeleteMarkerExpiration() throws Exception {
    HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1,
         HConstants.FOREVER, KeepDeletedCells.TRUE);
-    Region region = hbu.createLocalHRegion(htd, null, null);
+    HRegion region = hbu.createLocalHRegion(htd, null, null);
 
     long ts = EnvironmentEdgeManager.currentTime();
 
@@ -514,7 +514,7 @@ public class TestKeepDeletes {
   public void testWithOldRow() throws Exception {
    HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1,
         HConstants.FOREVER, KeepDeletedCells.TRUE);
-    Region region = hbu.createLocalHRegion(htd, null, null);
+    HRegion region = hbu.createLocalHRegion(htd, null, null);
 
     long ts = EnvironmentEdgeManager.currentTime();
 
@@ -674,7 +674,7 @@ public class TestKeepDeletes {
   public void testDeleteMarkerVersioning() throws Exception {
    HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1,
         HConstants.FOREVER, KeepDeletedCells.TRUE);
-    Region region = hbu.createLocalHRegion(htd, null, null);
+    HRegion region = hbu.createLocalHRegion(htd, null, null);
 
     long ts = EnvironmentEdgeManager.currentTime();
     Put p = new Put(T1, ts);
@@ -818,7 +818,7 @@ public class TestKeepDeletes {
   public void testWithMinVersions() throws Exception {
     HTableDescriptor htd =
        hbu.createTableDescriptor(name.getMethodName(), 3, 1000, 1, KeepDeletedCells.TRUE);
-    Region region = hbu.createLocalHRegion(htd, null, null);
+    HRegion region = hbu.createLocalHRegion(htd, null, null);
 
     long ts = EnvironmentEdgeManager.currentTime() - 2000; // 2s in the past
 
@@ -897,7 +897,7 @@ public class TestKeepDeletes {
   public void testWithTTL() throws Exception {
     HTableDescriptor htd =
        hbu.createTableDescriptor(name.getMethodName(), 1, 1000, 1, KeepDeletedCells.TTL);
-    Region region = hbu.createLocalHRegion(htd, null, null);
+    HRegion region = hbu.createLocalHRegion(htd, null, null);
 
     long ts = EnvironmentEdgeManager.currentTime() - 2000; // 2s in the past
 
@@ -945,7 +945,7 @@ public class TestKeepDeletes {
 
   }
 
-  private int countDeleteMarkers(Region region) throws IOException {
+  private int countDeleteMarkers(HRegion region) throws IOException {
     Scan s = new Scan();
     s.setRaw(true);
     // use max versions from the store(s)
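
The hunks above all retype the test-local region from the Region interface to the concrete HRegion. A minimal sketch of the rationale, assuming the hbu/htd setup shown in this file: Region is the restricted, coprocessor-facing view, while HRegion is the concrete server class whose internals (flush, HStore accessors) these tests drive directly.

    // Sketch only, not part of the patch; mirrors the test setup above.
    HRegion region = hbu.createLocalHRegion(htd, null, null);
    try {
      region.flush(true); // concrete-type convenience used throughout these tests
      for (HStore store : region.getStores()) {
        store.getStorefilesCount(); // server-internal accessor, visible via HStore
      }
    } finally {
      HBaseTestingUtility.closeRegionAndWAL(region);
    }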

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
index 0c33bdb..71f18c0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY_BYTES;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
+import static org.apache.hadoop.hbase.regionserver.Store.PRIORITY_USER;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -181,8 +182,8 @@ public class TestMajorCompaction {
 
   public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly)
       throws Exception {
-    Map<Store, HFileDataBlockEncoder> replaceBlockCache = new HashMap<>();
-    for (Store store : r.getStores()) {
+    Map<HStore, HFileDataBlockEncoder> replaceBlockCache = new HashMap<>();
+    for (HStore store : r.getStores()) {
       HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
       replaceBlockCache.put(store, blockEncoder);
       final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
@@ -194,7 +195,7 @@ public class TestMajorCompaction {
     majorCompaction();
 
     // restore settings
-    for (Entry<Store, HFileDataBlockEncoder> entry : replaceBlockCache.entrySet()) {
+    for (Entry<HStore, HFileDataBlockEncoder> entry : replaceBlockCache.entrySet()) {
       ((HStore)entry.getKey()).setDataBlockEncoderInTest(entry.getValue());
     }
   }
@@ -211,11 +212,11 @@ public class TestMajorCompaction {
     // Default is that there only 3 (MAXVERSIONS) versions allowed per column.
     //
     // Assert == 3 when we ask for versions.
-    Result result = r.get(new Get(STARTROW).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
+    Result result = r.get(new Get(STARTROW).addFamily(COLUMN_FAMILY_TEXT).readVersions(100));
     assertEquals(compactionThreshold, result.size());
 
     // see if CompactionProgress is in place but null
-    for (Store store : r.getStores()) {
+    for (HStore store : r.getStores()) {
       assertNull(store.getCompactionProgress());
     }
 
@@ -224,7 +225,7 @@ public class TestMajorCompaction {
 
     // see if CompactionProgress has done its thing on at least one store
     int storeCount = 0;
-    for (Store store : r.getStores()) {
+    for (HStore store : r.getStores()) {
       CompactionProgress progress = store.getCompactionProgress();
       if( progress != null ) {
         ++storeCount;
@@ -240,8 +241,7 @@ public class TestMajorCompaction {
     secondRowBytes[START_KEY_BYTES.length - 1]++;
 
     // Always 3 versions if that is what max versions is.
-    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).
-        setMaxVersions(100));
+    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).readVersions(100));
     LOG.debug("Row " + Bytes.toStringBinary(secondRowBytes) + " after " +
         "initial compaction: " + result);
     assertEquals("Invalid number of versions of row "
@@ -260,26 +260,26 @@ public class TestMajorCompaction {
     r.delete(delete);
 
     // Assert deleted.
-    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
+    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).readVersions(100));
     assertTrue("Second row should have been deleted", result.isEmpty());
 
     r.flush(true);
 
-    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
+    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).readVersions(100));
     assertTrue("Second row should have been deleted", result.isEmpty());
 
     // Add a bit of data and flush.  Start adding at 'bbb'.
     createSmallerStoreFile(this.r);
     r.flush(true);
     // Assert that the second row is still deleted.
-    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
+    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).readVersions(100));
     assertTrue("Second row should still be deleted", result.isEmpty());
 
     // Force major compaction.
     r.compact(true);
     assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 1);
 
-    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100));
+    result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).readVersions(100));
     assertTrue("Second row should still be deleted", result.isEmpty());
 
     // Make sure the store files do have some 'aaa' keys in them -- exactly 3.
@@ -290,8 +290,7 @@ public class TestMajorCompaction {
     // Multiple versions allowed for an entry, so the delete isn't enough
     // Lower TTL and expire to ensure that all our entries have been wiped
     final int ttl = 1000;
-    for (Store hstore : r.getStores()) {
-      HStore store = ((HStore) hstore);
+    for (HStore store : r.getStores()) {
       ScanInfo old = store.getScanInfo();
       ScanInfo si = new ScanInfo(old.getConfiguration(), old.getFamily(), old.getMinVersions(),
           old.getMaxVersions(), ttl, old.getKeepDeletedCells(), old.getPreadMaxBytes(), 0,
@@ -411,7 +410,7 @@ public class TestMajorCompaction {
    */
   @Test
   public void testNonUserMajorCompactionRequest() throws Exception {
-    Store store = r.getStore(COLUMN_FAMILY);
+    HStore store = r.getStore(COLUMN_FAMILY);
     createStoreFile(r);
     for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
       createStoreFile(r);
@@ -431,14 +430,14 @@ public class TestMajorCompaction {
    */
   @Test
   public void testUserMajorCompactionRequest() throws IOException{
-    Store store = r.getStore(COLUMN_FAMILY);
+    HStore store = r.getStore(COLUMN_FAMILY);
     createStoreFile(r);
     for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
       createStoreFile(r);
     }
     store.triggerMajorCompaction();
     CompactionRequest request =
-        store.requestCompaction(Store.PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null).get()
+        store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null).get()
             .getRequest();
     assertNotNull("Expected to receive a compaction request", request);
     assertEquals(
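
This file also swaps the deprecated Get.setMaxVersions(int) for Get.readVersions(int); both cap how many versions a read returns, and readVersions chains like the rest of the builder-style Get API. A sketch under the names used above:

    // Sketch only: equivalent read with the non-deprecated method.
    Result result = r.get(new Get(STARTROW).addFamily(COLUMN_FAMILY_TEXT).readVersions(100));
    assertEquals(compactionThreshold, result.size()); // 3 versions kept by default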

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
index 1bd20c6..a7b5cd5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java
@@ -31,14 +31,14 @@ import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -46,7 +46,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-
 /**
  * Test minor compactions
  */
@@ -57,7 +56,7 @@ public class TestMinorCompaction {
  private static final HBaseTestingUtility UTIL = HBaseTestingUtility.createLocalHTU();
   protected Configuration conf = UTIL.getConfiguration();
   
-  private Region r = null;
+  private HRegion r = null;
   private HTableDescriptor htd = null;
   private int compactionThreshold;
   private byte[] firstRowBytes, secondRowBytes, thirdRowBytes;
@@ -205,7 +204,7 @@ public class TestMinorCompaction {
     assertEquals(compactionThreshold, result.size());
 
     // do a compaction
-    Store store2 = r.getStore(fam2);
+    HStore store2 = r.getStore(fam2);
     int numFiles1 = store2.getStorefiles().size();
     assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
     ((HStore)store2).compactRecentForTestingAssumingDefaultPolicy(compactionThreshold); // = 3
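
With store2 now declared as HStore, the test-only helper is reachable without indirection; a sketch of the simplification this retyping allows:

    // Sketch only: HStore exposes the test helper directly, so the cast above is redundant.
    HStore store2 = r.getStore(fam2);
    store2.compactRecentForTestingAssumingDefaultPolicy(compactionThreshold); // = 3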

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
index c08bd71..8a3a6dd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
@@ -298,7 +298,7 @@ public class TestMobStoreCompaction {
   }
 
   private int countStoreFiles() throws IOException {
-    Store store = region.getStore(COLUMN_FAMILY);
+    HStore store = region.getStore(COLUMN_FAMILY);
     return store.getStorefilesCount();
   }
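
Same pattern here: getStore now yields HStore, so file-count accessors need no interface hop. A brief sketch mirroring countStoreFiles() above:

    // Sketch only.
    HStore store = region.getStore(COLUMN_FAMILY);
    int storeFiles = store.getStorefilesCount();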
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
index 7859366..e1a52a6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
@@ -152,9 +152,9 @@ public class TestPerColumnFamilyFlush {
     long smallestSeqCF3 = region.getOldestSeqIdOfStore(FAMILY3);
 
     // Find the sizes of the memstores of each CF.
-    MemstoreSize cf1MemstoreSize = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSize = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf3MemstoreSize = region.getStore(FAMILY3).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
+    MemstoreSize cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
 
     // Get the overall smallest LSN in the region's memstores.
     long smallestSeqInRegionCurrentMemstore = getWAL(region)
@@ -184,9 +184,9 @@ public class TestPerColumnFamilyFlush {
     MemstoreSize oldCF3MemstoreSize = cf3MemstoreSize;
 
     // Recalculate everything
-    cf1MemstoreSize = region.getStore(FAMILY1).getSizeOfMemStore();
-    cf2MemstoreSize = region.getStore(FAMILY2).getSizeOfMemStore();
-    cf3MemstoreSize = region.getStore(FAMILY3).getSizeOfMemStore();
+    cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
+    cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
+    cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
     totalMemstoreSize = region.getMemstoreSize();
     smallestSeqInRegionCurrentMemstore = getWAL(region)
         .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
@@ -216,15 +216,15 @@ public class TestPerColumnFamilyFlush {
     }
 
     // How much does the CF3 memstore occupy? Will be used later.
-    oldCF3MemstoreSize = region.getStore(FAMILY3).getSizeOfMemStore();
+    oldCF3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
 
     // Flush again
     region.flush(false);
 
     // Recalculate everything
-    cf1MemstoreSize = region.getStore(FAMILY1).getSizeOfMemStore();
-    cf2MemstoreSize = region.getStore(FAMILY2).getSizeOfMemStore();
-    cf3MemstoreSize = region.getStore(FAMILY3).getSizeOfMemStore();
+    cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
+    cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
+    cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
     totalMemstoreSize = region.getMemstoreSize();
     smallestSeqInRegionCurrentMemstore = getWAL(region)
         .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
@@ -288,9 +288,9 @@ public class TestPerColumnFamilyFlush {
     long totalMemstoreSize = region.getMemstoreSize();
 
     // Find the sizes of the memstores of each CF.
-    MemstoreSize cf1MemstoreSize = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSize = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf3MemstoreSize = region.getStore(FAMILY3).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
+    MemstoreSize cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
 
     // Some other sanity checks.
     assertTrue(cf1MemstoreSize.getDataSize() > 0);
@@ -305,9 +305,9 @@ public class TestPerColumnFamilyFlush {
     // Flush!
     region.flush(false);
 
-    cf1MemstoreSize = region.getStore(FAMILY1).getSizeOfMemStore();
-    cf2MemstoreSize = region.getStore(FAMILY2).getSizeOfMemStore();
-    cf3MemstoreSize = region.getStore(FAMILY3).getSizeOfMemStore();
+    cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize();
+    cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize();
+    cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize();
     totalMemstoreSize = region.getMemstoreSize();
     long smallestSeqInRegionCurrentMemstore =
         region.getWAL().getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
@@ -380,9 +380,9 @@ public class TestPerColumnFamilyFlush {
       totalMemstoreSize = desiredRegion.getMemstoreSize();
 
       // Find the sizes of the memstores of each CF.
-      cf1MemstoreSize = desiredRegion.getStore(FAMILY1).getSizeOfMemStore().getDataSize();
-      cf2MemstoreSize = desiredRegion.getStore(FAMILY2).getSizeOfMemStore().getDataSize();
-      cf3MemstoreSize = desiredRegion.getStore(FAMILY3).getSizeOfMemStore().getDataSize();
+      cf1MemstoreSize = desiredRegion.getStore(FAMILY1).getMemStoreSize().getDataSize();
+      cf2MemstoreSize = desiredRegion.getStore(FAMILY2).getMemStoreSize().getDataSize();
+      cf3MemstoreSize = desiredRegion.getStore(FAMILY3).getMemStoreSize().getDataSize();
 
       // CF1 Should have been flushed
       assertEquals(0, cf1MemstoreSize);
@@ -500,9 +500,9 @@ public class TestPerColumnFamilyFlush {
         }
       }
       assertEquals(maxLogs, getNumRolledLogFiles(desiredRegion));
-      assertTrue(desiredRegion.getStore(FAMILY1).getMemStoreSize() > cfFlushSizeLowerBound);
-      assertTrue(desiredRegion.getStore(FAMILY2).getMemStoreSize() < cfFlushSizeLowerBound);
-      assertTrue(desiredRegion.getStore(FAMILY3).getMemStoreSize() < cfFlushSizeLowerBound);
+      assertTrue(desiredRegion.getStore(FAMILY1).getMemStoreSize().getHeapSize() > cfFlushSizeLowerBound);
+      assertTrue(desiredRegion.getStore(FAMILY2).getMemStoreSize().getHeapSize() < cfFlushSizeLowerBound);
+      assertTrue(desiredRegion.getStore(FAMILY3).getMemStoreSize().getHeapSize() < cfFlushSizeLowerBound);
       table.put(createPut(1, 12345678));
       // Make numRolledLogFiles greater than maxLogs
       desiredRegionAndServer.getSecond().walRoller.requestRollAll();
@@ -525,9 +525,9 @@ public class TestPerColumnFamilyFlush {
       });
       LOG.info("Finished waiting on flush after too many WALs...");
       // Individual families should have been flushed.
-      assertEquals(0, desiredRegion.getStore(FAMILY1).getMemStoreSize());
-      assertEquals(0, desiredRegion.getStore(FAMILY2).getMemStoreSize());
-      assertEquals(0, desiredRegion.getStore(FAMILY3).getMemStoreSize());
+      assertEquals(0, desiredRegion.getStore(FAMILY1).getMemStoreSize().getHeapSize());
+      assertEquals(0, desiredRegion.getStore(FAMILY2).getMemStoreSize().getHeapSize());
+      assertEquals(0, desiredRegion.getStore(FAMILY3).getMemStoreSize().getHeapSize());
       // let WAL cleanOldLogs
       assertNull(getWAL(desiredRegion).rollWriter(true));
       assertTrue(getNumRolledLogFiles(desiredRegion) < maxLogs);
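
The rename from getSizeOfMemStore() to getMemStoreSize() also changes what the old long-returning getMemStoreSize() meant: the method now returns a MemstoreSize carrying both data and heap sizes, which is why the assertions above pick getHeapSize() or getDataSize() explicitly. A sketch of the distinction, assuming the FAMILY1 store above:

    // Sketch only: MemstoreSize splits payload bytes from heap accounting.
    MemstoreSize size = region.getStore(FAMILY1).getMemStoreSize();
    long data = size.getDataSize(); // bytes of cell data in the memstore
    long heap = size.getHeapSize(); // data plus per-cell bookkeeping overhead
    assertTrue(heap >= data);       // heap accounting includes the overhead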

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
index 8d0a822..2b79152 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
@@ -50,7 +50,7 @@ public class TestRegionSplitPolicy {
   private Configuration conf;
   private HTableDescriptor htd;
   private HRegion mockRegion;
-  private List<Store> stores;
+  private List<HStore> stores;
   private static final TableName TABLENAME = TableName.valueOf("t");
 
   @Rule
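A hypothetical wiring sketch for the retyped field: HRegion#getStores() returns List<HStore>, so the mocked region must be stubbed with that element type for the split policies under test to compile against it.

    // Sketch only; the stubbing shown is illustrative, not copied from the test.
    List<HStore> stores = new ArrayList<>();
    HRegion mockRegion = Mockito.mock(HRegion.class);
    Mockito.doReturn(stores).when(mockRegion).getStores();
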

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
index 3b2ebe2..60afa3d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
@@ -169,9 +169,9 @@ public class TestWalAndCompactingMemStoreFlush {
     long smallestSeqCF3PhaseI = region.getOldestSeqIdOfStore(FAMILY3);
 
     // Find the sizes of the memstores of each CF.
-    MemstoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getMemStoreSize();
+    MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getMemStoreSize();
 
     // Get the overall smallest LSN in the region's memstores.
     long smallestSeqInRegionCurrentMemstorePhaseI = getWAL(region)
@@ -220,9 +220,9 @@ public class TestWalAndCompactingMemStoreFlush {
     region.flush(false);
 
     // Recalculate everything
-    MemstoreSize cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize();
+    MemstoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getMemStoreSize();
 
    long smallestSeqInRegionCurrentMemstorePhaseII = getWAL(region)
        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
@@ -264,7 +264,7 @@ public class TestWalAndCompactingMemStoreFlush {
        + smallestSeqCF2PhaseII +", the smallest sequence in CF3:" + smallestSeqCF3PhaseII + "\n";
 
     // How much does the CF1 memstore occupy? Will be used later.
-    MemstoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getMemStoreSize();
     long smallestSeqCF1PhaseIII = region.getOldestSeqIdOfStore(FAMILY1);
 
    s = s + "----After more puts into CF1 its size is:" + cf1MemstoreSizePhaseIII
@@ -278,9 +278,9 @@ public class TestWalAndCompactingMemStoreFlush {
     region.flush(false);
 
     // Recalculate everything
-    MemstoreSize cf1MemstoreSizePhaseIV = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf3MemstoreSizePhaseIV = region.getStore(FAMILY3).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseIV = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getMemStoreSize();
+    MemstoreSize cf3MemstoreSizePhaseIV = region.getStore(FAMILY3).getMemStoreSize();
 
    long smallestSeqInRegionCurrentMemstorePhaseIV = getWAL(region)
        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
@@ -319,9 +319,9 @@ public class TestWalAndCompactingMemStoreFlush {
     region.flush(true);
 
     // Recalculate everything
-    MemstoreSize cf1MemstoreSizePhaseV = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSizePhaseV = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseV = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSizePhaseV = region.getStore(FAMILY2).getMemStoreSize();
+    MemstoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getMemStoreSize();
    long smallestSeqInRegionCurrentMemstorePhaseV = getWAL(region)
        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
@@ -410,9 +410,9 @@ public class TestWalAndCompactingMemStoreFlush {
     long smallestSeqCF2PhaseI = region.getOldestSeqIdOfStore(FAMILY2);
     long smallestSeqCF3PhaseI = region.getOldestSeqIdOfStore(FAMILY3);
     // Find the sizes of the memstores of each CF.
-    MemstoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getMemStoreSize();
+    MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getMemStoreSize();
     // Get the overall smallest LSN in the region's memstores.
    long smallestSeqInRegionCurrentMemstorePhaseI = getWAL(region)
        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
@@ -460,9 +460,9 @@ public class TestWalAndCompactingMemStoreFlush {
    /*------------------------------------------------------------------------------*/
     /* PHASE II - collect sizes */
     // Recalculate everything
-    MemstoreSize cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize();
+    MemstoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getMemStoreSize();
    long smallestSeqInRegionCurrentMemstorePhaseII = getWAL(region)
        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
     // Find the smallest LSNs for edits wrt to each CF.
@@ -509,7 +509,7 @@ public class TestWalAndCompactingMemStoreFlush {
    /*------------------------------------------------------------------------------*/
     /* PHASE III - collect sizes */
     // How much does the CF1 memstore occupy now? Will be used later.
-    MemstoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getMemStoreSize();
    long totalMemstoreSizePhaseIII = region.getMemstoreSize();
 
    /*------------------------------------------------------------------------------*/
@@ -531,9 +531,9 @@ public class TestWalAndCompactingMemStoreFlush {
    /*------------------------------------------------------------------------------*/
     /* PHASE IV - collect sizes */
     // Recalculate everything
-    MemstoreSize cf1MemstoreSizePhaseIV = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf3MemstoreSizePhaseIV = region.getStore(FAMILY3).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseIV = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getMemStoreSize();
+    MemstoreSize cf3MemstoreSizePhaseIV = region.getStore(FAMILY3).getMemStoreSize();
    long smallestSeqInRegionCurrentMemstorePhaseIV = getWAL(region)
        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
     long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3);
@@ -563,9 +563,9 @@ public class TestWalAndCompactingMemStoreFlush {
    /*------------------------------------------------------------------------------*/
     /* PHASE V - collect sizes */
     // Recalculate everything
-    MemstoreSize cf1MemstoreSizePhaseV = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSizePhaseV = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseV = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSizePhaseV = region.getStore(FAMILY2).getMemStoreSize();
+    MemstoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getMemStoreSize();
    long smallestSeqInRegionCurrentMemstorePhaseV = getWAL(region)
        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
     long totalMemstoreSizePhaseV = region.getMemstoreSize();
@@ -601,9 +601,9 @@ public class TestWalAndCompactingMemStoreFlush {
       region.put(createPut(5, i));
     }
 
-    MemstoreSize cf1ActiveSizePhaseVI = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf3ActiveSizePhaseVI = region.getStore(FAMILY3).getSizeOfMemStore();
-    MemstoreSize cf5ActiveSizePhaseVI = region.getStore(FAMILIES[4]).getSizeOfMemStore();
+    MemstoreSize cf1ActiveSizePhaseVI = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf3ActiveSizePhaseVI = region.getStore(FAMILY3).getMemStoreSize();
+    MemstoreSize cf5ActiveSizePhaseVI = region.getStore(FAMILIES[4]).getMemStoreSize();
 
    /*------------------------------------------------------------------------------*/
     /* PHASE VI - Flush */
@@ -614,9 +614,9 @@ public class TestWalAndCompactingMemStoreFlush {
     // Since we won't find any CF above the threshold, and hence no specific
     // store to flush, we should flush all the memstores
     // Also compacted memstores are flushed to disk, but not entirely emptied
-    MemstoreSize cf1ActiveSizePhaseVII = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf3ActiveSizePhaseVII = region.getStore(FAMILY3).getSizeOfMemStore();
-    MemstoreSize cf5ActiveSizePhaseVII = region.getStore(FAMILIES[4]).getSizeOfMemStore();
+    MemstoreSize cf1ActiveSizePhaseVII = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf3ActiveSizePhaseVII = region.getStore(FAMILY3).getMemStoreSize();
+    MemstoreSize cf5ActiveSizePhaseVII = region.getStore(FAMILIES[4]).getMemStoreSize();
 
    assertTrue(cf1ActiveSizePhaseVII.getDataSize() < cf1ActiveSizePhaseVI.getDataSize());
    assertTrue(cf3ActiveSizePhaseVII.getDataSize() < cf3ActiveSizePhaseVI.getDataSize());
@@ -663,9 +663,9 @@ public class TestWalAndCompactingMemStoreFlush {
     long totalMemstoreSize = region.getMemstoreSize();
 
     // Find the sizes of the memstores of each CF.
-    MemstoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getMemStoreSize();
+    MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getMemStoreSize();
 
     // Some other sanity checks.
     assertTrue(cf1MemstoreSizePhaseI.getDataSize() > 0);
@@ -689,7 +689,7 @@ public class TestWalAndCompactingMemStoreFlush {
     cms3.flushInMemory();
     region.flush(false);
 
-    MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getSizeOfMemStore();
+    MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize();
 
    long smallestSeqInRegionCurrentMemstorePhaseII =
        region.getWAL().getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
@@ -798,9 +798,9 @@ public class TestWalAndCompactingMemStoreFlush {
     long totalMemstoreSize = region.getMemstoreSize();
 
     // Find the sizes of the memstores of each CF.
-    MemstoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getMemStoreSize();
+    MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getMemStoreSize();
 
     // Some other sanity checks.
     assertTrue(cf1MemstoreSizePhaseI.getDataSize() > 0);
@@ -829,9 +829,9 @@ public class TestWalAndCompactingMemStoreFlush {
     // Flush-to-disk! CF2 only should be flushed
     region.flush(false);
 
-    MemstoreSize cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize();
+    MemstoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getMemStoreSize();
 
    // CF1 should be flushed in memory and just flattened, so CF1 heap overhead should be smaller
    assertTrue(cf1MemstoreSizePhaseI.getHeapSize() > cf1MemstoreSizePhaseII.getHeapSize());
@@ -855,7 +855,7 @@ public class TestWalAndCompactingMemStoreFlush {
       region.put(createPut(2, i));
     }
 
-    MemstoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getMemStoreSize();
 
     // Flush in memory!
    ((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore).flushInMemory();
@@ -871,8 +871,8 @@ public class TestWalAndCompactingMemStoreFlush {
     }
     region.flush(false);
 
-    MemstoreSize cf1MemstoreSizePhaseIV = region.getStore(FAMILY1).getSizeOfMemStore();
-    MemstoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhaseIV = region.getStore(FAMILY1).getMemStoreSize();
+    MemstoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getMemStoreSize();
 
    assertEquals(2*cf1MemstoreSizePhaseI.getDataSize(), cf1MemstoreSizePhaseIV.getDataSize());
     assertEquals(
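
The in-memory flush these tests drive is a CompactingMemStore operation: it flattens or compacts the active segment without writing a store file, so heap size drops while data size stays comparable, matching the getHeapSize()/getDataSize() assertions above. A sketch of the call, as used in this file:

    // Sketch only: reach the memstore through the concrete HStore type.
    CompactingMemStore cms =
        (CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore;
    cms.flushInMemory(); // no files written; only heap accounting shrinks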

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
index ef0c931..04b0ba9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
@@ -31,6 +31,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.OptionalLong;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -104,7 +105,7 @@ public class TestDateTieredCompactor {
    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(),
       anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
     when(store.getComparator()).thenReturn(CellComparator.COMPARATOR);
-    long maxSequenceId = StoreUtils.getMaxSequenceIdInList(storefiles);
+    OptionalLong maxSequenceId = StoreUtils.getMaxSequenceIdInList(storefiles);
     when(store.getMaxSequenceId()).thenReturn(maxSequenceId);
 
     return new DateTieredCompactor(conf, store) {
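
StoreUtils.getMaxSequenceIdInList now returns OptionalLong rather than a primitive long, making the empty-file-list case explicit instead of relying on a sentinel value. A sketch of consuming it, where the fallback value is an illustrative choice:

    // Sketch only: OptionalLong forces callers to handle "no store files".
    OptionalLong maxSequenceId = StoreUtils.getMaxSequenceIdInList(storefiles);
    long seqId = maxSequenceId.orElse(0L); // hypothetical fallback for an empty list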

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java
index 803ef11..166fede 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java
@@ -26,20 +26,21 @@ import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -65,32 +66,31 @@ public class TestFIFOCompactionPolicy {
 
   private final byte[] qualifier = Bytes.toBytes("q");
 
-  private Store getStoreWithName(TableName tableName) {
+  private HStore getStoreWithName(TableName tableName) {
     MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
    List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
     for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
       HRegionServer hrs = rsts.get(i).getRegionServer();
       for (Region region : hrs.getRegions(tableName)) {
-        return region.getStores().iterator().next();
+        return ((HRegion) region).getStores().iterator().next();
       }
     }
     return null;
   }
 
-  private Store prepareData() throws IOException {
+  private HStore prepareData() throws IOException {
     Admin admin = TEST_UTIL.getAdmin();
     if (admin.tableExists(tableName)) {
       admin.disableTable(tableName);
       admin.deleteTable(tableName);
     }
-    HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
-      FIFOCompactionPolicy.class.getName());
-    desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, 
-      DisabledRegionSplitPolicy.class.getName());
-    HColumnDescriptor colDesc = new HColumnDescriptor(family);
-    colDesc.setTimeToLive(1); // 1 sec
-    desc.addFamily(colDesc);
+    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
+          FIFOCompactionPolicy.class.getName())
+        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+          DisabledRegionSplitPolicy.class.getName())
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
+        .build();
 
     admin.createTable(desc);
     Table table = TEST_UTIL.getConnection().getTable(tableName);
@@ -129,7 +129,7 @@ public class TestFIFOCompactionPolicy {
 
     TEST_UTIL.startMiniCluster(1);
     try {
-      Store store = prepareData();
+      HStore store = prepareData();
       assertEquals(10, store.getStorefilesCount());
       TEST_UTIL.getAdmin().majorCompact(tableName);
       while (store.getStorefilesCount() > 1) {
@@ -141,9 +141,8 @@ public class TestFIFOCompactionPolicy {
     }
   }
   
-  @Test  
-  public void testSanityCheckTTL() throws Exception
-  {
+  @Test
+  public void testSanityCheckTTL() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
     TEST_UTIL.startMiniCluster(1);
@@ -154,25 +153,23 @@ public class TestFIFOCompactionPolicy {
       admin.disableTable(tableName);
       admin.deleteTable(tableName);
     }
-    HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
-      FIFOCompactionPolicy.class.getName());
-    desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, 
-      DisabledRegionSplitPolicy.class.getName());
-    HColumnDescriptor colDesc = new HColumnDescriptor(family);
-    desc.addFamily(colDesc);
-    try{
+    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
+          FIFOCompactionPolicy.class.getName())
+        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+          DisabledRegionSplitPolicy.class.getName())
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
+    try {
       admin.createTable(desc);
       Assert.fail();
-    }catch(Exception e){      
-    }finally{
+    } catch (Exception e) {
+    } finally {
       TEST_UTIL.shutdownMiniCluster();
     }
   }
 
-  @Test  
-  public void testSanityCheckMinVersion() throws Exception
-  {
+  @Test
+  public void testSanityCheckMinVersion() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10000);
     TEST_UTIL.startMiniCluster(1);
@@ -183,27 +180,25 @@ public class TestFIFOCompactionPolicy {
       admin.disableTable(tableName);
       admin.deleteTable(tableName);
     }
-    HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
-      FIFOCompactionPolicy.class.getName());
-    desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, 
-      DisabledRegionSplitPolicy.class.getName());
-    HColumnDescriptor colDesc = new HColumnDescriptor(family);
-    colDesc.setTimeToLive(1); // 1 sec
-    colDesc.setMinVersions(1);
-    desc.addFamily(colDesc);
-    try{
+    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
+          FIFOCompactionPolicy.class.getName())
+        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+          DisabledRegionSplitPolicy.class.getName())
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1)
+            .setMinVersions(1).build())
+        .build();
+    try {
       admin.createTable(desc);
       Assert.fail();
-    }catch(Exception e){      
-    }finally{
+    } catch (Exception e) {
+    } finally {
       TEST_UTIL.shutdownMiniCluster();
     }
   }
   
-  @Test  
-  public void testSanityCheckBlockingStoreFiles() throws Exception
-  {
+  @Test
+  public void testSanityCheckBlockingStoreFiles() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setInt(HStore.BLOCKING_STOREFILES_KEY, 10);
     TEST_UTIL.startMiniCluster(1);
@@ -214,19 +209,18 @@ public class TestFIFOCompactionPolicy {
       admin.disableTable(tableName);
       admin.deleteTable(tableName);
     }
-    HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.setConfiguration(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
-      FIFOCompactionPolicy.class.getName());
-    desc.setConfiguration(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, 
-      DisabledRegionSplitPolicy.class.getName());
-    HColumnDescriptor colDesc = new HColumnDescriptor(family);
-    colDesc.setTimeToLive(1); // 1 sec
-    desc.addFamily(colDesc);
-    try{
+    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
+          FIFOCompactionPolicy.class.getName())
+        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+          DisabledRegionSplitPolicy.class.getName())
+        .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
+        .build();
+    try {
       admin.createTable(desc);
       Assert.fail();
-    }catch(Exception e){      
-    }finally{
+    } catch (Exception e) {
+    } finally {
       TEST_UTIL.shutdownMiniCluster();
     }
   }
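
The descriptor migration used throughout this file replaces the mutable HTableDescriptor/HColumnDescriptor pair with immutable builders. A condensed sketch of the pattern, using the keys from the hunks above:

    // Sketch only: one chained, immutable descriptor build.
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
          FIFOCompactionPolicy.class.getName())
        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
          DisabledRegionSplitPolicy.class.getName())
        .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
            .setTimeToLive(1) // seconds; FIFO compaction requires a finite TTL
            .build())
        .build();
    admin.createTable(desc);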

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java
index 16fb813..fe33d86 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestCompactionWithThroughputController.java
@@ -22,33 +22,30 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.List;
-import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreEngine;
 import org.apache.hadoop.hbase.regionserver.StripeStoreConfig;
 import org.apache.hadoop.hbase.regionserver.StripeStoreEngine;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
-import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
-import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
-import org.apache.hadoop.hbase.regionserver.throttle.PressureAwareCompactionThroughputController;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -71,30 +68,29 @@ public class TestCompactionWithThroughputController {
 
   private final byte[] qualifier = Bytes.toBytes("q");
 
-  private Store getStoreWithName(TableName tableName) {
+  private HStore getStoreWithName(TableName tableName) {
     MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
    List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
     for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
       HRegionServer hrs = rsts.get(i).getRegionServer();
       for (Region region : hrs.getRegions(tableName)) {
-        return region.getStores().iterator().next();
+        return ((HRegion) region).getStores().iterator().next();
       }
     }
     return null;
   }
 
-  private Store prepareData() throws IOException {
+  private HStore prepareData() throws IOException {
     Admin admin = TEST_UTIL.getAdmin();
     if (admin.tableExists(tableName)) {
       admin.disableTable(tableName);
       admin.deleteTable(tableName);
     }
     Table table = TEST_UTIL.createTable(tableName, family);
-    Random rand = new Random();
     for (int i = 0; i < 10; i++) {
       for (int j = 0; j < 10; j++) {
         byte[] value = new byte[128 * 1024];
-        rand.nextBytes(value);
+        ThreadLocalRandom.current().nextBytes(value);
        table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
       }
       admin.flush(tableName);
@@ -121,7 +117,7 @@ public class TestCompactionWithThroughputController {
       PressureAwareCompactionThroughputController.class.getName());
     TEST_UTIL.startMiniCluster(1);
     try {
-      Store store = prepareData();
+      HStore store = prepareData();
       assertEquals(10, store.getStorefilesCount());
       long startTime = System.currentTimeMillis();
       TEST_UTIL.getAdmin().majorCompact(tableName);
@@ -150,7 +146,7 @@ public class TestCompactionWithThroughputController {
       NoLimitThroughputController.class.getName());
     TEST_UTIL.startMiniCluster(1);
     try {
-      Store store = prepareData();
+      HStore store = prepareData();
       assertEquals(10, store.getStorefilesCount());
       long startTime = System.currentTimeMillis();
       TEST_UTIL.getAdmin().majorCompact(tableName);
@@ -199,10 +195,10 @@ public class TestCompactionWithThroughputController {
     TEST_UTIL.startMiniCluster(1);
     Connection conn = ConnectionFactory.createConnection(conf);
     try {
-      HTableDescriptor htd = new HTableDescriptor(tableName);
-      htd.addFamily(new HColumnDescriptor(family));
-      htd.setCompactionEnabled(false);
-      TEST_UTIL.getAdmin().createTable(htd);
+      TEST_UTIL.getAdmin()
+          .createTable(TableDescriptorBuilder.newBuilder(tableName)
+              .addColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).setCompactionEnabled(false)
+              .build());
       TEST_UTIL.waitTableAvailable(tableName);
      HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(tableName);
       PressureAwareCompactionThroughputController throughputController =
@@ -256,12 +252,12 @@ public class TestCompactionWithThroughputController {
     TEST_UTIL.startMiniCluster(1);
     Connection conn = ConnectionFactory.createConnection(conf);
     try {
-      HTableDescriptor htd = new HTableDescriptor(tableName);
-      htd.addFamily(new HColumnDescriptor(family));
-      htd.setCompactionEnabled(false);
-      TEST_UTIL.getAdmin().createTable(htd);
+      TEST_UTIL.getAdmin()
+          .createTable(TableDescriptorBuilder.newBuilder(tableName)
+              .addColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).setCompactionEnabled(false)
+              .build());
       TEST_UTIL.waitTableAvailable(tableName);
-      HStore store = (HStore) getStoreWithName(tableName);
+      HStore store = getStoreWithName(tableName);
       assertEquals(0, store.getStorefilesCount());
       assertEquals(0.0, store.getCompactionPressure(), EPSILON);
       Table table = conn.getTable(tableName);
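
Besides the Store-to-HStore retyping, prepareData() drops the shared java.util.Random for ThreadLocalRandom, which needs no instance field and avoids contention if the loop were ever parallelized. Sketch:

    // Sketch only: per-thread randomness without a Random field.
    byte[] value = new byte[128 * 1024];
    ThreadLocalRandom.current().nextBytes(value);
    table.put(new Put(Bytes.toBytes(0)).addColumn(family, qualifier, value));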

http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
index c5a595c..ca6c86b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/throttle/TestFlushWithThroughputController.java
@@ -22,25 +22,26 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreEngine;
 import org.apache.hadoop.hbase.regionserver.StripeStoreEngine;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.apache.hadoop.hbase.util.Pair;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -73,13 +74,13 @@ public class TestFlushWithThroughputController {
     hbtu.shutdownMiniCluster();
   }
 
-  private Store getStoreWithName(TableName tableName) {
+  private HStore getStoreWithName(TableName tableName) {
     MiniHBaseCluster cluster = hbtu.getMiniHBaseCluster();
     List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
     for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
       HRegionServer hrs = rsts.get(i).getRegionServer();
       for (Region region : hrs.getRegions(tableName)) {
-        return region.getStores().iterator().next();
+        return ((HRegion) region).getStores().iterator().next();
       }
     }
     return null;
@@ -114,7 +115,7 @@ public class TestFlushWithThroughputController {
       hbtu.getAdmin().flush(tableName);
       duration += System.nanoTime() - startTime;
     }
-    Store store = getStoreWithName(tableName);
+    HStore store = getStoreWithName(tableName);
     assertEquals(NUM_FLUSHES, store.getStorefilesCount());
     double throughput = (double)store.getStorefilesSize()
         / TimeUnit.NANOSECONDS.toSeconds(duration);
@@ -157,10 +158,9 @@ public class TestFlushWithThroughputController {
       3000);
     hbtu.startMiniCluster(1);
     Connection conn = ConnectionFactory.createConnection(conf);
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    htd.addFamily(new HColumnDescriptor(family));
-    htd.setCompactionEnabled(false);
-    hbtu.getAdmin().createTable(htd);
+    hbtu.getAdmin().createTable(TableDescriptorBuilder.newBuilder(tableName)
+      .addColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).setCompactionEnabled(false)
+      .build());
     hbtu.waitTableAvailable(tableName);
     HRegionServer regionServer = hbtu.getRSForFirstRegionInTable(tableName);
     PressureAwareFlushThroughputController throughputController =

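The recurring Store-to-HStore narrowing in these tests boils down to one lookup pattern. Here it is as a standalone helper, assuming a running HRegionServer handle (StoreLookup and firstStoreOf are illustrative names, not part of the patch):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.Region;

final class StoreLookup {
  private StoreLookup() {}

  // Region is the public-facing interface; casting to the concrete HRegion
  // exposes getStores() typed as a list of HStore, which is what the
  // throughput assertions (getStorefilesCount(), getCompactionPressure())
  // operate on.
  static HStore firstStoreOf(HRegionServer server, TableName tableName) {
    for (Region region : server.getRegions(tableName)) {
      return ((HRegion) region).getStores().iterator().next();
    }
    return null; // table has no online region on this server
  }
}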
http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index 391155e..0598e34 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -242,10 +242,11 @@ public abstract class AbstractTestWALReplay {
     assertEquals(0, count);
 
     // flush region and make major compaction
-    Region region = destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName());
+    HRegion region =
+        (HRegion) destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName());
     region.flush(true);
     // wait to complete major compaction
-    for (Store store : region.getStores()) {
+    for (HStore store : region.getStores()) {
       store.triggerMajorCompaction();
     }
     region.compact(true);

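The hunk above drives the major compaction in-process: flush, triggerMajorCompaction() on each HStore, then a synchronous region.compact(true). For comparison, a client-side variant is to go through Admin and poll the reported compaction state; this is a sketch under hypothetical names (CompactionWait, majorCompactAndWait), not what the test itself does:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;

final class CompactionWait {
  private CompactionWait() {}

  // Request a major compaction through Admin and poll until the servers
  // report that no compaction is running for the table. Note majorCompact()
  // is asynchronous, so a real test would first confirm the request was
  // picked up (e.g. by watching storefile counts) before trusting NONE.
  static void majorCompactAndWait(Admin admin, TableName tableName)
      throws Exception {
    admin.majorCompact(tableName);
    while (admin.getCompactionState(tableName) != CompactionState.NONE) {
      Thread.sleep(100);
    }
  }
}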
http://git-wip-us.apache.org/repos/asf/hbase/blob/d26b8f8d/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
index 80f1283..67ee8ed 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
@@ -250,18 +250,19 @@ public class TestCoprocessorScanPolicy {
     public InternalScanner preFlushScannerOpen(
         final ObserverContext<RegionCoprocessorEnvironment> c, Store store,
         List<KeyValueScanner> scanners, InternalScanner s, long readPoint) throws IOException {
+      HStore hs = (HStore) store;
       Long newTtl = ttls.get(store.getTableName());
       if (newTtl != null) {
         System.out.println("PreFlush:" + newTtl);
       }
       Integer newVersions = versions.get(store.getTableName());
-      ScanInfo oldSI = store.getScanInfo();
+      ScanInfo oldSI = hs.getScanInfo();
       ColumnFamilyDescriptor family = store.getColumnFamilyDescriptor();
       ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(), family.getName(),
           family.getMinVersions(), newVersions == null ? family.getMaxVersions() : newVersions,
           newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
           family.getBlocksize(), oldSI.getTimeToPurgeDeletes(), oldSI.getComparator(), family.isNewVersionBehavior());
-      return new StoreScanner((HStore) store, scanInfo,
+      return new StoreScanner(hs, scanInfo,
           newVersions == null ? OptionalInt.empty() : OptionalInt.of(newVersions.intValue()),
           scanners, ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(),
           HConstants.OLDEST_TIMESTAMP);
@@ -272,16 +273,17 @@ public class TestCoprocessorScanPolicy {
         final ObserverContext<RegionCoprocessorEnvironment> c, Store store,
         List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
         InternalScanner s,CompactionLifeCycleTracker tracker, long readPoint) throws IOException {
+      HStore hs = (HStore) store;
       Long newTtl = ttls.get(store.getTableName());
       Integer newVersions = versions.get(store.getTableName());
-      ScanInfo oldSI = store.getScanInfo();
+      ScanInfo oldSI = hs.getScanInfo();
       ColumnFamilyDescriptor family = store.getColumnFamilyDescriptor();
       ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(), family.getName(),
           family.getMinVersions(), newVersions == null ? family.getMaxVersions() : newVersions,
           newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
           family.getBlocksize(), oldSI.getTimeToPurgeDeletes(), oldSI.getComparator(),
           family.isNewVersionBehavior());
-      return new StoreScanner((HStore) store, scanInfo,
+      return new StoreScanner(hs, scanInfo,
           newVersions == null ? OptionalInt.empty() : OptionalInt.of(newVersions.intValue()),
           scanners, scanType, store.getSmallestReadPoint(), earliestPutTs);
     }
@@ -292,16 +294,17 @@ public class TestCoprocessorScanPolicy {
         final NavigableSet<byte[]> targetCols, KeyValueScanner s, long readPt) throws IOException {
       TableName tn = store.getTableName();
       if (!tn.isSystemTable()) {
+        HStore hs = (HStore) store;
         Long newTtl = ttls.get(store.getTableName());
         Integer newVersions = versions.get(store.getTableName());
-        ScanInfo oldSI = store.getScanInfo();
+        ScanInfo oldSI = hs.getScanInfo();
         ColumnFamilyDescriptor family = store.getColumnFamilyDescriptor();
         ScanInfo scanInfo = new ScanInfo(TEST_UTIL.getConfiguration(), family.getName(),
             family.getMinVersions(), newVersions == null ? family.getMaxVersions() : newVersions,
             newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
             family.getBlocksize(), oldSI.getTimeToPurgeDeletes(), oldSI.getComparator(),
             family.isNewVersionBehavior());
-        return new StoreScanner((HStore) store, scanInfo, scan, targetCols, readPt);
+        return new StoreScanner(hs, scanInfo, scan, targetCols, readPt);
       } else {
         return s;
       }

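All three hooks in this file now hoist the (HStore) cast into a local hs because getScanInfo() lives on the concrete HStore, not on the Store interface the coprocessor signatures expose. A minimal illustration of the pattern (ScanInfoHelper and scanInfoOf are hypothetical names):

import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.ScanInfo;
import org.apache.hadoop.hbase.regionserver.Store;

final class ScanInfoHelper {
  private ScanInfoHelper() {}

  // Hooks receive a Store, so they must narrow it to HStore to reach
  // internal state such as ScanInfo. Doing the cast once into a local
  // (as the patch does with "hs") avoids repeating "(HStore) store"
  // at every call site in the hook body.
  static ScanInfo scanInfoOf(Store store) {
    HStore hs = (HStore) store;
    return hs.getScanInfo();
  }
}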