http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
index 354b056..2ada5a9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
@@ -81,8 +81,8 @@ public class StoreUtils {
    * were created by a mapreduce bulk load are ignored, as they do not correspond to any specific
    * put operation, and thus do not have a memstoreTS associated with them.
    */
-  public static OptionalLong getMaxMemstoreTSInList(Collection<HStoreFile> sfs) {
-    return sfs.stream().filter(sf -> !sf.isBulkLoadResult()).mapToLong(HStoreFile::getMaxMemstoreTS)
+  public static OptionalLong getMaxMemStoreTSInList(Collection<HStoreFile> sfs) {
+    return sfs.stream().filter(sf -> !sf.isBulkLoadResult()).mapToLong(HStoreFile::getMaxMemStoreTS)
         .max();
   }
 

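The renamed helper keeps the same stream pipeline: drop bulk-loaded files (which carry no memstore timestamp), map each remaining file to its maximum memstore TS, and reduce to an OptionalLong. A self-contained sketch of that pattern, using a hypothetical FileInfo stand-in for HStoreFile:

    import java.util.Collection;
    import java.util.List;
    import java.util.OptionalLong;

    class FileInfo {
      private final boolean bulkLoad;
      private final long maxMemStoreTS;
      FileInfo(boolean bulkLoad, long maxMemStoreTS) {
        this.bulkLoad = bulkLoad;
        this.maxMemStoreTS = maxMemStoreTS;
      }
      boolean isBulkLoadResult() { return bulkLoad; }
      long getMaxMemStoreTS() { return maxMemStoreTS; }
    }

    class MaxMemStoreTSDemo {
      // Same shape as StoreUtils.getMaxMemStoreTSInList above.
      static OptionalLong getMaxMemStoreTSInList(Collection<FileInfo> sfs) {
        return sfs.stream().filter(sf -> !sf.isBulkLoadResult())
            .mapToLong(FileInfo::getMaxMemStoreTS).max();
      }

      public static void main(String[] args) {
        // The bulk-loaded file (ts=9) is skipped, so this prints OptionalLong[7].
        System.out.println(getMaxMemStoreTSInList(List.of(
            new FileInfo(false, 5), new FileInfo(true, 9), new FileInfo(false, 7))));
      }
    }
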
http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java
index 169d1d8..eb2a9b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreConfig.java
@@ -96,7 +96,7 @@ public class StripeStoreConfig {
     this.splitPartCount = splitPartCount;
     // Arbitrary default split size - 4 times the size of one L0 compaction.
     // If we flush into L0 there's no split compaction, but for default value it is ok.
-    double flushSize = sci.getMemstoreFlushSize();
+    double flushSize = sci.getMemStoreFlushSize();
     if (flushSize == 0) {
       flushSize = 128 * 1024 * 1024;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
index fe9ae30..b8194eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
@@ -115,10 +115,10 @@ public class CompactionConfiguration {
     this.storeConfigInfo = storeConfigInfo;
 
     maxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, Long.MAX_VALUE);
-    offPeakMaxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY, 
-      maxCompactSize);      
+    offPeakMaxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY,
+      maxCompactSize);
     minCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY,
-        storeConfigInfo.getMemstoreFlushSize());
+        storeConfigInfo.getMemStoreFlushSize());
     minFilesToCompact = Math.max(2, conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY,
           /*old name*/ conf.getInt("hbase.hstore.compactionThreshold", 3)));
     maxFilesToCompact = conf.getInt(HBASE_HSTORE_COMPACTION_MAX_KEY, 10);
@@ -126,7 +126,7 @@ public class CompactionConfiguration {
     offPeakCompactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_OFFPEAK_KEY, 5.0F);
 
     throttlePoint = conf.getLong("hbase.regionserver.thread.compaction.throttle",
-          2 * maxFilesToCompact * storeConfigInfo.getMemstoreFlushSize());
+          2 * maxFilesToCompact * storeConfigInfo.getMemStoreFlushSize());
     majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24*7);
     // Make it 0.5 so jitter has us fall evenly either side of when the compaction should run
     majorCompactionJitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.50F);

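Plugging the stock defaults above into the throttle expression (a sketch; any site configuration overrides these numbers) puts the throttle point at 2.5 GB, the boundary HBase uses to route a compaction to the large or the small compaction pool:

    public class ThrottleDefaults {
      public static void main(String[] args) {
        long maxFilesToCompact = 10;          // HBASE_HSTORE_COMPACTION_MAX default
        long flushSize = 128L * 1024 * 1024;  // assumed 128 MB memstore flush size
        // Same expression as the throttlePoint default above.
        System.out.println(2 * maxFilesToCompact * flushSize); // 2684354560, i.e. 2.5 GB
      }
    }
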
http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 2c9a519..5865ed5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -77,7 +77,7 @@ public abstract class Compactor<T extends CellSink> {
   protected final int compactionKVMax;
   protected final Compression.Algorithm compactionCompression;
 
-  /** specify how many days to keep MVCC values during major compaction **/ 
+  /** specify how many days to keep MVCC values during major compaction **/
   protected int keepSeqIdPeriod;
 
   // Configs that drive whether we drop page cache behind compactions
@@ -141,15 +141,15 @@ public abstract class Compactor<T extends CellSink> {
   protected FileDetails getFileDetails(
       Collection<HStoreFile> filesToCompact, boolean allFiles) throws IOException {
     FileDetails fd = new FileDetails();
-    long oldestHFileTimeStampToKeepMVCC = System.currentTimeMillis() - 
-      (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);  
+    long oldestHFileTimeStampToKeepMVCC = System.currentTimeMillis() -
+      (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);
 
     for (HStoreFile file : filesToCompact) {
       if(allFiles && (file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
-        // when isAllFiles is true, all files are compacted so we can calculate the smallest 
+        // when isAllFiles is true, all files are compacted so we can calculate the smallest
         // MVCC value to keep
-        if(fd.minSeqIdToKeep < file.getMaxMemstoreTS()) {
-          fd.minSeqIdToKeep = file.getMaxMemstoreTS();
+        if(fd.minSeqIdToKeep < file.getMaxMemStoreTS()) {
+          fd.minSeqIdToKeep = file.getMaxMemStoreTS();
         }
       }
       long seqNum = file.getMaxSequenceId();

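The cutoff above is plain date arithmetic; a minimal sketch with a hypothetical five-day keepSeqIdPeriod. The 1000L literal keeps the whole product in long arithmetic, which matters once the day count grows large enough for an int product to overflow:

    public class MvccCutoff {
      public static void main(String[] args) {
        int keepSeqIdPeriod = 5; // hypothetical: keep MVCC/sequence ids for five days
        long oldestHFileTimeStampToKeepMVCC = System.currentTimeMillis()
            - (1000L * 60 * 60 * 24 * keepSeqIdPeriod); // five days back, in ms
        // Files last modified before this cutoff may have their MVCC (memstore TS)
        // values dropped when all files are compacted together.
        System.out.println(oldestHFileTimeStampToKeepMVCC);
      }
    }
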
http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 3868ba7..8455968 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -302,7 +302,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
   }
 
   private int calculateMaxLogFiles(Configuration conf, long logRollSize) {
-    Pair<Long, MemoryType> globalMemstoreSize = MemorySizeUtil.getGlobalMemstoreSize(conf);
+    Pair<Long, MemoryType> globalMemstoreSize = MemorySizeUtil.getGlobalMemStoreSize(conf);
     return (int) ((globalMemstoreSize.getFirst() * 2) / logRollSize);
   }
 
@@ -468,13 +468,13 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
   }
 
   @Override
-  public long getEarliestMemstoreSeqNum(byte[] encodedRegionName) {
+  public long getEarliestMemStoreSeqNum(byte[] encodedRegionName) {
     // Used by tests. Deprecated as too subtle for general usage.
     return this.sequenceIdAccounting.getLowestSequenceId(encodedRegionName);
   }
 
   @Override
-  public long getEarliestMemstoreSeqNum(byte[] encodedRegionName, byte[] familyName) {
+  public long getEarliestMemStoreSeqNum(byte[] encodedRegionName, byte[] familyName) {
     // This method is used by tests and for figuring if we should flush or not because our
     // sequenceids are too old. It is also used reporting the master our oldest sequenceid for use
     // figuring what edits can be skipped during log recovery. getEarliestMemStoreSequenceId
@@ -924,7 +924,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
     assert highestUnsyncedTxid < entry.getTxid();
     highestUnsyncedTxid = entry.getTxid();
     sequenceIdAccounting.update(encodedRegionName, entry.getFamilyNames(), regionSequenceId,
-      entry.isInMemstore());
+      entry.isInMemStore());
     coprocessorHost.postWALWrite(entry.getRegionInfo(), entry.getKey(), entry.getEdit());
     // Update metrics.
     postAppend(entry, EnvironmentEdgeManager.currentTime() - start);

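calculateMaxLogFiles above bounds the WAL backlog by the global memstore: the server keeps at most as many log files as would hold roughly twice the global memstore, so replay after a crash stays bounded. A sketch with hypothetical sizes:

    public class MaxLogFiles {
      public static void main(String[] args) {
        long globalMemStoreSize = 4L * 1024 * 1024 * 1024; // assumed 4 GB global memstore
        long logRollSize = 128L * 1024 * 1024;             // assumed 128 MB WAL roll size
        // Same expression as calculateMaxLogFiles above.
        System.out.println((int) ((globalMemStoreSize * 2) / logRollSize)); // 64
      }
    }
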
http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
index 0c83374..f9374d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
@@ -92,7 +92,7 @@ class FSWALEntry extends Entry {
     return "sequence=" + this.txid + ", " + super.toString();
   };
 
-  boolean isInMemstore() {
+  boolean isInMemStore() {
     return this.inMemstore;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
index e1e2514..69365d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
@@ -400,7 +400,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
         // check if the table requires memstore replication
         // some unit-test drop the table, so we should do a bypass check and always replicate.
         TableDescriptor htd = tableDescriptors.get(tableName);
-        requiresReplication = htd == null || htd.hasRegionMemstoreReplication();
+        requiresReplication = htd == null || htd.hasRegionMemStoreReplication();
         memstoreReplicationEnabled.put(tableName, requiresReplication);
       }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
index 652aa2f..1458631 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
@@ -220,12 +220,12 @@ class DisabledWALProvider implements WALProvider {
     }
 
     @Override
-    public long getEarliestMemstoreSeqNum(byte[] encodedRegionName) {
+    public long getEarliestMemStoreSeqNum(byte[] encodedRegionName) {
       return HConstants.NO_SEQNUM;
     }
 
     @Override
-    public long getEarliestMemstoreSeqNum(byte[] encodedRegionName, byte[] familyName) {
+    public long getEarliestMemStoreSeqNum(byte[] encodedRegionName, byte[] familyName) {
       return HConstants.NO_SEQNUM;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
index 886ec78..e319255 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
@@ -190,11 +190,11 @@ public interface WAL extends Closeable, WALFileLengthProvider {
    * @param encodedRegionName The region to get the number for.
    * @return The earliest/lowest/oldest sequence id if present, HConstants.NO_SEQNUM if absent.
    * @deprecated Since version 1.2.0. Removing because not used and exposes subtle internal
-   * workings. Use {@link #getEarliestMemstoreSeqNum(byte[], byte[])}
+   * workings. Use {@link #getEarliestMemStoreSeqNum(byte[], byte[])}
    */
   @VisibleForTesting
   @Deprecated
-  long getEarliestMemstoreSeqNum(byte[] encodedRegionName);
+  long getEarliestMemStoreSeqNum(byte[] encodedRegionName);
 
   /**
    * Gets the earliest unflushed sequence id in the memstore for the store.
@@ -202,7 +202,7 @@ public interface WAL extends Closeable, WALFileLengthProvider {
    * @param familyName The family to get the number for.
    * @return The earliest/lowest/oldest sequence id if present, HConstants.NO_SEQNUM if absent.
    */
-  long getEarliestMemstoreSeqNum(byte[] encodedRegionName, byte[] familyName);
+  long getEarliestMemStoreSeqNum(byte[] encodedRegionName, byte[] familyName);
 
   /**
    * Human readable identifying information about the state of this WAL.

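For callers migrating off the deprecated one-argument form, the per-family overload is the supported entry point. A hedged usage fragment against the interface above (wal, encodedRegionName, and familyName are placeholders, not values from this patch):

    // NO_SEQNUM means the family has nothing unflushed, so no edits to replay.
    static boolean hasUnflushedEdits(WAL wal, byte[] encodedRegionName, byte[] familyName) {
      long seqId = wal.getEarliestMemStoreSeqNum(encodedRegionName, familyName);
      return seqId != HConstants.NO_SEQNUM;
    }
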
http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
index 21b36b7..202ea4b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
@@ -95,9 +95,9 @@ public class TestGlobalMemStoreSize {
       long globalMemStoreSize = 0;
       for (RegionInfo regionInfo :
           ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) {
-        globalMemStoreSize += server.getRegion(regionInfo.getEncodedName()).getMemstoreSize();
+        globalMemStoreSize += server.getRegion(regionInfo.getEncodedName()).getMemStoreSize();
       }
-      assertEquals(server.getRegionServerAccounting().getGlobalMemstoreDataSize(),
+      assertEquals(server.getRegionServerAccounting().getGlobalMemStoreDataSize(),
         globalMemStoreSize);
     }
 
@@ -105,7 +105,7 @@ public class TestGlobalMemStoreSize {
     int i = 0;
     for (HRegionServer server : getOnlineRegionServers()) {
       LOG.info("Starting flushes on " + server.getServerName() +
-        ", size=" + server.getRegionServerAccounting().getGlobalMemstoreDataSize());
+        ", size=" + server.getRegionServerAccounting().getGlobalMemStoreDataSize());
 
       for (RegionInfo regionInfo :
           ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) {
@@ -115,18 +115,18 @@ public class TestGlobalMemStoreSize {
       LOG.info("Post flush on " + server.getServerName());
       long now = System.currentTimeMillis();
       long timeout = now + 1000;
-      while(server.getRegionServerAccounting().getGlobalMemstoreDataSize() != 0 &&
+      while(server.getRegionServerAccounting().getGlobalMemStoreDataSize() != 0 &&
           timeout < System.currentTimeMillis()) {
         Threads.sleep(10);
       }
-      long size = server.getRegionServerAccounting().getGlobalMemstoreDataSize();
+      long size = server.getRegionServerAccounting().getGlobalMemStoreDataSize();
       if (size > 0) {
         // If size > 0, see if its because the meta region got edits while
         // our test was running....
         for (RegionInfo regionInfo :
             ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) {
           Region r = server.getRegion(regionInfo.getEncodedName());
-          long l = r.getMemstoreSize();
+          long l = r.getMemStoreSize();
           if (l > 0) {
             // Only meta could have edits at this stage.  Give it another flush
             // clear them.
@@ -136,7 +136,7 @@ public class TestGlobalMemStoreSize {
           }
         }
       }
-      size = server.getRegionServerAccounting().getGlobalMemstoreDataSize();
+      size = server.getRegionServerAccounting().getGlobalMemStoreDataSize();
       assertEquals("Server=" + server.getServerName() + ", i=" + i++, 0, size);
     }
 
@@ -154,7 +154,7 @@ public class TestGlobalMemStoreSize {
   throws IOException {
     LOG.info("Flush " + r.toString() + " on " + server.getServerName() +
       ", " +  r.flush(true) + ", size=" +
-      server.getRegionServerAccounting().getGlobalMemstoreDataSize());
+      server.getRegionServerAccounting().getGlobalMemStoreDataSize());
   }
 
   private List<HRegionServer> getOnlineRegionServers() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
index 9775b86..3966cf2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
@@ -325,30 +325,30 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
     ASYNC_CONN.getRawTable(tableName)
         .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, Bytes.toBytes("value-1")))
         .join();
-    Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemstoreSize() > 0);
+    Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0);
     // flush region and wait flush operation finished.
     LOG.info("flushing region: " + Bytes.toStringBinary(hri.getRegionName()));
     admin.flushRegion(hri.getRegionName()).get();
     LOG.info("blocking until flush is complete: " + Bytes.toStringBinary(hri.getRegionName()));
     Threads.sleepWithoutInterrupt(500);
-    while (regionServer.getOnlineRegion(hri.getRegionName()).getMemstoreSize() > 0) {
+    while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0) {
       Threads.sleep(50);
     }
     // check the memstore.
-    Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemstoreSize(), 0);
+    Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize(), 0);
 
     // write another put into the specific region
     ASYNC_CONN.getRawTable(tableName)
         .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, 
Bytes.toBytes("value-2")))
         .join();
-    Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemstoreSize() > 0);
+    Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0);
     admin.flush(tableName).get();
     Threads.sleepWithoutInterrupt(500);
-    while (regionServer.getOnlineRegion(hri.getRegionName()).getMemstoreSize() > 0) {
+    while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0) {
       Threads.sleep(50);
     }
     // check the memstore.
-    Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemstoreSize(), 0);
+    Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize(), 0);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
index 1b1805e..12c7fae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
@@ -28,14 +28,13 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.AsyncProcessTask;
 import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
 import org.apache.hadoop.hbase.client.backoff.ExponentialClientBackoffPolicy;
 import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.MemstoreSize;
+import org.apache.hadoop.hbase.regionserver.MemStoreSize;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -106,7 +105,7 @@ public class TestClientPushback {
     mutator.flush();
 
     // get the current load on RS. Hopefully memstore isn't flushed since we wrote the the data
-    int load = (int) ((((HRegion) region).addAndGetMemstoreSize(new MemstoreSize(0, 0)) * 100)
+    int load = (int) ((((HRegion) region).addAndGetMemStoreSize(new MemStoreSize(0, 0)) * 100)
         / flushSizeBytes);
     LOG.debug("Done writing some data to "+tableName);
 
@@ -114,7 +113,7 @@ public class TestClientPushback {
     ClientBackoffPolicy backoffPolicy = conn.getBackoffPolicy();
     assertTrue("Backoff policy is not correctly configured",
       backoffPolicy instanceof ExponentialClientBackoffPolicy);
-    
+
     ServerStatisticTracker stats = conn.getStatisticsTracker();
     assertNotNull( "No stats configured for the client!", stats);
     // get the names so we can query the stats
@@ -125,7 +124,7 @@ public class TestClientPushback {
     ServerStatistics serverStats = stats.getServerStatsForTesting(server);
     ServerStatistics.RegionStatistics regionStats = serverStats.getStatsForRegion(regionName);
     assertEquals("We did not find some load on the memstore", load,
-      regionStats.getMemstoreLoadPercent());
+      regionStats.getMemStoreLoadPercent());
     // check that the load reported produces a nonzero delay
     long backoffTime = backoffPolicy.getBackoffTime(server, regionName, serverStats);
     assertNotEquals("Reported load does not produce a backoff", backoffTime, 0);
@@ -163,7 +162,7 @@ public class TestClientPushback {
     assertEquals(rsStats.heapOccupancyHist.getSnapshot().getMean(),
         (double)regionStats.getHeapOccupancyPercent(), 0.1 );
     assertEquals(rsStats.memstoreLoadHist.getSnapshot().getMean(),
-        (double)regionStats.getMemstoreLoadPercent(), 0.1);
+        (double)regionStats.getMemStoreLoadPercent(), 0.1);
 
     MetricsConnection.RunnerStats runnerStats = conn.getConnectionMetrics().runnerStats;
 
@@ -202,6 +201,6 @@ public class TestClientPushback {
     ServerStatistics.RegionStatistics regionStats = serverStats.getStatsForRegion(regionName);
 
     assertNotNull(regionStats);
-    assertTrue(regionStats.getMemstoreLoadPercent() > 0);
+    assertTrue(regionStats.getMemStoreLoadPercent() > 0);
     }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java
new file mode 100644
index 0000000..30b3d71
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+package org.apache.hadoop.hbase.coprocessor;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.MemStoreSize;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test that verifies we do not get a negative memstore size when a postPut/Delete hook is
+ * slow/expensive and a flush is triggered at the same time the coprocessor is doing its work. To
+ * simulate this we call flush from the coprocessor itself.
+ */
+@Category(LargeTests.class)
+public class TestNegativeMemStoreSizeWithSlowCoprocessor {
+
+  static final Log LOG = LogFactory.getLog(TestNegativeMemStoreSizeWithSlowCoprocessor.class);
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final byte[] tableName = Bytes.toBytes("test_table");
+  private static final byte[] family = Bytes.toBytes("f");
+  private static final byte[] qualifier = Bytes.toBytes("q");
+
+  @BeforeClass
+  public static void setupBeforeClass() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+      FlushingRegionObserver.class.getName());
+    conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
+    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast.
+    TEST_UTIL.startMiniCluster(1);
+    TEST_UTIL.createTable(TableName.valueOf(tableName), family);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testNegativeMemstoreSize() throws IOException, InterruptedException {
+    boolean IOEthrown = false;
+    Table table = null;
+    try {
+      table = TEST_UTIL.getConnection().getTable(TableName.valueOf(tableName));
+
+      // Adding data
+      Put put1 = new Put(Bytes.toBytes("row1"));
+      put1.addColumn(family, qualifier, Bytes.toBytes("Value1"));
+      table.put(put1);
+      Put put2 = new Put(Bytes.toBytes("row2"));
+      put2.addColumn(family, qualifier, Bytes.toBytes("Value2"));
+      table.put(put2);
+      table.put(put2);
+    } catch (IOException e) {
+      IOEthrown = true;
+    } finally {
+      Assert.assertFalse("Shouldn't have thrown an exception", IOEthrown);
+      if (table != null) {
+        table.close();
+      }
+    }
+  }
+
+  public static class FlushingRegionObserver extends SimpleRegionObserver {
+
+    @Override
+    public void postPut(final ObserverContext<RegionCoprocessorEnvironment> c, final Put put,
+        final WALEdit edit, final Durability durability) throws IOException {
+      HRegion region = (HRegion) c.getEnvironment().getRegion();
+      super.postPut(c, put, edit, durability);
+
+      if (Bytes.equals(put.getRow(), Bytes.toBytes("row2"))) {
+        region.flush(false);
+        Assert.assertTrue(region.addAndGetMemStoreSize(new MemStoreSize()) >= 0);
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemstoreSizeWithSlowCoprocessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemstoreSizeWithSlowCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemstoreSizeWithSlowCoprocessor.java
deleted file mode 100644
index 42a8ee4..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemstoreSizeWithSlowCoprocessor.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
- * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
- * for the specific language governing permissions and limitations under the License.
- */
-package org.apache.hadoop.hbase.coprocessor;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.MemstoreSize;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Test that verifies we do not have memstore size negative when a postPut/Delete hook is
- * slow/expensive and a flush is triggered at the same time the coprocessow is doing its work. To
- * simulate this we call flush from the coprocessor itself
- */
-@Category(LargeTests.class)
-public class TestNegativeMemstoreSizeWithSlowCoprocessor {
-
-  static final Log LOG = LogFactory.getLog(TestNegativeMemstoreSizeWithSlowCoprocessor.class);
-  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static final byte[] tableName = Bytes.toBytes("test_table");
-  private static final byte[] family = Bytes.toBytes("f");
-  private static final byte[] qualifier = Bytes.toBytes("q");
-
-  @BeforeClass
-  public static void setupBeforeClass() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
-    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
-      FlushingRegionObserver.class.getName());
-    conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, true);
-    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); // Let's fail fast.
-    TEST_UTIL.startMiniCluster(1);
-    TEST_UTIL.createTable(TableName.valueOf(tableName), family);
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  @Test
-  public void testNegativeMemstoreSize() throws IOException, InterruptedException {
-    boolean IOEthrown = false;
-    Table table = null;
-    try {
-      table = TEST_UTIL.getConnection().getTable(TableName.valueOf(tableName));
-
-      // Adding data
-      Put put1 = new Put(Bytes.toBytes("row1"));
-      put1.addColumn(family, qualifier, Bytes.toBytes("Value1"));
-      table.put(put1);
-      Put put2 = new Put(Bytes.toBytes("row2"));
-      put2.addColumn(family, qualifier, Bytes.toBytes("Value2"));
-      table.put(put2);
-      table.put(put2);
-    } catch (IOException e) {
-      IOEthrown = true;
-    } finally {
-      Assert.assertFalse("Shouldn't have thrown an exception", IOEthrown);
-      if (table != null) {
-        table.close();
-      }
-    }
-  }
-
-  public static class FlushingRegionObserver extends SimpleRegionObserver {
-
-    @Override
-    public void postPut(final ObserverContext<RegionCoprocessorEnvironment> c, final Put put,
-        final WALEdit edit, final Durability durability) throws IOException {
-      HRegion region = (HRegion) c.getEnvironment().getRegion();
-      super.postPut(c, put, edit, durability);
-
-      if (Bytes.equals(put.getRow(), Bytes.toBytes("row2"))) {
-        region.flush(false);
-        Assert.assertTrue(region.addAndGetMemstoreSize(new MemstoreSize()) >= 0);
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
index c5bf581..334127c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java
@@ -418,7 +418,7 @@ public class TestHFileBlock {
                 .build();
           HFileBlock.FSReaderImpl hbr = new HFileBlock.FSReaderImpl(is, totalSize, meta);
           hbr.setDataBlockEncoder(dataBlockEncoder);
-          hbr.setIncludesMemstoreTS(includesMemstoreTS);
+          hbr.setIncludesMemStoreTS(includesMemstoreTS);
           HFileBlock blockFromHFile, blockUnpacked;
           int pos = 0;
           for (int blockId = 0; blockId < numBlocks; ++blockId) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
index e547f87..68d009d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
@@ -343,7 +343,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
   public void testCostFromArray() {
     Configuration conf = HBaseConfiguration.create();
     StochasticLoadBalancer.CostFromRegionLoadFunction
-        costFunction = new StochasticLoadBalancer.MemstoreSizeCostFunction(conf);
+        costFunction = new StochasticLoadBalancer.MemStoreSizeCostFunction(conf);
     costFunction.init(mockCluster(new int[]{0, 0, 0, 0, 1}));
 
     double[] statOne = new double[100];

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
index e2b4ab3..d56823e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
@@ -61,7 +61,7 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe
   }
 
   @Override
-  public long getMemstoreSize() {
+  public long getMemStoreSize() {
     return 1025;
   }
 
@@ -206,7 +206,7 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe
   }
 
   @Override
-  public long getMemstoreLimit() {
+  public long getMemStoreLimit() {
          return 419;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
index 6f32000..524d03b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
@@ -61,7 +61,7 @@ public class MetricsRegionWrapperStub implements MetricsRegionWrapper {
   }
 
   @Override
-  public long getMemstoreSize() {
+  public long getMemStoreSize() {
     return 103;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java
index 6fd8dd7..ba333a5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java
@@ -42,7 +42,7 @@ public class MetricsTableWrapperStub implements MetricsTableWrapperAggregate {
   }
 
   @Override
-  public long getMemstoresSize(String table) {
+  public long getMemStoresSize(String table) {
     return 1000;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
index dc3cf4d..0886fd1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
@@ -260,7 +260,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   @Override
   @Test
   public void testUpsertMemstoreSize() throws Exception {
-    MemstoreSize oldSize = memstore.size();
+    MemStoreSize oldSize = memstore.size();
 
     List<Cell> l = new ArrayList<>();
     KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
@@ -275,7 +275,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     l.add(kv3);
 
     this.memstore.upsert(l, 2, null);// readpoint is 2
-    MemstoreSize newSize = this.memstore.size();
+    MemStoreSize newSize = this.memstore.size();
     assert (newSize.getDataSize() > oldSize.getDataSize());
     //The kv1 should be removed.
     assert (memstore.getActive().getCellsCount() == 2);
@@ -593,7 +593,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
                 .length(kv));
 
     long totalHeapSize = numOfCells * oneCellOnCSLMHeapSize + MutableSegment.DEEP_OVERHEAD;
-    assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and flatten
@@ -605,15 +605,15 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM
         + numOfCells * oneCellOnCCMHeapSize;
 
-    assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
-    MemstoreSize size = memstore.getFlushableSize();
+    MemStoreSize size = memstore.getFlushableSize();
     MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
-    region.decrMemstoreSize(size);  // simulate flusher
+    region.decrMemStoreSize(size);  // simulate flusher
     ImmutableSegment s = memstore.getSnapshot();
     assertEquals(numOfCells, s.getCellsCount());
-    assertEquals(0, regionServicesForStores.getMemstoreSize());
+    assertEquals(0, regionServicesForStores.getMemStoreSize());
 
     memstore.clearSnapshot(snapshot.getId());
   }
@@ -638,7 +638,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     int oneCellOnCSLMHeapSize = 120;
     int oneCellOnCAHeapSize = 88;
     long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * oneCellOnCSLMHeapSize;
-    assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
@@ -647,15 +647,15 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     // totalCellsLen remains the same
     totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM
         + 4 * oneCellOnCAHeapSize;
-    assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
-    MemstoreSize size = memstore.getFlushableSize();
+    MemStoreSize size = memstore.getFlushableSize();
     MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
-    region.decrMemstoreSize(size);  // simulate flusher
+    region.decrMemStoreSize(size);  // simulate flusher
     ImmutableSegment s = memstore.getSnapshot();
     assertEquals(4, s.getCellsCount());
-    assertEquals(0, regionServicesForStores.getMemstoreSize());
+    assertEquals(0, regionServicesForStores.getMemStoreSize());
 
     memstore.clearSnapshot(snapshot.getId());
   }
@@ -677,7 +677,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     int oneCellOnCAHeapSize = 88;
     long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * oneCellOnCSLMHeapSize;
 
-    assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
@@ -689,30 +689,30 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     assertEquals(0, memstore.getSnapshot().getCellsCount());
     // There is no compaction, as the compacting memstore type is basic.
     // totalCellsLen remains the same
-    assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize());
     totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM
         + 4 * oneCellOnCAHeapSize;
     assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     int totalCellsLen2 = addRowsByKeys(memstore, keys2);
     totalHeapSize += 3 * oneCellOnCSLMHeapSize;
-    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize, ((CompactingMemStore) memstore).heapSize());
 
-    MemstoreSize size = memstore.getFlushableSize();
+    MemStoreSize size = memstore.getFlushableSize();
     ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
     assertEquals(0, memstore.getSnapshot().getCellsCount());
-    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
     totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM
         + 7 * oneCellOnCAHeapSize;
     assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     size = memstore.getFlushableSize();
     MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
-    region.decrMemstoreSize(size);  // simulate flusher
+    region.decrMemStoreSize(size);  // simulate flusher
     ImmutableSegment s = memstore.getSnapshot();
     assertEquals(7, s.getCellsCount());
-    assertEquals(0, regionServicesForStores.getMemstoreSize());
+    assertEquals(0, regionServicesForStores.getMemStoreSize());
 
     memstore.clearSnapshot(snapshot.getId());
   }
@@ -732,7 +732,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     int totalCellsLen1 = addRowsByKeys(memstore, keys1);// Adding 4 cells.
     int oneCellOnCSLMHeapSize = 120;
     int oneCellOnCAHeapSize = 88;
-    assertEquals(totalCellsLen1, region.getMemstoreSize());
+    assertEquals(totalCellsLen1, region.getMemStoreSize());
     long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * oneCellOnCSLMHeapSize;
     assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
     ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact
@@ -741,7 +741,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     // One cell is duplicated and the compaction will remove it. All cells of same time so adjusting
     // totalCellsLen
     totalCellsLen1 = (totalCellsLen1 * 3) / 4;
-    assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize());
     // In memory flush to make a CellArrayMap instead of CSLM. See the overhead diff.
     totalHeapSize = MutableSegment.DEEP_OVERHEAD + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM
         + 3 * oneCellOnCAHeapSize;
@@ -750,21 +750,21 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     int totalCellsLen2 = addRowsByKeys(memstore, keys2);// Adding 3 more cells.
     long totalHeapSize2 = totalHeapSize + 3 * oneCellOnCSLMHeapSize;
 
-    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
 
     ((CompactingMemStore) memstore).disableCompaction();
-    MemstoreSize size = memstore.getFlushableSize();
+    MemStoreSize size = memstore.getFlushableSize();
     ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline without compaction
     assertEquals(0, memstore.getSnapshot().getCellsCount());
     // No change in the cells data size. ie. memstore size. as there is no compaction.
-    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize2 + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM,
         ((CompactingMemStore) memstore).heapSize());
 
     int totalCellsLen3 = addRowsByKeys(memstore, keys3);// 3 more cells added
     assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
-        regionServicesForStores.getMemstoreSize());
+        regionServicesForStores.getMemStoreSize());
     long totalHeapSize3 = totalHeapSize2 + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM
         + 3 * oneCellOnCSLMHeapSize;
     assertEquals(totalHeapSize3, ((CompactingMemStore) memstore).heapSize());
@@ -778,17 +778,17 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     totalCellsLen2 = totalCellsLen2 / 3;// 2 out of 3 cells are duplicated
     totalCellsLen3 = 0;// All duplicated cells.
     assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
-        regionServicesForStores.getMemstoreSize());
+        regionServicesForStores.getMemStoreSize());
     // Only 4 unique cells left
     assertEquals(4 * oneCellOnCAHeapSize + MutableSegment.DEEP_OVERHEAD
         + CellArrayImmutableSegment.DEEP_OVERHEAD_CAM, ((CompactingMemStore) memstore).heapSize());
 
     size = memstore.getFlushableSize();
     MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
-    region.decrMemstoreSize(size);  // simulate flusher
+    region.decrMemStoreSize(size);  // simulate flusher
     ImmutableSegment s = memstore.getSnapshot();
     assertEquals(4, s.getCellsCount());
-    assertEquals(0, regionServicesForStores.getMemstoreSize());
+    assertEquals(0, regionServicesForStores.getMemStoreSize());
 
     memstore.clearSnapshot(snapshot.getId());
   }
@@ -809,7 +809,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
       hmc.add(kv, null);
       LOG.debug("added kv: " + kv.getKeyString() + ", timestamp:" + kv.getTimestamp());
     }
-    regionServicesForStores.addMemstoreSize(new MemstoreSize(hmc.getActive().keySize() - size,
+    regionServicesForStores.addMemStoreSize(new MemStoreSize(hmc.getActive().keySize() - size,
         hmc.getActive().heapSize() - heapOverhead));
     return totalLen;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
index 6011af7..3fa5cd0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java
@@ -99,7 +99,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     long cellAfterFlushSize  = cellAfterFlushSize();
     long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * cellBeforeFlushSize;
 
-    assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
 
     assertEquals(4, memstore.getActive().getCellsCount());
@@ -108,7 +108,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     // One cell is duplicated and the compaction will remove it. All cells of same size so adjusting
     // totalCellsLen
     totalCellsLen = (totalCellsLen * 3) / 4;
-    assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen, regionServicesForStores.getMemStoreSize());
 
     totalHeapSize =
         3 * cellAfterFlushSize + MutableSegment.DEEP_OVERHEAD
@@ -120,12 +120,12 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
       counter += s.getCellsCount();
     }
     assertEquals(3, counter);
-    MemstoreSize size = memstore.getFlushableSize();
+    MemStoreSize size = memstore.getFlushableSize();
     MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
-    region.decrMemstoreSize(size);  // simulate flusher
+    region.decrMemStoreSize(size);  // simulate flusher
     ImmutableSegment s = memstore.getSnapshot();
     assertEquals(3, s.getCellsCount());
-    assertEquals(0, regionServicesForStores.getMemstoreSize());
+    assertEquals(0, regionServicesForStores.getMemStoreSize());
 
     memstore.clearSnapshot(snapshot.getId());
   }
@@ -144,7 +144,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     long cellBeforeFlushSize = cellBeforeFlushSize();
     long cellAfterFlushSize = cellAfterFlushSize();
     long totalHeapSize1 = MutableSegment.DEEP_OVERHEAD + 4 * cellBeforeFlushSize;
-    assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
 
     ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
@@ -161,12 +161,12 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
         + (toCellChunkMap ?
         CellChunkImmutableSegment.DEEP_OVERHEAD_CCM :
         CellArrayImmutableSegment.DEEP_OVERHEAD_CAM);
-    assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
 
     long totalCellsLen2 = addRowsByKeys(memstore, keys2);   // INSERT 3 (3+3=6)
     long totalHeapSize2 = 3 * cellBeforeFlushSize;
-    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
 
     ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
@@ -177,16 +177,16 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     }
     assertEquals(4,counter);
     totalCellsLen2 = totalCellsLen2 / 3;// 2 cells duplicated in set 2
-    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
     totalHeapSize2 = 1 * cellAfterFlushSize;
     assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
 
-    MemstoreSize size = memstore.getFlushableSize();
+    MemStoreSize size = memstore.getFlushableSize();
     MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
-    region.decrMemstoreSize(size);  // simulate flusher
+    region.decrMemStoreSize(size);  // simulate flusher
     ImmutableSegment s = memstore.getSnapshot();
     assertEquals(4, s.getCellsCount());
-    assertEquals(0, regionServicesForStores.getMemstoreSize());
+    assertEquals(0, regionServicesForStores.getMemStoreSize());
 
     memstore.clearSnapshot(snapshot.getId());
   }
@@ -206,10 +206,10 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     long cellBeforeFlushSize = cellBeforeFlushSize();
     long cellAfterFlushSize = cellAfterFlushSize();
     long totalHeapSize1 = MutableSegment.DEEP_OVERHEAD + 4 * cellBeforeFlushSize;
-    assertEquals(totalCellsLen1, region.getMemstoreSize());
+    assertEquals(totalCellsLen1, region.getMemStoreSize());
     assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
 
-    MemstoreSize size = memstore.getFlushableSize();
+    MemStoreSize size = memstore.getFlushableSize();
     ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
 
     assertEquals(0, memstore.getSnapshot().getCellsCount());
@@ -220,13 +220,13 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
         + (toCellChunkMap ?
         CellChunkImmutableSegment.DEEP_OVERHEAD_CCM :
         CellArrayImmutableSegment.DEEP_OVERHEAD_CAM);
-    assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
 
     long totalCellsLen2 = addRowsByKeys(memstore, keys2);
     long totalHeapSize2 = 3 * cellBeforeFlushSize;
 
-    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
 
     ((CompactingMemStore) memstore).disableCompaction();
@@ -234,13 +234,13 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline without compaction
     totalHeapSize2 = totalHeapSize2 + CSLMImmutableSegment.DEEP_OVERHEAD_CSLM;
     assertEquals(0, memstore.getSnapshot().getCellsCount());
-    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
+    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
 
     long totalCellsLen3 = addRowsByKeys(memstore, keys3);
     long totalHeapSize3 = 3 * cellBeforeFlushSize;
     assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
-        regionServicesForStores.getMemstoreSize());
+        regionServicesForStores.getMemStoreSize());
     assertEquals(totalHeapSize1 + totalHeapSize2 + totalHeapSize3,
         ((CompactingMemStore) memstore).heapSize());
 
@@ -256,7 +256,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
     totalCellsLen2 = totalCellsLen2 / 3;// 2 out of 3 cells are duplicated
     totalCellsLen3 = 0;// All duplicated cells.
     assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
-        regionServicesForStores.getMemstoreSize());
+        regionServicesForStores.getMemStoreSize());
     // Only 4 unique cells left
     long totalHeapSize4 = 4 * cellAfterFlushSize + MutableSegment.DEEP_OVERHEAD
         + (toCellChunkMap ?
@@ -266,10 +266,10 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
 
     size = memstore.getFlushableSize();
     MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
-    region.decrMemstoreSize(size);  // simulate flusher
+    region.decrMemStoreSize(size);  // simulate flusher
     ImmutableSegment s = memstore.getSnapshot();
     assertEquals(4, s.getCellsCount());
-    assertEquals(0, regionServicesForStores.getMemstoreSize());
+    assertEquals(0, regionServicesForStores.getMemStoreSize());
 
     memstore.clearSnapshot(snapshot.getId());
 
@@ -524,7 +524,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
   private long addRowsByKeys(final AbstractMemStore hmc, String[] keys) {
     byte[] fam = Bytes.toBytes("testfamily");
     byte[] qf = Bytes.toBytes("testqualifier");
-    MemstoreSize memstoreSize = new MemstoreSize();
+    MemStoreSize memstoreSize = new MemStoreSize();
     for (int i = 0; i < keys.length; i++) {
       long timestamp = System.currentTimeMillis();
       Threads.sleep(1); // to make sure each kv gets a different ts
@@ -534,7 +534,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore
       hmc.add(kv, memstoreSize);
       LOG.debug("added kv: " + kv.getKeyString() + ", timestamp" + kv.getTimestamp());
     }
-    regionServicesForStores.addMemstoreSize(memstoreSize);
+    regionServicesForStores.addMemStoreSize(memstoreSize);
     return memstoreSize.getDataSize();
   }
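
The hunks above repeat one flush-simulation idiom. As a reference, here is the sequence in isolation against the renamed API -- a sketch only, not part of the patch; the fixture fields (memstore, region, regionServicesForStores) and the JUnit static imports are assumed from the surrounding test class:

    // Simulate the flusher's effect on accounting without writing an HFile.
    private void simulateFlush() {
      MemStoreSize flushable = memstore.getFlushableSize(); // bytes about to be flushed
      MemStoreSnapshot snapshot = memstore.snapshot();      // move active cells into the snapshot
      region.decrMemStoreSize(flushable);                   // the flusher subtracts what it drained
      // with a single store in play, region-level accounting drops back to zero
      assertEquals(0, regionServicesForStores.getMemStoreSize());
      memstore.clearSnapshot(snapshot.getId());             // drop the snapshot: flush "complete"
    }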
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index e40ff8e..eb9efab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -126,9 +126,9 @@ public class TestDefaultMemStore {
   public void testPutSameCell() {
     byte[] bytes = Bytes.toBytes(getName());
     KeyValue kv = new KeyValue(bytes, bytes, bytes, bytes);
-    MemstoreSize sizeChangeForFirstCell = new MemstoreSize();
+    MemStoreSize sizeChangeForFirstCell = new MemStoreSize();
     this.memstore.add(kv, sizeChangeForFirstCell);
-    MemstoreSize sizeChangeForSecondCell = new MemstoreSize();
+    MemStoreSize sizeChangeForSecondCell = new MemStoreSize();
     this.memstore.add(kv, sizeChangeForSecondCell);
     // make sure memstore size increase won't double-count MSLAB chunk size
     assertEquals(Segment.getCellLength(kv), 
sizeChangeForFirstCell.getDataSize());
@@ -826,7 +826,7 @@ public class TestDefaultMemStore {
   public void testUpsertMemstoreSize() throws Exception {
     Configuration conf = HBaseConfiguration.create();
     memstore = new DefaultMemStore(conf, CellComparator.COMPARATOR);
-    MemstoreSize oldSize = memstore.size();
+    MemStoreSize oldSize = memstore.size();
 
     List<Cell> l = new ArrayList<>();
     KeyValue kv1 = KeyValueTestUtil.create("r", "f", "q", 100, "v");
@@ -837,7 +837,7 @@ public class TestDefaultMemStore {
     l.add(kv1); l.add(kv2); l.add(kv3);
 
     this.memstore.upsert(l, 2, null);// readpoint is 2
-    MemstoreSize newSize = this.memstore.size();
+    MemStoreSize newSize = this.memstore.size();
     assert (newSize.getDataSize() > oldSize.getDataSize());
     //The kv1 should be removed.
     assert(memstore.getActive().getCellsCount() == 2);
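
Note the out-parameter idiom the renamed class keeps: add() returns nothing and instead accumulates its delta into the caller's MemStoreSize. A minimal sketch under the same fixture assumptions (DefaultMemStore behind this.memstore, JUnit asserts), not code from the commit:

    KeyValue kv = KeyValueTestUtil.create("r", "f", "q", 100, "v");
    MemStoreSize delta = new MemStoreSize();  // starts at zero
    memstore.add(kv, delta);                  // add() increments delta in place
    // the data-size delta of a single add is exactly the cell's length
    assertEquals(Segment.getCellLength(kv), delta.getDataSize());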

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index 6a41742..38f3060 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -65,7 +65,6 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Iterators;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 
 @Category(LargeTests.class)
 public class TestEndToEndSplitTransaction {
@@ -316,7 +315,7 @@ public class TestEndToEndSplitTransaction {
     admin.flushRegion(regionName);
     log("blocking until flush is complete: " + 
Bytes.toStringBinary(regionName));
     Threads.sleepWithoutInterrupt(500);
-    while (rs.getOnlineRegion(regionName).getMemstoreSize() > 0) {
+    while (rs.getOnlineRegion(regionName).getMemStoreSize() > 0) {
       Threads.sleep(50);
     }
   }
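
The touched loop is a plain poll on the renamed accessor; in isolation (rs, regionName, and Threads are the test's own fixtures and utilities):

    // Wait until the region reports an empty memstore, i.e. the flush has landed.
    while (rs.getOnlineRegion(regionName).getMemStoreSize() > 0) {
      Threads.sleep(50);
    }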

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 4d557b9..a7793f6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -300,7 +300,7 @@ public class TestHRegion {
     region.put(put);
     // Close with something in memstore and something in the snapshot.  Make sure all is cleared.
     region.close();
-    assertEquals(0, region.getMemstoreSize());
+    assertEquals(0, region.getMemStoreSize());
     HBaseTestingUtility.closeRegionAndWAL(region);
   }
 
@@ -384,17 +384,17 @@ public class TestHRegion {
     HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog,
         COLUMN_FAMILY_BYTES);
     HStore store = region.getStore(COLUMN_FAMILY_BYTES);
-    assertEquals(0, region.getMemstoreSize());
+    assertEquals(0, region.getMemStoreSize());
 
     // Put some value and make sure flush could be completed normally
     byte [] value = Bytes.toBytes(method);
     Put put = new Put(value);
     put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
     region.put(put);
-    long onePutSize = region.getMemstoreSize();
+    long onePutSize = region.getMemStoreSize();
     assertTrue(onePutSize > 0);
     region.flush(true);
-    assertEquals("memstoreSize should be zero", 0, region.getMemstoreSize());
+    assertEquals("memstoreSize should be zero", 0, region.getMemStoreSize());
     assertEquals("flushable size should be zero", 0, 
store.getFlushableSize().getDataSize());
 
     // save normalCPHost and replaced by mockedCPHost, which will cancel flush requests
@@ -405,14 +405,14 @@ public class TestHRegion {
     region.setCoprocessorHost(mockedCPHost);
     region.put(put);
     region.flush(true);
-    assertEquals("memstoreSize should NOT be zero", onePutSize, 
region.getMemstoreSize());
+    assertEquals("memstoreSize should NOT be zero", onePutSize, 
region.getMemStoreSize());
     assertEquals("flushable size should NOT be zero", onePutSize,
         store.getFlushableSize().getDataSize());
 
     // set normalCPHost and flush again, the snapshot will be flushed
     region.setCoprocessorHost(normalCPHost);
     region.flush(true);
-    assertEquals("memstoreSize should be zero", 0, region.getMemstoreSize());
+    assertEquals("memstoreSize should be zero", 0, region.getMemStoreSize());
     assertEquals("flushable size should be zero", 0, 
store.getFlushableSize().getDataSize());
     HBaseTestingUtility.closeRegionAndWAL(region);
   }
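
The accounting contract these TestHRegion hunks keep asserting can be read as one sequence; an illustrative sketch (region fixture and a populated Put assumed), not text from the patch:

    assertEquals(0, region.getMemStoreSize());  // a fresh region holds nothing
    region.put(put);                            // one edit lands in the memstore
    long onePutSize = region.getMemStoreSize();
    assertTrue(onePutSize > 0);                 // accounting saw the edit
    region.flush(true);                         // a successful flush drains the memstore
    assertEquals(0, region.getMemStoreSize());  // and the accounting follows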
@@ -426,14 +426,14 @@ public class TestHRegion {
     HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog,
         COLUMN_FAMILY_BYTES);
     HStore store = region.getStore(COLUMN_FAMILY_BYTES);
-    assertEquals(0, region.getMemstoreSize());
+    assertEquals(0, region.getMemStoreSize());
 
     // Put one value
     byte [] value = Bytes.toBytes(method);
     Put put = new Put(value);
     put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
     region.put(put);
-    long onePutSize = region.getMemstoreSize();
+    long onePutSize = region.getMemStoreSize();
     assertTrue(onePutSize > 0);
 
     RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class);
@@ -449,7 +449,7 @@ public class TestHRegion {
     } catch (IOException expected) {
     }
     long expectedSize = onePutSize * 2;
-    assertEquals("memstoreSize should be incremented", expectedSize, 
region.getMemstoreSize());
+    assertEquals("memstoreSize should be incremented", expectedSize, 
region.getMemStoreSize());
     assertEquals("flushable size should be incremented", expectedSize,
         store.getFlushableSize().getDataSize());
 
@@ -494,13 +494,13 @@ public class TestHRegion {
           // Initialize region
           region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, wal,
               COLUMN_FAMILY_BYTES);
-          long size = region.getMemstoreSize();
+          long size = region.getMemStoreSize();
           Assert.assertEquals(0, size);
           // Put one item into memstore.  Measure the size of one item in memstore.
           Put p1 = new Put(row);
           p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null));
           region.put(p1);
-          final long sizeOfOnePut = region.getMemstoreSize();
+          final long sizeOfOnePut = region.getMemStoreSize();
           // Fail a flush which means the current memstore will hang out as memstore 'snapshot'.
           try {
             LOG.info("Flushing");
@@ -513,7 +513,7 @@ public class TestHRegion {
           // Make it so all writes succeed from here on out
           ffs.fault.set(false);
           // Check sizes.  Should still be the one entry.
-          Assert.assertEquals(sizeOfOnePut, region.getMemstoreSize());
+          Assert.assertEquals(sizeOfOnePut, region.getMemStoreSize());
           // Now add two entries so that on this next flush that fails, we can see if we
           // subtract the right amount, the snapshot size only.
           Put p2 = new Put(row);
@@ -521,13 +521,13 @@ public class TestHRegion {
           p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[])null));
           region.put(p2);
           long expectedSize = sizeOfOnePut * 3;
-          Assert.assertEquals(expectedSize, region.getMemstoreSize());
+          Assert.assertEquals(expectedSize, region.getMemStoreSize());
           // Do a successful flush.  It will clear the snapshot only.  That's how flushes work.
           // If already a snapshot, we clear it else we move the memstore to be snapshot and flush
           // it
           region.flush(true);
           // Make sure our memory accounting is right.
-          Assert.assertEquals(sizeOfOnePut * 2, region.getMemstoreSize());
+          Assert.assertEquals(sizeOfOnePut * 2, region.getMemStoreSize());
         } finally {
           HBaseTestingUtility.closeRegionAndWAL(region);
         }
@@ -559,7 +559,7 @@ public class TestHRegion {
           // Initialize region
           region = initHRegion(tableName, null, null, false,
               Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES);
-          long size = region.getMemstoreSize();
+          long size = region.getMemStoreSize();
           Assert.assertEquals(0, size);
           // Put one item into memstore.  Measure the size of one item in memstore.
           Put p1 = new Put(row);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index 63f5dfc..fe684bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -282,12 +282,12 @@ public class TestHRegionReplayEvents {
       }
     }
 
-    assertTrue(rss.getRegionServerAccounting().getGlobalMemstoreDataSize() > 0);
+    assertTrue(rss.getRegionServerAccounting().getGlobalMemStoreDataSize() > 0);
     // now close the region which should not cause hold because of un-committed flush
     secondaryRegion.close();
 
     // verify that the memstore size is back to what it was
-    assertEquals(0, rss.getRegionServerAccounting().getGlobalMemstoreDataSize());
+    assertEquals(0, rss.getRegionServerAccounting().getGlobalMemStoreDataSize());
   }
 
   static int replayEdit(HRegion region, WAL.Entry entry) throws IOException {
@@ -341,7 +341,7 @@ public class TestHRegionReplayEvents {
         verifyData(secondaryRegion, 0, lastReplayed, cq, families);
         HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
         long storeMemstoreSize = store.getMemStoreSize().getHeapSize();
-        long regionMemstoreSize = secondaryRegion.getMemstoreSize();
+        long regionMemstoreSize = secondaryRegion.getMemStoreSize();
         long storeFlushableSize = store.getFlushableSize().getHeapSize();
         long storeSize = store.getSize();
         long storeSizeUncompressed = store.getStoreSizeUncompressed();
@@ -370,7 +370,7 @@ public class TestHRegionReplayEvents {
           assertTrue(storeFlushableSize > newFlushableSize);
 
           // assert that the region memstore is smaller now
-          long newRegionMemstoreSize = secondaryRegion.getMemstoreSize();
+          long newRegionMemstoreSize = secondaryRegion.getMemStoreSize();
           assertTrue(regionMemstoreSize > newRegionMemstoreSize);
 
           // assert that the store sizes are bigger
@@ -440,7 +440,7 @@ public class TestHRegionReplayEvents {
        // first verify that everything is replayed and visible before flush event replay
         HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
         long storeMemstoreSize = store.getMemStoreSize().getHeapSize();
-        long regionMemstoreSize = secondaryRegion.getMemstoreSize();
+        long regionMemstoreSize = secondaryRegion.getMemStoreSize();
         long storeFlushableSize = store.getFlushableSize().getHeapSize();
 
         if (flushDesc.getAction() == FlushAction.START_FLUSH) {
@@ -480,7 +480,7 @@ public class TestHRegionReplayEvents {
     assertNotNull(secondaryRegion.getPrepareFlushResult());
     assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId,
       startFlushDesc.getFlushSequenceNumber());
-    assertTrue(secondaryRegion.getMemstoreSize() > 0); // memstore is not empty
+    assertTrue(secondaryRegion.getMemStoreSize() > 0); // memstore is not empty
     verifyData(secondaryRegion, 0, numRows, cq, families);
 
     // Test case 2: replay a flush start marker with a smaller seqId
@@ -493,7 +493,7 @@ public class TestHRegionReplayEvents {
     assertNotNull(secondaryRegion.getPrepareFlushResult());
     assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId,
       startFlushDesc.getFlushSequenceNumber());
-    assertTrue(secondaryRegion.getMemstoreSize() > 0); // memstore is not empty
+    assertTrue(secondaryRegion.getMemStoreSize() > 0); // memstore is not empty
     verifyData(secondaryRegion, 0, numRows, cq, families);
 
     // Test case 3: replay a flush start marker with a larger seqId
@@ -506,7 +506,7 @@ public class TestHRegionReplayEvents {
     assertNotNull(secondaryRegion.getPrepareFlushResult());
     assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId,
       startFlushDesc.getFlushSequenceNumber());
-    assertTrue(secondaryRegion.getMemstoreSize() > 0); // memstore is not empty
+    assertTrue(secondaryRegion.getMemStoreSize() > 0); // memstore is not empty
     verifyData(secondaryRegion, 0, numRows, cq, families);
 
     LOG.info("-- Verifying edits from secondary");
@@ -575,7 +575,7 @@ public class TestHRegionReplayEvents {
     for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
-    long regionMemstoreSize = secondaryRegion.getMemstoreSize();
+    long regionMemstoreSize = secondaryRegion.getMemStoreSize();
 
     // Test case 1: replay a flush commit marker smaller than what we have prepared
     LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START"
@@ -595,7 +595,7 @@ public class TestHRegionReplayEvents {
     assertTrue(newFlushableSize > 0); // assert that the memstore is not dropped
 
     // assert that the region memstore is same as before
-    long newRegionMemstoreSize = secondaryRegion.getMemstoreSize();
+    long newRegionMemstoreSize = secondaryRegion.getMemStoreSize();
     assertEquals(regionMemstoreSize, newRegionMemstoreSize);
 
     assertNotNull(secondaryRegion.getPrepareFlushResult()); // not dropped
@@ -665,7 +665,7 @@ public class TestHRegionReplayEvents {
     for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
-    long regionMemstoreSize = secondaryRegion.getMemstoreSize();
+    long regionMemstoreSize = secondaryRegion.getMemStoreSize();
 
     // Test case 1: replay a flush commit marker larger than what we have prepared
     LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START"
@@ -685,7 +685,7 @@ public class TestHRegionReplayEvents {
     assertTrue(newFlushableSize > 0); // assert that the memstore is not dropped
 
     // assert that the region memstore is smaller than before, but not empty
-    long newRegionMemstoreSize = secondaryRegion.getMemstoreSize();
+    long newRegionMemstoreSize = secondaryRegion.getMemStoreSize();
     assertTrue(newRegionMemstoreSize > 0);
     assertTrue(regionMemstoreSize > newRegionMemstoreSize);
 
@@ -766,7 +766,7 @@ public class TestHRegionReplayEvents {
     for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
-    long regionMemstoreSize = secondaryRegion.getMemstoreSize();
+    long regionMemstoreSize = secondaryRegion.getMemStoreSize();
 
     // Test case 1: replay a flush commit marker without start flush marker
     assertNull(secondaryRegion.getPrepareFlushResult());
@@ -795,7 +795,7 @@ public class TestHRegionReplayEvents {
     }
 
     // assert that the region memstore is same as before (we could not drop)
-    long newRegionMemstoreSize = secondaryRegion.getMemstoreSize();
+    long newRegionMemstoreSize = secondaryRegion.getMemStoreSize();
     if (droppableMemstore) {
       assertTrue(0 == newRegionMemstoreSize);
     } else {
@@ -865,7 +865,7 @@ public class TestHRegionReplayEvents {
     for (HStore s : secondaryRegion.getStores()) {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
-    long regionMemstoreSize = secondaryRegion.getMemstoreSize();
+    long regionMemstoreSize = secondaryRegion.getMemStoreSize();
     assertTrue(regionMemstoreSize == 0);
 
     // now replay the region open event that should contain new file locations
@@ -882,7 +882,7 @@ public class TestHRegionReplayEvents {
     assertTrue(newFlushableSize == MutableSegment.DEEP_OVERHEAD);
 
     // assert that the region memstore is empty
-    long newRegionMemstoreSize = secondaryRegion.getMemstoreSize();
+    long newRegionMemstoreSize = secondaryRegion.getMemStoreSize();
     assertTrue(newRegionMemstoreSize == 0);
 
     assertNull(secondaryRegion.getPrepareFlushResult()); //prepare snapshot should be dropped if any
@@ -957,11 +957,11 @@ public class TestHRegionReplayEvents {
       assertEquals(expectedStoreFileCount, s.getStorefilesCount());
     }
     HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
-    MemstoreSize newSnapshotSize = store.getSnapshotSize();
+    MemStoreSize newSnapshotSize = store.getSnapshotSize();
     assertTrue(newSnapshotSize.getDataSize() == 0);
 
     // assert that the region memstore is empty
-    long newRegionMemstoreSize = secondaryRegion.getMemstoreSize();
+    long newRegionMemstoreSize = secondaryRegion.getMemStoreSize();
     assertTrue(newRegionMemstoreSize == 0);
 
     assertNull(secondaryRegion.getPrepareFlushResult()); //prepare snapshot should be dropped if any
@@ -1409,7 +1409,7 @@ public class TestHRegionReplayEvents {
     LOG.info("-- Replaying edits in secondary");
 
     // Test case 4: replay some edits, ensure that memstore is dropped.
-    assertTrue(secondaryRegion.getMemstoreSize() == 0);
+    assertTrue(secondaryRegion.getMemStoreSize() == 0);
     putDataWithFlushes(primaryRegion, 400, 400, 0);
     numRows = 400;
 
@@ -1427,11 +1427,11 @@ public class TestHRegionReplayEvents {
       }
     }
 
-    assertTrue(secondaryRegion.getMemstoreSize() > 0);
+    assertTrue(secondaryRegion.getMemStoreSize() > 0);
 
     secondaryRegion.refreshStoreFiles();
 
-    assertTrue(secondaryRegion.getMemstoreSize() == 0);
+    assertTrue(secondaryRegion.getMemStoreSize() == 0);
 
     LOG.info("-- Verifying edits from primary");
     verifyData(primaryRegion, 0, numRows, cq, families);
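
Across these replay tests the renamed RegionServerAccounting getters assert a single invariant: replayed-but-unflushed edits stay in the global memstore figure until the region releases them. Sketched with the test's own fixtures (rss, secondaryRegion); illustrative only, not part of the patch:

    RegionServerAccounting acct = rss.getRegionServerAccounting();
    assertTrue(acct.getGlobalMemStoreDataSize() > 0);   // replayed edits still resident
    secondaryRegion.close();                            // close releases the un-committed flush memory
    assertEquals(0, acct.getGlobalMemStoreDataSize());  // global accounting back to baseline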

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
index 815166b..7c9b822 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
@@ -249,13 +249,13 @@ public class TestHStore {
         // Initialize region
         init(name.getMethodName(), conf);
 
-        MemstoreSize size = store.memstore.getFlushableSize();
+        MemStoreSize size = store.memstore.getFlushableSize();
         assertEquals(0, size.getDataSize());
         LOG.info("Adding some data");
-        MemstoreSize kvSize = new MemstoreSize();
+        MemStoreSize kvSize = new MemStoreSize();
         store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), kvSize);
         // add the heap size of active (mutable) segment
-        kvSize.incMemstoreSize(0, MutableSegment.DEEP_OVERHEAD);
+        kvSize.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD);
         size = store.memstore.getFlushableSize();
         assertEquals(kvSize, size);
         // Flush.  Bug #1 from HBASE-10466.  Make sure size calculation on failed flush is right.
@@ -267,13 +267,13 @@ public class TestHStore {
           assertTrue(ioe.getMessage().contains("Fault injected"));
         }
         // due to snapshot, change mutable to immutable segment
-        kvSize.incMemstoreSize(0,
+        kvSize.incMemStoreSize(0,
             CSLMImmutableSegment.DEEP_OVERHEAD_CSLM-MutableSegment.DEEP_OVERHEAD);
         size = store.memstore.getFlushableSize();
         assertEquals(kvSize, size);
-        MemstoreSize kvSize2 = new MemstoreSize();
+        MemStoreSize kvSize2 = new MemStoreSize();
         store.add(new KeyValue(row, family, qf2, 2, (byte[])null), kvSize2);
-        kvSize2.incMemstoreSize(0, MutableSegment.DEEP_OVERHEAD);
+        kvSize2.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD);
         // Even though we add a new kv, we expect the flushable size to be 'same' since we have
         // not yet cleared the snapshot -- the above flush failed.
         assertEquals(kvSize, size);
@@ -1182,7 +1182,7 @@ public class TestHStore {
     byte[] value0 = Bytes.toBytes("value0");
     byte[] value1 = Bytes.toBytes("value1");
     byte[] value2 = Bytes.toBytes("value2");
-    MemstoreSize memStoreSize = new MemstoreSize();
+    MemStoreSize memStoreSize = new MemStoreSize();
     long ts = EnvironmentEdgeManager.currentTime();
     long seqId = 100;
     init(name.getMethodName(), conf, TableDescriptorBuilder.newBuilder(TableName.valueOf(table)),
@@ -1241,7 +1241,7 @@ public class TestHStore {
     init(name.getMethodName(), conf, ColumnFamilyDescriptorBuilder.newBuilder(family)
         .setInMemoryCompaction(MemoryCompactionPolicy.BASIC).build());
     byte[] value = Bytes.toBytes("value");
-    MemstoreSize memStoreSize = new MemstoreSize();
+    MemStoreSize memStoreSize = new MemStoreSize();
     long ts = EnvironmentEdgeManager.currentTime();
     long seqId = 100;
     // older data which shouldn't be "seen" by client
@@ -1319,7 +1319,7 @@ public class TestHStore {
     });
     byte[] oldValue = Bytes.toBytes("oldValue");
     byte[] currentValue = Bytes.toBytes("currentValue");
-    MemstoreSize memStoreSize = new MemstoreSize();
+    MemStoreSize memStoreSize = new MemStoreSize();
     long ts = EnvironmentEdgeManager.currentTime();
     long seqId = 100;
     // older data which shouldn't be "seen" by client
@@ -1432,7 +1432,7 @@ public class TestHStore {
     init(name.getMethodName(), conf, ColumnFamilyDescriptorBuilder.newBuilder(family)
         .setInMemoryCompaction(MemoryCompactionPolicy.BASIC).build());
     byte[] value = Bytes.toBytes("thisisavarylargevalue");
-    MemstoreSize memStoreSize = new MemstoreSize();
+    MemStoreSize memStoreSize = new MemStoreSize();
     long ts = EnvironmentEdgeManager.currentTime();
     long seqId = 100;
     // older data which shouldn't be "seen" by client
@@ -1554,7 +1554,7 @@ public class TestHStore {
     conf.setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 0);
     // Set the lower threshold to invoke the "MERGE" policy
     MyStore store = initMyStore(name.getMethodName(), conf, new MyStoreHook() {});
-    MemstoreSize memStoreSize = new MemstoreSize();
+    MemStoreSize memStoreSize = new MemStoreSize();
     long ts = System.currentTimeMillis();
     long seqID = 1l;
     // Add some data to the region and do some flushes
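
The TestHStore hunks center on MemStoreSize arithmetic around a flush. The core comparison, as a sketch against the renamed methods (store, row, family, qf1 are the test's fixtures; not code from the patch):

    MemStoreSize kvSize = new MemStoreSize();
    store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), kvSize);
    // the flushable size also carries the active segment's fixed overhead,
    // so fold it into the expectation before comparing
    kvSize.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD);
    assertEquals(kvSize, store.memstore.getFlushableSize()); // MemStoreSize defines equals()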

http://git-wip-us.apache.org/repos/asf/hbase/blob/d35d8376/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 95a94b4..ba0d309 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -824,7 +824,7 @@ public class TestHeapMemoryManager {
     }
 
     @Override
-    public void setGlobalMemstoreLimit(long globalMemStoreSize) {
+    public void setGlobalMemStoreLimit(long globalMemStoreSize) {
       this.memstoreSize = globalMemStoreSize;
     }
   }
@@ -917,7 +917,7 @@ public class TestHeapMemoryManager {
     public TunerResult tune(TunerContext context) {
       TunerResult result = new TunerResult(true);
       result.setBlockCacheSize(blockCacheSize);
-      result.setMemstoreSize(memstoreSize);
+      result.setMemStoreSize(memstoreSize);
       return result;
     }
   }
@@ -937,12 +937,12 @@ public class TestHeapMemoryManager {
     private long testMemstoreSize = 0;
 
     @Override
-    public long getGlobalMemstoreDataSize() {
+    public long getGlobalMemStoreDataSize() {
       return testMemstoreSize;
     }
 
     @Override
-    public long getGlobalMemstoreHeapSize() {
+    public long getGlobalMemStoreHeapSize() {
       return testMemstoreSize;
     }
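
A practical note these stub hunks illustrate: renaming a method that test doubles override is only caught because @Override turns the stale spelling into a compile error; without it, a stub still named setGlobalMemstoreLimit would silently stop overriding. Minimal illustration with hypothetical names (not HBase API):

    interface TuneObserver {                  // hypothetical stand-in interface
      void setGlobalMemStoreLimit(long newLimit);
    }

    static class ObserverStub implements TuneObserver {
      long limit;
      @Override                               // the old spelling here would fail to compile
      public void setGlobalMemStoreLimit(long newLimit) {
        this.limit = newLimit;
      }
    }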
 
