hbase git commit: HBASE-16747 Track memstore data size and heap overhead separately - Addendum to fix TestIOFencing.
Repository: hbase Updated Branches: refs/heads/master 6ce05d44e -> 79073cd40 HBASE-16747 Track memstore data size and heap overhead separately - Addendum to fix TestIOFencing. Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/79073cd4 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/79073cd4 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/79073cd4 Branch: refs/heads/master Commit: 79073cd40c3b9a32d4509381c9d2763be42045ba Parents: 6ce05d4 Author: anoopsamjohn Authored: Mon Oct 31 11:05:36 2016 +0530 Committer: anoopsamjohn Committed: Mon Oct 31 11:05:36 2016 +0530 -- .../src/test/java/org/apache/hadoop/hbase/TestIOFencing.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/79073cd4/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java index 5856b19..eeb4ebf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java @@ -241,7 +241,7 @@ public class TestIOFencing { // Insert our custom region c.setClass(HConstants.REGION_IMPL, regionClass, HRegion.class); // Encourage plenty of flushes -c.setLong("hbase.hregion.memstore.flush.size", 10); +c.setLong("hbase.hregion.memstore.flush.size", 25000); c.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName()); // Only run compaction when we tell it to c.setInt("hbase.hstore.compaction.min",1);
hbase git commit: HBASE-16954 Unify HTable#checkAndDelete with AP (ChiaPing Tsai)
Repository: hbase Updated Branches: refs/heads/master ba6d95232 -> 6ce05d44e HBASE-16954 Unify HTable#checkAndDelete with AP (ChiaPing Tsai) Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6ce05d44 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6ce05d44 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6ce05d44 Branch: refs/heads/master Commit: 6ce05d44e5f6c78df1eae79d682b49ae7d410a89 Parents: ba6d952 Author: daoye.ch Authored: Mon Oct 31 09:15:29 2016 +0800 Committer: daoye.ch Committed: Mon Oct 31 09:15:29 2016 +0800 -- .../org/apache/hadoop/hbase/client/HTable.java | 24 .../org/apache/hadoop/hbase/SplitLogTask.java | 2 +- 2 files changed, 5 insertions(+), 21 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/6ce05d44/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java -- diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index 8d024dd..b2c012d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -529,8 +529,7 @@ public class HTable implements Table { return ResponseConverter.getResult(request, response, getRpcControllerCellScanner()); } }; -List rows = new ArrayList(); -rows.add(delete); +List rows = Collections.singletonList(delete); AsyncRequestFuture ars = multiAp.submitAll(pool, tableName, rows, null, null, callable, writeRpcTimeout); ars.waitUntilDone(); @@ -762,21 +761,8 @@ public class HTable implements Table { */ @Override public boolean checkAndDelete(final byte [] row, final byte [] family, final byte [] qualifier, - final byte [] value, final Delete delete) - throws IOException { -ClientServiceCallable callable = new ClientServiceCallable(this.connection, getName(), row, -this.rpcControllerFactory.newController()) { - 
@Override - protected Boolean rpcCall() throws Exception { -MutateRequest request = RequestConverter.buildMutateRequest( - getLocation().getRegionInfo().getRegionName(), row, family, qualifier, - new BinaryComparator(value), CompareType.EQUAL, delete); -MutateResponse response = doMutate(request); -return Boolean.valueOf(response.getProcessed()); - } -}; -return rpcCallerFactory. newCaller(this.writeRpcTimeout). -callWithRetries(callable, this.operationTimeout); + final byte [] value, final Delete delete) throws IOException { +return checkAndDelete(row, family, qualifier, CompareOp.EQUAL, value, delete); } /** @@ -801,9 +787,7 @@ public class HTable implements Table { return ResponseConverter.getResult(request, response, getRpcControllerCellScanner()); } }; -List rows = new ArrayList(); -rows.add(delete); - +List rows = Collections.singletonList(delete); Object[] results = new Object[1]; AsyncRequestFuture ars = multiAp.submitAll(pool, tableName, rows, null, results, callable, -1); http://git-wip-us.apache.org/repos/asf/hbase/blob/6ce05d44/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java index 986e5bf..66493e1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java @@ -153,7 +153,7 @@ public class SplitLogTask { /** * @param data Serialized date to parse. * @return An SplitLogTaskState instance made of the passed data - * @throws DeserializationException + * @throws DeserializationException * @see #toByteArray() */ public static SplitLogTask parseFrom(final byte [] data) throws DeserializationException {
[4/4] hbase git commit: HBASE-16747 Track memstore data size and heap overhead separately.
HBASE-16747 Track memstore data size and heap overhead separately. Project: http://git-wip-us.apache.org/repos/asf/hbase/repo Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ba6d9523 Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ba6d9523 Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ba6d9523 Branch: refs/heads/master Commit: ba6d95232401ce533fb8c121ade7f4a864d06f12 Parents: 6127753 Author: anoopsamjohn Authored: Sun Oct 30 12:20:46 2016 +0530 Committer: anoopsamjohn Committed: Sun Oct 30 12:20:46 2016 +0530 -- .../java/org/apache/hadoop/hbase/CellUtil.java | 6 + .../org/apache/hadoop/hbase/ExtendedCell.java | 5 + .../java/org/apache/hadoop/hbase/KeyValue.java | 22 +- .../apache/hadoop/hbase/OffheapKeyValue.java| 9 +- .../apache/hadoop/hbase/SizeCachedKeyValue.java | 11 +- .../io/encoding/BufferedDataBlockEncoder.java | 10 + .../hbase/mob/DefaultMobStoreFlusher.java | 2 +- .../hbase/regionserver/AbstractMemStore.java| 209 ++- .../hbase/regionserver/CompactingMemStore.java | 51 ++-- .../hbase/regionserver/CompactionPipeline.java | 43 +-- .../hbase/regionserver/DefaultMemStore.java | 39 ++- .../hbase/regionserver/DefaultStoreFlusher.java | 2 +- .../regionserver/FlushLargeStoresPolicy.java| 5 +- .../hadoop/hbase/regionserver/HRegion.java | 250 ++ .../hadoop/hbase/regionserver/HStore.java | 92 +++ .../hbase/regionserver/HeapMemoryManager.java | 6 +- .../hbase/regionserver/ImmutableSegment.java| 46 ++-- .../hadoop/hbase/regionserver/MemStore.java | 57 ++-- .../hbase/regionserver/MemStoreFlusher.java | 29 +- .../hbase/regionserver/MemStoreSnapshot.java| 14 +- .../hadoop/hbase/regionserver/MemstoreSize.java | 91 +++ .../MetricsRegionServerWrapperImpl.java | 2 +- .../regionserver/MetricsRegionWrapperImpl.java | 2 +- .../hbase/regionserver/MutableSegment.java | 67 - .../hadoop/hbase/regionserver/Region.java | 6 +- .../regionserver/RegionServerAccounting.java| 54 ++-- .../regionserver/RegionServicesForStores.java | 9 +- 
.../hadoop/hbase/regionserver/Segment.java | 71 +++-- .../apache/hadoop/hbase/regionserver/Store.java | 23 ++ .../hadoop/hbase/client/TestClientPushback.java | 6 +- .../regionserver/TestCompactingMemStore.java| 263 +-- .../TestCompactingToCellArrayMapMemStore.java | 131 + .../hbase/regionserver/TestDefaultMemStore.java | 174 +--- .../hbase/regionserver/TestHMobStore.java | 87 +++--- .../hadoop/hbase/regionserver/TestHRegion.java | 19 +- .../regionserver/TestHRegionReplayEvents.java | 4 +- .../regionserver/TestMemStoreChunkPool.java | 24 +- .../regionserver/TestPerColumnFamilyFlush.java | 104 .../TestRegionMergeTransaction.java | 5 +- .../regionserver/TestReversibleScanners.java| 6 +- .../regionserver/TestSplitTransaction.java | 5 +- .../hadoop/hbase/regionserver/TestStore.java| 258 +++--- .../TestWalAndCompactingMemStoreFlush.java | 252 +++--- .../regionserver/wal/AbstractTestWALReplay.java | 8 +- 44 files changed, 1225 insertions(+), 1354 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hbase/blob/ba6d9523/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java -- diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index 7988352..484eebd 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -381,6 +381,7 @@ public final class CellUtil { private static class TagRewriteCell implements ExtendedCell { protected Cell cell; protected byte[] tags; +private static final long HEAP_SIZE_OVERHEAD = 2 * ClassSize.REFERENCE + ClassSize.ARRAY; /** * @param cell The original Cell which it rewrites @@ -552,6 +553,11 @@ public final class CellUtil { offset = Bytes.putAsShort(buf, offset, tagsLen); System.arraycopy(this.tags, 0, buf, offset, tagsLen); } + +@Override +public long heapOverhead() { + return ((ExtendedCell) this.cell).heapOverhead() + HEAP_SIZE_OVERHEAD; +} } /** 
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba6d9523/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java -- diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
[2/4] hbase git commit: HBASE-16747 Track memstore data size and heap overhead separately.
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba6d9523/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index 11f43d5..433388d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -105,10 +105,10 @@ public class TestDefaultMemStore { public void testPutSameKey() { byte[] bytes = Bytes.toBytes(getName()); KeyValue kv = new KeyValue(bytes, bytes, bytes, bytes); -this.memstore.add(kv); +this.memstore.add(kv, null); byte[] other = Bytes.toBytes("somethingelse"); KeyValue samekey = new KeyValue(bytes, bytes, bytes, other); -this.memstore.add(samekey); +this.memstore.add(samekey, null); Cell found = this.memstore.getActive().first(); assertEquals(1, this.memstore.getActive().getCellsCount()); assertTrue(Bytes.toString(found.getValueArray()), CellUtil.matchingValue(samekey, found)); @@ -118,23 +118,28 @@ public class TestDefaultMemStore { public void testPutSameCell() { byte[] bytes = Bytes.toBytes(getName()); KeyValue kv = new KeyValue(bytes, bytes, bytes, bytes); -long sizeChangeForFirstCell = this.memstore.add(kv); -long sizeChangeForSecondCell = this.memstore.add(kv); +MemstoreSize sizeChangeForFirstCell = new MemstoreSize(); +this.memstore.add(kv, sizeChangeForFirstCell); +MemstoreSize sizeChangeForSecondCell = new MemstoreSize(); +this.memstore.add(kv, sizeChangeForSecondCell); // make sure memstore size increase won't double-count MSLAB chunk size -assertEquals(AbstractMemStore.heapSizeChange(kv, true), sizeChangeForFirstCell); +assertEquals(Segment.getCellLength(kv), sizeChangeForFirstCell.getDataSize()); +assertEquals(this.memstore.active.heapOverheadChange(kv, true), 
+sizeChangeForFirstCell.getHeapOverhead()); Segment segment = this.memstore.getActive(); MemStoreLAB msLab = segment.getMemStoreLAB(); if (msLab != null) { // make sure memstore size increased even when writing the same cell, if using MSLAB - assertEquals(segment.getCellLength(kv), sizeChangeForSecondCell); + assertEquals(Segment.getCellLength(kv), sizeChangeForSecondCell.getDataSize()); // make sure chunk size increased even when writing the same cell, if using MSLAB if (msLab instanceof HeapMemStoreLAB) { -assertEquals(2 * segment.getCellLength(kv), +assertEquals(2 * Segment.getCellLength(kv), ((HeapMemStoreLAB) msLab).getCurrentChunk().getNextFreeOffset()); } } else { // make sure no memstore size change w/o MSLAB - assertEquals(0, sizeChangeForSecondCell); + assertEquals(0, sizeChangeForSecondCell.getDataSize()); + assertEquals(0, sizeChangeForSecondCell.getHeapOverhead()); } } @@ -244,8 +249,8 @@ public class TestDefaultMemStore { final KeyValue kv2 = new KeyValue(two, f, q, v); // use case 1: both kvs in kvset -this.memstore.add(kv1.clone()); -this.memstore.add(kv2.clone()); +this.memstore.add(kv1.clone(), null); +this.memstore.add(kv2.clone(), null); verifyScanAcrossSnapshot2(kv1, kv2); // use case 2: both kvs in snapshot @@ -254,9 +259,9 @@ public class TestDefaultMemStore { // use case 3: first in snapshot second in kvset this.memstore = new DefaultMemStore(); -this.memstore.add(kv1.clone()); +this.memstore.add(kv1.clone(), null); this.memstore.snapshot(); -this.memstore.add(kv2.clone()); +this.memstore.add(kv2.clone(), null); verifyScanAcrossSnapshot2(kv1, kv2); } @@ -302,7 +307,7 @@ public class TestDefaultMemStore { KeyValue kv1 = new KeyValue(row, f, q1, v); kv1.setSequenceId(w.getWriteNumber()); -memstore.add(kv1); +memstore.add(kv1, null); KeyValueScanner s = this.memstore.getScanners(mvcc.getReadPoint()).get(0); assertScannerResults(s, new KeyValue[]{}); @@ -315,7 +320,7 @@ public class TestDefaultMemStore { w = mvcc.begin(); KeyValue kv2 = new 
KeyValue(row, f, q2, v); kv2.setSequenceId(w.getWriteNumber()); -memstore.add(kv2); +memstore.add(kv2, null); s = this.memstore.getScanners(mvcc.getReadPoint()).get(0); assertScannerResults(s, new KeyValue[]{kv1}); @@ -347,11 +352,11 @@ public class TestDefaultMemStore { KeyValue kv11 = new KeyValue(row, f, q1, v1); kv11.setSequenceId(w.getWriteNumber()); -memstore.add(kv11); +memstore.add(kv11, null); KeyValue kv12 = new KeyValue(row, f, q2, v1); kv12.setSequenceId(w.getWriteNumber()); -memstore.add(kv12); +
[3/4] hbase git commit: HBASE-16747 Track memstore data size and heap overhead separately.
http://git-wip-us.apache.org/repos/asf/hbase/blob/ba6d9523/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java -- diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index bcaf3a2..b094476 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -23,7 +23,6 @@ import java.util.List; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; -import org.apache.hadoop.hbase.io.HeapSize; /** * The MemStore holds in-memory modifications to the Store. Modifications are {@link Cell}s. @@ -33,7 +32,7 @@ import org.apache.hadoop.hbase.io.HeapSize; * */ @InterfaceAudience.Private -public interface MemStore extends HeapSize { +public interface MemStore { /** * Creates a snapshot of the current memstore. Snapshot must be cleared by call to @@ -58,27 +57,29 @@ public interface MemStore extends HeapSize { * * @return size of data that is going to be flushed */ - long getFlushableSize(); + MemstoreSize getFlushableSize(); /** * Return the size of the snapshot(s) if any * @return size of the memstore snapshot */ - long getSnapshotSize(); + MemstoreSize getSnapshotSize(); /** * Write an update * @param cell - * @return approximate size of the passed cell. + * @param memstoreSize The delta in memstore size will be passed back via this. + *This will include both data size and heap overhead delta. */ - long add(final Cell cell); + void add(final Cell cell, MemstoreSize memstoreSize); /** * Write the updates * @param cells - * @return approximate size of the passed cell. + * @param memstoreSize The delta in memstore size will be passed back via this. + *This will include both data size and heap overhead delta. 
*/ - long add(Iterable cells); + void add(Iterable cells, MemstoreSize memstoreSize); /** * @return Oldest timestamp of all the Cells in the MemStore @@ -86,30 +87,6 @@ public interface MemStore extends HeapSize { long timeOfOldestEdit(); /** - * Write a delete - * @param deleteCell - * @return approximate size of the passed key and value. - */ - long delete(final Cell deleteCell); - - /** - * Given the specs of a column, update it, first by inserting a new record, - * then removing the old one. Since there is only 1 KeyValue involved, the memstoreTS - * will be set to 0, thus ensuring that they instantly appear to anyone. The underlying - * store will ensure that the insert/delete each are atomic. A scanner/reader will either - * get the new value, or the old value and all readers will eventually only see the new - * value after the old was removed. - * - * @param row - * @param family - * @param qualifier - * @param newValue - * @param now - * @return Timestamp - */ - long updateColumnValue(byte[] row, byte[] family, byte[] qualifier, long newValue, long now); - - /** * Update or insert the specified cells. * * For each Cell, insert into MemStore. This will atomically upsert the value for that @@ -122,9 +99,10 @@ public interface MemStore extends HeapSize { * only see each KeyValue update as atomic. * @param cells * @param readpoint readpoint below which we can safely remove duplicate Cells. - * @return change in memstore size + * @param memstoreSize The delta in memstore size will be passed back via this. + *This will include both data size and heap overhead delta. */ - long upsert(Iterable cells, long readpoint); + void upsert(Iterable cells, long readpoint, MemstoreSize memstoreSize); /** * @return scanner over the memstore. This might include scanner over the snapshot when one is @@ -133,13 +111,12 @@ public interface MemStore extends HeapSize { List getScanners(long readPt) throws IOException; /** - * @return Total memory occupied by this MemStore. 
This includes active segment size and heap size - * overhead of this memstore but won't include any size occupied by the snapshot. We - * assume the snapshot will get cleared soon. This is not thread safe and the memstore may - * be changed while computing its size. It is the responsibility of the caller to make - * sure this doesn't happen. + * @return Total memory occupied by this MemStore. This won't include any size occupied by the + * snapshot. We assume the snapshot will get cleared soon. This is not thread safe and + * the memstore may be changed while computing its size. It is the responsibility of
[1/4] hbase git commit: HBASE-16747 Track memstore data size and heap overhead separately.
Repository: hbase Updated Branches: refs/heads/master 6127753b6 -> ba6d95232 http://git-wip-us.apache.org/repos/asf/hbase/blob/ba6d9523/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java -- diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java index 277eb48..35159b6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java @@ -127,10 +127,10 @@ public class TestWalAndCompactingMemStoreFlush { // Set up the configuration Configuration conf = HBaseConfiguration.create(); -conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 600 * 1024); +conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 300 * 1024); conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushNonSloppyStoresFirstPolicy.class.getName()); - conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 200 * 1024); + conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024); conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.25); // set memstore to do data compaction conf.set("hbase.hregion.compacting.memstore.type", "data-compaction"); @@ -164,9 +164,9 @@ public class TestWalAndCompactingMemStoreFlush { long smallestSeqCF3PhaseI = region.getOldestSeqIdOfStore(FAMILY3); // Find the sizes of the memstores of each CF. 
-long cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getMemStoreSize(); -long cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getMemStoreSize(); -long cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getMemStoreSize(); +MemstoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getSizeOfMemStore(); +MemstoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getSizeOfMemStore(); +MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getSizeOfMemStore(); // Get the overall smallest LSN in the region's memstores. long smallestSeqInRegionCurrentMemstorePhaseI = getWAL(region) @@ -188,22 +188,18 @@ public class TestWalAndCompactingMemStoreFlush { // Some other sanity checks. assertTrue(smallestSeqCF1PhaseI < smallestSeqCF2PhaseI); assertTrue(smallestSeqCF2PhaseI < smallestSeqCF3PhaseI); -assertTrue(cf1MemstoreSizePhaseI > 0); -assertTrue(cf2MemstoreSizePhaseI > 0); -assertTrue(cf3MemstoreSizePhaseI > 0); +assertTrue(cf1MemstoreSizePhaseI.getDataSize() > 0); +assertTrue(cf2MemstoreSizePhaseI.getDataSize() > 0); +assertTrue(cf3MemstoreSizePhaseI.getDataSize() > 0); // The total memstore size should be the same as the sum of the sizes of // memstores of CF1, CF2 and CF3. String msg = "totalMemstoreSize="+totalMemstoreSize + -" DefaultMemStore.DEEP_OVERHEAD="+DefaultMemStore.DEEP_OVERHEAD + -" CompactingMemStore.DEEP_OVERHEAD="+CompactingMemStore.DEEP_OVERHEAD + " cf1MemstoreSizePhaseI="+cf1MemstoreSizePhaseI + " cf2MemstoreSizePhaseI="+cf2MemstoreSizePhaseI + " cf3MemstoreSizePhaseI="+cf3MemstoreSizePhaseI ; -assertEquals(msg, -totalMemstoreSize + 2 * (CompactingMemStore.DEEP_OVERHEAD + MutableSegment.DEEP_OVERHEAD) -+ (DefaultMemStore.DEEP_OVERHEAD + MutableSegment.DEEP_OVERHEAD), -cf1MemstoreSizePhaseI + cf2MemstoreSizePhaseI + cf3MemstoreSizePhaseI); +assertEquals(msg, totalMemstoreSize, cf1MemstoreSizePhaseI.getDataSize() ++ cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize()); // Flush!! 
// We have big compacting memstore CF1 and two small memstores: @@ -219,9 +215,9 @@ public class TestWalAndCompactingMemStoreFlush { region.flush(false); // Recalculate everything -long cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getMemStoreSize(); -long cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize(); -long cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getMemStoreSize(); +MemstoreSize cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getSizeOfMemStore(); +MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getSizeOfMemStore(); +MemstoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getSizeOfMemStore(); long smallestSeqInRegionCurrentMemstorePhaseII = getWAL(region) .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); @@ -230,29 +226,21 @@ public class TestWalAndCompactingMemStoreFlush { long smallestSeqCF2PhaseII = region.getOldestSeqIdOfStore(FAMILY2); long