Repository: hbase
Updated Branches:
  refs/heads/master 8b63eb6fc -> 8ac430841
http://git-wip-us.apache.org/repos/asf/hbase/blob/8ac43084/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
deleted file mode 100644
index 66e107a..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
+++ /dev/null
@@ -1,492 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Threads;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import java.io.IOException;
-import java.util.List;
-
-
-
-/**
- * compacted memstore test case
- */
-@Category({RegionServerTests.class, MediumTests.class})
-public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore {
-
-  private static final Log LOG = LogFactory.getLog(TestCompactingToCellArrayMapMemStore.class);
-
-  //////////////////////////////////////////////////////////////////////////////
-  // Helpers
-  //////////////////////////////////////////////////////////////////////////////
-
-  @Override public void tearDown() throws Exception {
-    chunkCreator.clearChunksInPool();
-  }
-
-  @Override public void setUp() throws Exception {
-    compactingSetUp();
-    Configuration conf = HBaseConfiguration.create();
-
-    // set memstore to do data compaction
-    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(MemoryCompactionPolicy.EAGER));
-
-    this.memstore =
-        new CompactingMemStore(conf, CellComparator.COMPARATOR, store,
-            regionServicesForStores, MemoryCompactionPolicy.EAGER);
-  }
-
-  //////////////////////////////////////////////////////////////////////////////
-  // Compaction tests
-  //////////////////////////////////////////////////////////////////////////////
-  public void testCompaction1Bucket() throws IOException {
-    int counter = 0;
-    String[] keys1 = { "A", "A", "B", "C" }; //A1, A2, B3, C4
-
-    // test 1 bucket
-    long totalCellsLen = addRowsByKeys(memstore, keys1);
-    int oneCellOnCSLMHeapSize = 120;
-    int oneCellOnCAHeapSize = 88;
-    long totalHeapSize = 4 * oneCellOnCSLMHeapSize;
-    assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
-
-    assertEquals(4, memstore.getActive().getCellsCount());
-    MemstoreSize size = memstore.getFlushableSize();
-    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
-    assertEquals(0, memstore.getSnapshot().getCellsCount());
-    // One cell is duplicated and the compaction will remove it. All cells of same size so adjusting
-    // totalCellsLen
-    totalCellsLen = (totalCellsLen * 3) / 4;
-    assertEquals(totalCellsLen, regionServicesForStores.getMemstoreSize());
-    totalHeapSize = 3 * oneCellOnCAHeapSize;
-    assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize());
-    for ( Segment s : memstore.getSegments()) {
-      counter += s.getCellsCount();
-    }
-    assertEquals(3, counter);
-    size = memstore.getFlushableSize();
-    MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
-    region.decrMemstoreSize(size);  // simulate flusher
-    ImmutableSegment s = memstore.getSnapshot();
-    assertEquals(3, s.getCellsCount());
-    assertEquals(0, regionServicesForStores.getMemstoreSize());
-
-    memstore.clearSnapshot(snapshot.getId());
-  }
-
-  public void testCompaction2Buckets() throws IOException {
-
-    String[] keys1 = { "A", "A", "B", "C" };
-    String[] keys2 = { "A", "B", "D" };
-
-    long totalCellsLen1 = addRowsByKeys(memstore, keys1);
-    int oneCellOnCSLMHeapSize = 120;
-    int oneCellOnCAHeapSize = 88;
-    long totalHeapSize1 = 4 * oneCellOnCSLMHeapSize;
-    assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
-    MemstoreSize size = memstore.getFlushableSize();
-
-    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
-    int counter = 0;
-    for ( Segment s : memstore.getSegments()) {
-      counter += s.getCellsCount();
-    }
-    assertEquals(3,counter);
-    assertEquals(0, memstore.getSnapshot().getCellsCount());
-    // One cell is duplicated and the compaction will remove it. All cells of same size so adjusting
-    // totalCellsLen
-    totalCellsLen1 = (totalCellsLen1 * 3) / 4;
-    totalHeapSize1 = 3 * oneCellOnCAHeapSize;
-    assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
-
-    long totalCellsLen2 = addRowsByKeys(memstore, keys2);
-    long totalHeapSize2 = 3 * oneCellOnCSLMHeapSize;
-    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
-
-    size = memstore.getFlushableSize();
-    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
-    assertEquals(0, memstore.getSnapshot().getCellsCount());
-    counter = 0;
-    for ( Segment s : memstore.getSegments()) {
-      counter += s.getCellsCount();
-    }
-    assertEquals(4,counter);
-    totalCellsLen2 = totalCellsLen2 / 3;// 2 cells duplicated in set 2
-    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
-    totalHeapSize2 = 1 * oneCellOnCAHeapSize;
-    assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
-
-    size = memstore.getFlushableSize();
-    MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
-    region.decrMemstoreSize(size);  // simulate flusher
-    ImmutableSegment s = memstore.getSnapshot();
-    assertEquals(4, s.getCellsCount());
-    assertEquals(0, regionServicesForStores.getMemstoreSize());
-
-    memstore.clearSnapshot(snapshot.getId());
-  }
-
-  public void testCompaction3Buckets() throws IOException {
-
-    String[] keys1 = { "A", "A", "B", "C" };
-    String[] keys2 = { "A", "B", "D" };
-    String[] keys3 = { "D", "B", "B" };
-
-    long totalCellsLen1 = addRowsByKeys(memstore, keys1);
-    int oneCellOnCSLMHeapSize = 120;
-    int oneCellOnCAHeapSize = 88;
-    long totalHeapSize1 = 4 * oneCellOnCSLMHeapSize;
-    assertEquals(totalCellsLen1, region.getMemstoreSize());
-    assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
-
-    MemstoreSize size = memstore.getFlushableSize();
-    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
-
-    assertEquals(0, memstore.getSnapshot().getCellsCount());
-    // One cell is duplicated and the compaction will remove it. All cells of same size so adjusting
-    // totalCellsLen
-    totalCellsLen1 = (totalCellsLen1 * 3) / 4;
-    totalHeapSize1 = 3 * oneCellOnCAHeapSize;
-    assertEquals(totalCellsLen1, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize());
-
-    long totalCellsLen2 = addRowsByKeys(memstore, keys2);
-    long totalHeapSize2 = 3 * oneCellOnCSLMHeapSize;
-
-    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
-
-    ((CompactingMemStore) memstore).disableCompaction();
-    size = memstore.getFlushableSize();
-    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline without compaction
-    assertEquals(0, memstore.getSnapshot().getCellsCount());
-    assertEquals(totalCellsLen1 + totalCellsLen2, regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapSize1 + totalHeapSize2, ((CompactingMemStore) memstore).heapSize());
-
-    long totalCellsLen3 = addRowsByKeys(memstore, keys3);
-    long totalHeapSize3 = 3 * oneCellOnCSLMHeapSize;
-    assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
-        regionServicesForStores.getMemstoreSize());
-    assertEquals(totalHeapSize1 + totalHeapSize2 + totalHeapSize3,
-        ((CompactingMemStore) memstore).heapSize());
-
-    ((CompactingMemStore) memstore).enableCompaction();
-    size = memstore.getFlushableSize();
-    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
-    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
-      Threads.sleep(10);
-    }
-    assertEquals(0, memstore.getSnapshot().getCellsCount());
-    // active flushed to pipeline and all 3 segments compacted. Will get rid of duplicated cells.
-    // Out of total 10, only 4 cells are unique
-    totalCellsLen2 = totalCellsLen2 / 3;// 2 out of 3 cells are duplicated
-    totalCellsLen3 = 0;// All duplicated cells.
-    assertEquals(totalCellsLen1 + totalCellsLen2 + totalCellsLen3,
-        regionServicesForStores.getMemstoreSize());
-    // Only 4 unique cells left
-    assertEquals(4 * oneCellOnCAHeapSize, ((CompactingMemStore) memstore).heapSize());
-
-    size = memstore.getFlushableSize();
-    MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
-    region.decrMemstoreSize(size);  // simulate flusher
-    ImmutableSegment s = memstore.getSnapshot();
-    assertEquals(4, s.getCellsCount());
-    assertEquals(0, regionServicesForStores.getMemstoreSize());
-
-    memstore.clearSnapshot(snapshot.getId());
-
-  }
-
-  //////////////////////////////////////////////////////////////////////////////
-  // Merging tests
-  //////////////////////////////////////////////////////////////////////////////
-  @Test
-  public void testMerging() throws IOException {
-
-    String[] keys1 = { "A", "A", "B", "C", "F", "H"};
-    String[] keys2 = { "A", "B", "D", "G", "I", "J"};
-    String[] keys3 = { "D", "B", "B", "E" };
-
-    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
-    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(compactionType));
-    ((CompactingMemStore)memstore).initiateType(compactionType);
-    addRowsByKeys(memstore, keys1);
-
-    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline should not compact
-
-    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
-      Threads.sleep(10);
-    }
-    assertEquals(0, memstore.getSnapshot().getCellsCount());
-
-    addRowsByKeys(memstore, keys2); // also should only flatten
-
-    int counter2 = 0;
-    for ( Segment s : memstore.getSegments()) {
-      counter2 += s.getCellsCount();
-    }
-    assertEquals(12, counter2);
-
-    ((CompactingMemStore) memstore).disableCompaction();
-
-    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline without flattening
-    assertEquals(0, memstore.getSnapshot().getCellsCount());
-
-    int counter3 = 0;
-    for ( Segment s : memstore.getSegments()) {
-      counter3 += s.getCellsCount();
-    }
-    assertEquals(12, counter3);
-
-    addRowsByKeys(memstore, keys3);
-
-    int counter4 = 0;
-    for ( Segment s : memstore.getSegments()) {
-      counter4 += s.getCellsCount();
-    }
-    assertEquals(16, counter4);
-
-    ((CompactingMemStore) memstore).enableCompaction();
-
-    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
-    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
-      Threads.sleep(10);
-    }
-    assertEquals(0, memstore.getSnapshot().getCellsCount());
-
-    int counter = 0;
-    for ( Segment s : memstore.getSegments()) {
-      counter += s.getCellsCount();
-    }
-    assertEquals(16,counter);
-
-    MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
-    ImmutableSegment s = memstore.getSnapshot();
-    memstore.clearSnapshot(snapshot.getId());
-  }
-
-  @Test
-  public void testCountOfCellsAfterFlatteningByScan() throws IOException {
-    String[] keys1 = { "A", "B", "C" }; // A, B, C
-    addRowsByKeysWith50Cols(memstore, keys1);
-    // this should only flatten as there are no duplicates
-    ((CompactingMemStore) memstore).flushInMemory();
-    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
-      Threads.sleep(10);
-    }
-    List<KeyValueScanner> scanners = memstore.getScanners(Long.MAX_VALUE);
-    // seek
-    int count = 0;
-    for(int i = 0; i < scanners.size(); i++) {
-      scanners.get(i).seek(KeyValue.LOWESTKEY);
-      while (scanners.get(i).next() != null) {
-        count++;
-      }
-    }
-    assertEquals("the count should be ", count, 150);
-    for(int i = 0; i < scanners.size(); i++) {
-      scanners.get(i).close();
-    }
-  }
-
-  @Test
-  public void testCountOfCellsAfterFlatteningByIterator() throws IOException {
-    String[] keys1 = { "A", "B", "C" }; // A, B, C
-    addRowsByKeysWith50Cols(memstore, keys1);
-    // this should only flatten as there are no duplicates
-    ((CompactingMemStore) memstore).flushInMemory();
-    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
-      Threads.sleep(10);
-    }
-    // Just doing the cnt operation here
-    MemStoreSegmentsIterator itr = new MemStoreMergerSegmentsIterator(
-        ((CompactingMemStore) memstore).getImmutableSegments().getStoreSegments(),
-        CellComparator.COMPARATOR, 10);
-    int cnt = 0;
-    try {
-      while (itr.next() != null) {
-        cnt++;
-      }
-    } finally {
-      itr.close();
-    }
-    assertEquals("the count should be ", cnt, 150);
-  }
-
-
-  private void addRowsByKeysWith50Cols(AbstractMemStore hmc, String[] keys) {
-    byte[] fam = Bytes.toBytes("testfamily");
-    for (int i = 0; i < keys.length; i++) {
-      long timestamp = System.currentTimeMillis();
-      Threads.sleep(1); // to make sure each kv gets a different ts
-      byte[] row = Bytes.toBytes(keys[i]);
-      for(int j =0 ;j < 50; j++) {
-        byte[] qf = Bytes.toBytes("testqualifier"+j);
-        byte[] val = Bytes.toBytes(keys[i] + j);
-        KeyValue kv = new KeyValue(row, fam, qf, timestamp, val);
-        hmc.add(kv, null);
-      }
-    }
-  }
-
-  @Override
-  @Test
-  public void testPuttingBackChunksWithOpeningScanner() throws IOException {
-    byte[] row = Bytes.toBytes("testrow");
-    byte[] fam = Bytes.toBytes("testfamily");
-    byte[] qf1 = Bytes.toBytes("testqualifier1");
-    byte[] qf2 = Bytes.toBytes("testqualifier2");
-    byte[] qf3 = Bytes.toBytes("testqualifier3");
-    byte[] qf4 = Bytes.toBytes("testqualifier4");
-    byte[] qf5 = Bytes.toBytes("testqualifier5");
-    byte[] qf6 = Bytes.toBytes("testqualifier6");
-    byte[] qf7 = Bytes.toBytes("testqualifier7");
-    byte[] val = Bytes.toBytes("testval");
-
-    // Setting up memstore
-    memstore.add(new KeyValue(row, fam, qf1, val), null);
-    memstore.add(new KeyValue(row, fam, qf2, val), null);
-    memstore.add(new KeyValue(row, fam, qf3, val), null);
-
-    // Creating a snapshot
-    MemStoreSnapshot snapshot = memstore.snapshot();
-    assertEquals(3, memstore.getSnapshot().getCellsCount());
-
-    // Adding value to "new" memstore
-    assertEquals(0, memstore.getActive().getCellsCount());
-    memstore.add(new KeyValue(row, fam, qf4, val), null);
-    memstore.add(new KeyValue(row, fam, qf5, val), null);
-    assertEquals(2, memstore.getActive().getCellsCount());
-
-    // opening scanner before clear the snapshot
-    List<KeyValueScanner> scanners = memstore.getScanners(0);
-    // Shouldn't putting back the chunks to pool,since some scanners are opening
-    // based on their data
-    // close the scanners
-    for(KeyValueScanner scanner : snapshot.getScanners()) {
-      scanner.close();
-    }
-    memstore.clearSnapshot(snapshot.getId());
-
-    assertTrue(chunkCreator.getPoolSize() == 0);
-
-    // Chunks will be put back to pool after close scanners;
-    for (KeyValueScanner scanner : scanners) {
-      scanner.close();
-    }
-    assertTrue(chunkCreator.getPoolSize() > 0);
-
-    // clear chunks
-    chunkCreator.clearChunksInPool();
-
-    // Creating another snapshot
-
-    snapshot = memstore.snapshot();
-    // Adding more value
-    memstore.add(new KeyValue(row, fam, qf6, val), null);
-    memstore.add(new KeyValue(row, fam, qf7, val), null);
-    // opening scanners
-    scanners = memstore.getScanners(0);
-    // close scanners before clear the snapshot
-    for (KeyValueScanner scanner : scanners) {
-      scanner.close();
-    }
-    // Since no opening scanner, the chunks of snapshot should be put back to
-    // pool
-    // close the scanners
-    for(KeyValueScanner scanner : snapshot.getScanners()) {
-      scanner.close();
-    }
-    memstore.clearSnapshot(snapshot.getId());
-    assertTrue(chunkCreator.getPoolSize() > 0);
-  }
-
-  @Test
-  public void testPuttingBackChunksAfterFlushing() throws IOException {
-    byte[] row = Bytes.toBytes("testrow");
-    byte[] fam = Bytes.toBytes("testfamily");
-    byte[] qf1 = Bytes.toBytes("testqualifier1");
-    byte[] qf2 = Bytes.toBytes("testqualifier2");
-    byte[] qf3 = Bytes.toBytes("testqualifier3");
-    byte[] qf4 = Bytes.toBytes("testqualifier4");
-    byte[] qf5 = Bytes.toBytes("testqualifier5");
-    byte[] val = Bytes.toBytes("testval");
-
-    // Setting up memstore
-    memstore.add(new KeyValue(row, fam, qf1, val), null);
-    memstore.add(new KeyValue(row, fam, qf2, val), null);
-    memstore.add(new KeyValue(row, fam, qf3, val), null);
-
-    // Creating a snapshot
-    MemStoreSnapshot snapshot = memstore.snapshot();
-    assertEquals(3, memstore.getSnapshot().getCellsCount());
-
-    // Adding value to "new" memstore
-    assertEquals(0, memstore.getActive().getCellsCount());
-    memstore.add(new KeyValue(row, fam, qf4, val), null);
-    memstore.add(new KeyValue(row, fam, qf5, val), null);
-    assertEquals(2, memstore.getActive().getCellsCount());
-    // close the scanners
-    for(KeyValueScanner scanner : snapshot.getScanners()) {
-      scanner.close();
-    }
-    memstore.clearSnapshot(snapshot.getId());
-
-    int chunkCount = chunkCreator.getPoolSize();
-    assertTrue(chunkCount > 0);
-  }
-
-
-  private long addRowsByKeys(final AbstractMemStore hmc, String[] keys) {
-    byte[] fam = Bytes.toBytes("testfamily");
-    byte[] qf = Bytes.toBytes("testqualifier");
-    MemstoreSize memstoreSize = new MemstoreSize();
-    for (int i = 0; i < keys.length; i++) {
-      long timestamp = System.currentTimeMillis();
-      Threads.sleep(1); // to make sure each kv gets a different ts
-      byte[] row = Bytes.toBytes(keys[i]);
-      byte[] val = Bytes.toBytes(keys[i] + i);
-      KeyValue kv = new KeyValue(row, fam, qf, timestamp, val);
-      hmc.add(kv, memstoreSize);
-      LOG.debug("added kv: " + kv.getKeyString() + ", timestamp" + kv.getTimestamp());
-    }
-    regionServicesForStores.addMemstoreSize(memstoreSize);
-    return memstoreSize.getDataSize();
-  }
-}
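
Note: for context on the heap-size arithmetic the deleted test asserted, here is a small standalone sketch (not part of the patch), assuming the per-cell overheads the test itself quoted: 120 bytes per cell while in the CSLM-backed active segment, 88 bytes per cell once in a CellArrayMap-backed immutable segment.

// Standalone sketch of the deleted test's heap accounting; the constants are
// taken from the test above, the class itself is hypothetical.
public class HeapAccountingSketch {
  static final int ONE_CELL_ON_CSLM = 120; // per-cell overhead in ConcurrentSkipListMap
  static final int ONE_CELL_ON_CAM = 88;   // per-cell overhead in CellArrayMap

  public static void main(String[] args) {
    int inserted = 4; // keys { "A", "A", "B", "C" }
    int unique = 3;   // eager in-memory compaction drops the duplicate "A"
    long beforeFlush = inserted * ONE_CELL_ON_CSLM; // 480, asserted before flushInMemory()
    long afterCompact = unique * ONE_CELL_ON_CAM;   // 264, asserted after compaction
    System.out.println(beforeFlush + " -> " + afterCompact);
  }
}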
http://git-wip-us.apache.org/repos/asf/hbase/blob/8ac43084/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index 3b15ff3..439f3d4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -965,9 +965,12 @@
         conf, FSTableDescriptors.createMetaTableDescriptor(conf),
         wFactory.getMetaWAL(HRegionInfo.FIRST_META_REGIONINFO.
             getEncodedNameAsBytes()));
-    HRegionInfo hri = new HRegionInfo(TableName.valueOf(name.getMethodName()),
+    // parameterized tests add a [#] suffix; get rid of [ and ]
+    HRegionInfo hri =
+        new HRegionInfo(TableName.valueOf(name.getMethodName().replaceAll("[\\[\\]]", "_")),
             Bytes.toBytes("row_0200"), Bytes.toBytes("row_0300"));
-    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
+    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(
+        name.getMethodName().replaceAll("[\\[\\]]", "_")));
     desc.addFamily(new HColumnDescriptor("foo".getBytes()));
     HRegion r =
         HRegion.createHRegion(hri, testDir, conf, desc,
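
Note: the replaceAll above matters because a parameterized JUnit run reports method names with a bracketed index, and brackets are not legal table-name characters, so TableName.valueOf() would reject the raw name. A minimal illustration (the sample method name is hypothetical):

// Illustration of the name cleanup introduced above; sample name is hypothetical.
public class NameSanitizeSketch {
  public static void main(String[] args) {
    String methodName = "testShouldFlush[1]"; // what JUnit reports for a parameterized run
    // same regex as the patch: replace '[' and ']' with '_'
    System.out.println(methodName.replaceAll("[\\[\\]]", "_")); // prints testShouldFlush_1_
  }
}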
http://git-wip-us.apache.org/repos/asf/hbase/blob/8ac43084/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index 5467c3f..f115b34 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -787,7 +787,8 @@
     Store store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
     long newFlushableSize = store.getFlushableSize();
     if (droppableMemstore) {
-      assertTrue(newFlushableSize == 0); // assert that the memstore is dropped
+      // assert that the memstore is dropped
+      assertTrue(newFlushableSize == MutableSegment.DEEP_OVERHEAD);
     } else {
       assertTrue(newFlushableSize > 0); // assert that the memstore is not dropped
     }
@@ -877,7 +878,7 @@
     }
     Store store = secondaryRegion.getStore(Bytes.toBytes("cf1"));
     long newFlushableSize = store.getFlushableSize();
-    assertTrue(newFlushableSize == 0);
+    assertTrue(newFlushableSize == MutableSegment.DEEP_OVERHEAD);
 
     // assert that the region memstore is empty
     long newRegionMemstoreSize = secondaryRegion.getMemstoreSize();

http://git-wip-us.apache.org/repos/asf/hbase/blob/8ac43084/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
index 6e5cbf8..3c0c1cc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
@@ -194,7 +194,7 @@ public class TestPerColumnFamilyFlush {
     // We should have cleared out only CF1, since we chose the flush thresholds
     // and number of puts accordingly.
     assertEquals(0, cf1MemstoreSize.getDataSize());
-    assertEquals(0, cf1MemstoreSize.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf1MemstoreSize.getHeapSize());
     // Nothing should have happened to CF2, ...
     assertEquals(cf2MemstoreSize, oldCF2MemstoreSize);
     // ... or CF3
@@ -231,9 +231,9 @@ public class TestPerColumnFamilyFlush {
 
     // CF1 and CF2, both should be absent.
     assertEquals(0, cf1MemstoreSize.getDataSize());
-    assertEquals(0, cf1MemstoreSize.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf1MemstoreSize.getHeapSize());
     assertEquals(0, cf2MemstoreSize.getDataSize());
-    assertEquals(0, cf2MemstoreSize.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf2MemstoreSize.getHeapSize());
     // CF3 shouldn't have been touched.
     assertEquals(cf3MemstoreSize, oldCF3MemstoreSize);
     assertEquals(totalMemstoreSize, cf3MemstoreSize.getDataSize());
@@ -314,11 +314,11 @@ public class TestPerColumnFamilyFlush {
 
     // Everything should have been cleared
     assertEquals(0, cf1MemstoreSize.getDataSize());
-    assertEquals(0, cf1MemstoreSize.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf1MemstoreSize.getHeapSize());
     assertEquals(0, cf2MemstoreSize.getDataSize());
-    assertEquals(0, cf2MemstoreSize.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf2MemstoreSize.getHeapSize());
     assertEquals(0, cf3MemstoreSize.getDataSize());
-    assertEquals(0, cf3MemstoreSize.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf3MemstoreSize.getHeapSize());
     assertEquals(0, totalMemstoreSize);
     assertEquals(HConstants.NO_SEQNUM, smallestSeqInRegionCurrentMemstore);
     HBaseTestingUtility.closeRegionAndWAL(region);
@@ -525,9 +525,9 @@ public class TestPerColumnFamilyFlush {
     });
     LOG.info("Finished waiting on flush after too many WALs...");
     // Individual families should have been flushed.
-    assertEquals(0, desiredRegion.getStore(FAMILY1).getMemStoreSize());
-    assertEquals(0, desiredRegion.getStore(FAMILY2).getMemStoreSize());
-    assertEquals(0, desiredRegion.getStore(FAMILY3).getMemStoreSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, desiredRegion.getStore(FAMILY1).getMemStoreSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, desiredRegion.getStore(FAMILY2).getMemStoreSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, desiredRegion.getStore(FAMILY3).getMemStoreSize());
     // let WAL cleanOldLogs
     assertNull(getWAL(desiredRegion).rollWriter(true));
     assertTrue(getNumRolledLogFiles(desiredRegion) < maxLogs);
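
Note: the recurring change across these test files is that a fully flushed store no longer reports a heap size of 0, because the heap accounting now includes the fixed overhead of the always-present (empty) active mutable segment. A minimal sketch of that invariant, assuming the types and constant named in the patch:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hbase.regionserver.MemstoreSize;
import org.apache.hadoop.hbase.regionserver.MutableSegment;

// Hypothetical helper capturing the updated expectation: "flushed" means no data,
// but the empty MutableSegment skeleton is still counted on heap.
public class FlushedStoreAssert {
  static void assertStoreFlushed(MemstoreSize size) {
    assertEquals(0, size.getDataSize());                            // no cell data left
    assertEquals(MutableSegment.DEEP_OVERHEAD, size.getHeapSize()); // segment overhead remains
  }
}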
http://git-wip-us.apache.org/repos/asf/hbase/blob/8ac43084/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
index 2318414..8a8a1cb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
@@ -251,7 +251,9 @@ public class TestStore {
     Assert.assertEquals(0, size.getDataSize());
     LOG.info("Adding some data");
     MemstoreSize kvSize = new MemstoreSize();
-    store.add(new KeyValue(row, family, qf1, 1, (byte[])null), kvSize);
+    store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), kvSize);
+    // add the heap size of active (mutable) segment
+    kvSize.incMemstoreSize(0, MutableSegment.DEEP_OVERHEAD);
     size = store.memstore.getFlushableSize();
     Assert.assertEquals(kvSize, size);
     // Flush.  Bug #1 from HBASE-10466.  Make sure size calculation on failed flush is right.
@@ -262,10 +264,14 @@ public class TestStore {
     } catch (IOException ioe) {
       Assert.assertTrue(ioe.getMessage().contains("Fault injected"));
     }
+    // due to snapshot, change mutable to immutable segment
+    kvSize.incMemstoreSize(0,
+        CSLMImmutableSegment.DEEP_OVERHEAD_CSLM - MutableSegment.DEEP_OVERHEAD);
     size = store.memstore.getFlushableSize();
     Assert.assertEquals(kvSize, size);
     MemstoreSize kvSize2 = new MemstoreSize();
     store.add(new KeyValue(row, family, qf2, 2, (byte[])null), kvSize2);
+    kvSize2.incMemstoreSize(0, MutableSegment.DEEP_OVERHEAD);
     // Even though we add a new kv, we expect the flushable size to be 'same' since we have
     // not yet cleared the snapshot -- the above flush failed.
     Assert.assertEquals(kvSize, size);
@@ -277,7 +283,7 @@ public class TestStore {
         flushStore(store, id++);
         size = store.memstore.getFlushableSize();
         assertEquals(0, size.getDataSize());
-        assertEquals(0, size.getHeapSize());
+        assertEquals(MutableSegment.DEEP_OVERHEAD, size.getHeapSize());
         return null;
       }
     });
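
Note: the TestStore change also tracks a second bookkeeping shift: when snapshot() turns the active mutable segment into a CSLM-backed immutable segment, the expected heap size moves by the difference of the two fixed overheads. A sketch of that adjustment, assuming the classes and constants named in the patch:

import org.apache.hadoop.hbase.regionserver.CSLMImmutableSegment;
import org.apache.hadoop.hbase.regionserver.MemstoreSize;
import org.apache.hadoop.hbase.regionserver.MutableSegment;

// Hypothetical helper mirroring the adjustment the test makes after snapshot():
// the segment is now accounted as a CSLMImmutableSegment rather than a MutableSegment.
public class SnapshotOverheadSketch {
  static void adjustForSnapshot(MemstoreSize expected) {
    expected.incMemstoreSize(0,
        CSLMImmutableSegment.DEEP_OVERHEAD_CSLM - MutableSegment.DEEP_OVERHEAD);
  }
}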
http://git-wip-us.apache.org/repos/asf/hbase/blob/8ac43084/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
index 3b2ebe2..63ec13c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
@@ -241,7 +241,7 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // CF2 should become empty
     assertEquals(0, cf2MemstoreSizePhaseII.getDataSize());
-    assertEquals(0, cf2MemstoreSizePhaseII.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf2MemstoreSizePhaseII.getHeapSize());
 
     // verify that CF3 was flushed to memory and was compacted (this is approximation check)
     assertTrue(cf3MemstoreSizePhaseI.getDataSize() > cf3MemstoreSizePhaseII.getDataSize());
@@ -302,7 +302,7 @@ public class TestWalAndCompactingMemStoreFlush {
     // CF2 should be flushed to disk
     assertTrue(cf1MemstoreSizePhaseIII.getDataSize() > cf1MemstoreSizePhaseIV.getDataSize());
     assertEquals(0, cf2MemstoreSizePhaseIV.getDataSize());
-    assertEquals(0, cf2MemstoreSizePhaseIV.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf2MemstoreSizePhaseIV.getHeapSize());
 
     // CF3 shouldn't have been touched.
     assertEquals(cf3MemstoreSizePhaseIV, cf3MemstoreSizePhaseII);
@@ -326,11 +326,11 @@ public class TestWalAndCompactingMemStoreFlush {
         .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
     assertEquals(0, cf1MemstoreSizePhaseV.getDataSize());
-    assertEquals(0, cf1MemstoreSizePhaseV.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf1MemstoreSizePhaseV.getHeapSize());
     assertEquals(0, cf2MemstoreSizePhaseV.getDataSize());
-    assertEquals(0, cf2MemstoreSizePhaseV.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf2MemstoreSizePhaseV.getHeapSize());
     assertEquals(0, cf3MemstoreSizePhaseV.getDataSize());
-    assertEquals(0, cf3MemstoreSizePhaseV.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf3MemstoreSizePhaseV.getHeapSize());
 
     // What happens when we hit the memstore limit, but we are not able to find
     // any Column Family above the threshold?
@@ -476,7 +476,7 @@ public class TestWalAndCompactingMemStoreFlush {
     assertTrue(cf1MemstoreSizePhaseII.getHeapSize() < cf1MemstoreSizePhaseI.getHeapSize());
     // CF2 should become empty
     assertEquals(0, cf2MemstoreSizePhaseII.getDataSize());
-    assertEquals(0, cf2MemstoreSizePhaseII.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf2MemstoreSizePhaseII.getHeapSize());
     // verify that CF3 was flushed to memory and was not compacted (this is an approximation check)
     // if compacted CF# should be at least twice less because its every key was duplicated
     assertEquals(cf3MemstoreSizePhaseII.getDataSize() , cf3MemstoreSizePhaseI.getDataSize());
@@ -544,7 +544,7 @@ public class TestWalAndCompactingMemStoreFlush {
     // CF2 should remain empty
     assertTrue(cf1MemstoreSizePhaseIII.getDataSize() > cf1MemstoreSizePhaseIV.getDataSize());
     assertEquals(0, cf2MemstoreSizePhaseIV.getDataSize());
-    assertEquals(0, cf2MemstoreSizePhaseIV.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf2MemstoreSizePhaseIV.getHeapSize());
     // CF3 shouldn't have been touched.
     assertEquals(cf3MemstoreSizePhaseIV, cf3MemstoreSizePhaseII);
     // the smallest LSN of CF3 shouldn't change
@@ -573,11 +573,11 @@ public class TestWalAndCompactingMemStoreFlush {
     /*------------------------------------------------------------------------------*/
     /* PHASE V - validation */
     assertEquals(0, cf1MemstoreSizePhaseV.getDataSize());
-    assertEquals(0, cf1MemstoreSizePhaseV.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf1MemstoreSizePhaseV.getHeapSize());
     assertEquals(0, cf2MemstoreSizePhaseV.getDataSize());
-    assertEquals(0, cf2MemstoreSizePhaseV.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf2MemstoreSizePhaseV.getHeapSize());
     assertEquals(0, cf3MemstoreSizePhaseV.getDataSize());
-    assertEquals(0, cf3MemstoreSizePhaseV.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf3MemstoreSizePhaseV.getHeapSize());
     // The total memstores size should be empty
     assertEquals(0, totalMemstoreSizePhaseV);
     // Because there is nothing in any memstore the WAL's LSN should be -1
@@ -699,7 +699,7 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // CF2 should have been cleared
     assertEquals(0, cf2MemstoreSizePhaseII.getDataSize());
-    assertEquals(0, cf2MemstoreSizePhaseII.getHeapSize());
+    assertEquals(MutableSegment.DEEP_OVERHEAD, cf2MemstoreSizePhaseII.getHeapSize());
 
     String s = "\n\n----------------------------------\n"
         + "Upon initial insert and flush, LSN of CF1 is:"
@@ -875,9 +875,13 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getSizeOfMemStore();
 
     assertEquals(2*cf1MemstoreSizePhaseI.getDataSize(), cf1MemstoreSizePhaseIV.getDataSize());
+    // the decrease in the heap size due to usage of CellArrayMap instead of CSLM
+    // should be the same in flattening and in merge (first and second in-memory-flush)
+    // but in phase 1 we do not yet have immutable segment
    assertEquals(
        cf1MemstoreSizePhaseI.getHeapSize() - cf1MemstoreSizePhaseII.getHeapSize(),
-       cf1MemstoreSizePhaseIII.getHeapSize() - cf1MemstoreSizePhaseIV.getHeapSize());
+       cf1MemstoreSizePhaseIII.getHeapSize() - cf1MemstoreSizePhaseIV.getHeapSize()
+           - CellArrayImmutableSegment.DEEP_OVERHEAD_CAM);
     assertEquals(3, // active, one in pipeline, snapshot
        ((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore).getSegments().size());
     // CF2 should have been cleared
