anoopsjohn commented on a change in pull request #3215:
URL: https://github.com/apache/hbase/pull/3215#discussion_r633042493
##########
File path: hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
##########
@@ -890,4 +892,60 @@ public void testDBEShipped() throws IOException {
     writer.close();
   }
 }
+
+  /**
+   * Test case for CombinedBlockCache with TinyLfu as L1 cache
+   */
+  @Test
+  public void testReaderWithTinyLfuCombinedBlockCache() throws Exception {
+    testReaderCombinedCache(true);
+  }
+
+  /**
+   * Test case for CombinedBlockCache with AdaptiveLRU as L1 cache
+   */
+  @Test
+  public void testReaderWithAdaptiveLruCombinedBlockCache() throws Exception {
+    testReaderCombinedCache(false);
+  }
+
+  private void testReaderCombinedCache(final boolean isTinyLfu) throws Exception {
+    int bufCount = 1024;
+    int blockSize = 64 * 1024;
+    ByteBuffAllocator alloc = initAllocator(true, bufCount, blockSize, 0);
+    fillByteBuffAllocator(alloc, bufCount);
+    Path storeFilePath = writeStoreFile();
+    // Open the file reader with CombinedBlockCache
+    BlockCache combined = initCombinedBlockCache(isTinyLfu ? "TinyLfu" : "AdaptiveLRU");
+    conf.setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, true);
+    CacheConfig cacheConfig = new CacheConfig(conf, null, combined, alloc);
+    HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConfig, true, conf);
+    long offset = 0;
+    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
+      BlockCacheKey key = new BlockCacheKey(storeFilePath.getName(), offset);
+      HFileBlock block = reader.readBlock(offset, -1, true, true, false, true, null, null);
+      offset += block.getOnDiskSizeWithHeader();
+      // Read the cached block.
+      Cacheable cachedBlock = combined.getBlock(key, false, false, true);
+      try {
+        Assert.assertNotNull(cachedBlock);
+        Assert.assertTrue(cachedBlock instanceof HFileBlock);
+        HFileBlock hfb = (HFileBlock) cachedBlock;
+        // Data block will be cached in BucketCache, so it should be an off-heap block.
+        if (hfb.getBlockType().isData()) {
+          Assert.assertTrue(hfb.isSharedMem());
+        } else if (!isTinyLfu) {
+          Assert.assertFalse(hfb.isSharedMem());
Review comment:
What is this check? Does it mean that in the TinyLFU case non-data blocks
behave differently than with the other 2 cache types?
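If TinyLfu is also meant to end up with on-heap non-data blocks, the special
case could go away and the assert could be uniform across all policies.
Rough sketch (untested, reusing hfb from the test above):

```java
if (hfb.getBlockType().isData()) {
  // Data blocks live in the BucketCache (L2), hence shared/off-heap memory.
  Assert.assertTrue(hfb.isSharedMem());
} else {
  // Non-data blocks live in the L1 cache; if every L1 policy cached an
  // on-heap copy, the isTinyLfu special case would not be needed.
  Assert.assertFalse(hfb.isSharedMem());
}
```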
##########
File path: hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java
##########
@@ -188,21 +196,58 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable value, boolean inMemory
   @Override
   public void cacheBlock(BlockCacheKey key, Cacheable value) {
+    cacheBlockUtil(key, value, false);
+  }
+
+  private void cacheBlockUtil(BlockCacheKey key, Cacheable value, boolean deepClonedOnHeap) {
Review comment:
This can be done in cacheBlock() itself, as per the previous comment.
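i.e. roughly (sketch only, assuming the asReferencedHeapBlock() call
mentioned below is in place):

```java
@Override
public void cacheBlock(BlockCacheKey key, Cacheable value) {
  // asReferencedHeapBlock() deep-clones a shared-memory block onto the heap
  // when needed, so no separate cacheBlockUtil()/deepClonedOnHeap flag is required.
  value = asReferencedHeapBlock(value);
  cache.put(key, value);
}
```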
##########
File path: hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
##########
@@ -890,4 +892,60 @@ public void testDBEShipped() throws IOException {
     writer.close();
   }
 }
+
+  /**
+   * Test case for CombinedBlockCache with TinyLfu as L1 cache
+   */
+  @Test
+  public void testReaderWithTinyLfuCombinedBlockCache() throws Exception {
+    testReaderCombinedCache(true);
+  }
+
+  /**
+   * Test case for CombinedBlockCache with AdaptiveLRU as L1 cache
+   */
+  @Test
+  public void testReaderWithAdaptiveLruCombinedBlockCache() throws Exception {
+    testReaderCombinedCache(false);
+  }
+
+  private void testReaderCombinedCache(final boolean isTinyLfu) throws Exception {
+    int bufCount = 1024;
+    int blockSize = 64 * 1024;
+    ByteBuffAllocator alloc = initAllocator(true, bufCount, blockSize, 0);
+    fillByteBuffAllocator(alloc, bufCount);
+    Path storeFilePath = writeStoreFile();
+    // Open the file reader with CombinedBlockCache
+    BlockCache combined = initCombinedBlockCache(isTinyLfu ? "TinyLfu" : "AdaptiveLRU");
+    conf.setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, true);
+    CacheConfig cacheConfig = new CacheConfig(conf, null, combined, alloc);
+    HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConfig, true, conf);
+    long offset = 0;
+    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
+      BlockCacheKey key = new BlockCacheKey(storeFilePath.getName(), offset);
+      HFileBlock block = reader.readBlock(offset, -1, true, true, false, true, null, null);
+      offset += block.getOnDiskSizeWithHeader();
+      // Read the cached block.
+      Cacheable cachedBlock = combined.getBlock(key, false, false, true);
+      try {
+        Assert.assertNotNull(cachedBlock);
+        Assert.assertTrue(cachedBlock instanceof HFileBlock);
+        HFileBlock hfb = (HFileBlock) cachedBlock;
+        // Data block will be cached in BucketCache, so it should be an off-heap block.
+        if (hfb.getBlockType().isData()) {
+          Assert.assertTrue(hfb.isSharedMem());
+        } else if (!isTinyLfu) {
+          Assert.assertFalse(hfb.isSharedMem());
+        }
+      } finally {
+        cachedBlock.release();
+      }
+      block.release(); // Return the ByteBuffer back to the allocator.
+    }
+    reader.close();
+    combined.shutdown();
+    Assert.assertEquals(bufCount, alloc.getFreeBufferCount());
+    alloc.clean();
Review comment:
Can we also assert on the refCounts?
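e.g. inside the try block, something like this (rough sketch; the exact
expected counts depend on how many references the caches retain):

```java
// For a shared-memory (BucketCache) block, the cache holds one reference
// and the getBlock() call above retained another.
if (hfb.isSharedMem()) {
  Assert.assertEquals(2, hfb.refCnt());
}
// After cachedBlock.release() in the finally block, only the cache's own
// reference should remain, i.e. hfb.refCnt() == 1.
```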
##########
File path: hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
##########
@@ -890,4 +892,60 @@ public void testDBEShipped() throws IOException {
     writer.close();
   }
 }
+
+  /**
+   * Test case for CombinedBlockCache with TinyLfu as L1 cache
+   */
+  @Test
+  public void testReaderWithTinyLfuCombinedBlockCache() throws Exception {
+    testReaderCombinedCache(true);
+  }
+
+  /**
+   * Test case for CombinedBlockCache with AdaptiveLRU as L1 cache
+   */
+  @Test
+  public void testReaderWithAdaptiveLruCombinedBlockCache() throws Exception {
+    testReaderCombinedCache(false);
+  }
+
+  private void testReaderCombinedCache(final boolean isTinyLfu) throws Exception {
Review comment:
Can you pass a String l1CachePolicy here as well? This method does extra
asserts compared to testReaderWithCombinedBlockCache, so it's worth running
it with all 3 policies.
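i.e. something like this (sketch; the body stays the same apart from the
initCombinedBlockCache() call and the TinyLfu check):

```java
@Test
public void testReaderWithLruCombinedBlockCache() throws Exception {
  testReaderCombinedCache("LRU");
}

@Test
public void testReaderWithAdaptiveLruCombinedBlockCache() throws Exception {
  testReaderCombinedCache("AdaptiveLRU");
}

@Test
public void testReaderWithTinyLfuCombinedBlockCache() throws Exception {
  testReaderCombinedCache("TinyLfu");
}

private void testReaderCombinedCache(final String l1CachePolicy) throws Exception {
  // ... same body as above, but:
  BlockCache combined = initCombinedBlockCache(l1CachePolicy);
  // ... and key the non-data-block assert off "TinyLfu".equals(l1CachePolicy).
}
```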
##########
File path: hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java
##########
@@ -171,8 +177,10 @@ public Cacheable getBlock(BlockCacheKey cacheKey,
       if ((value != null) && caching) {
         if ((value instanceof HFileBlock) && ((HFileBlock) value).isSharedMem()) {
           value = HFileBlock.deepCloneOnHeap((HFileBlock) value);
+          cacheBlockUtil(cacheKey, value, true);
Review comment:
I think you can simplify this; no such if/else is needed. The deepCloneOnHeap
and isSharedMem() check can be avoided here: just call cacheBlock() from here.
In cacheBlock() the newly added asReferencedHeapBlock() call already does the
deep clone as needed.
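i.e. the victim cache hit path in getBlock() could reduce to just (sketch,
assuming cacheBlock() goes through asReferencedHeapBlock()):

```java
if ((value != null) && caching) {
  // cacheBlock() -> asReferencedHeapBlock() already deep-clones a
  // shared-memory block onto the heap, so no isSharedMem() check or
  // explicit HFileBlock.deepCloneOnHeap() call is needed here.
  cacheBlock(cacheKey, value);
}
```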
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]