wchevreuil commented on code in PR #5829:
URL: https://github.com/apache/hbase/pull/5829#discussion_r1576059968


##########
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java:
##########
@@ -245,6 +248,184 @@ public void testColdDataFiles() {
     }
   }
 
+  @Test
+  public void testPickColdDataFiles() {
+    Map<String, String> coldDataFiles = dataTieringManager.getColdFilesList();
+    assertEquals(1, coldDataFiles.size());
+    // hStoreFiles[3] is the cold file.
+    assert (coldDataFiles.containsKey(hStoreFiles.get(3).getFileInfo().getActiveFileName()));
+  }
+
+  /*
+   * Verify that both cold blocks are evicted when the bucket reaches its capacity. The hot file
+   * remains in the cache.
+   */
+  @Test
+  public void testBlockEvictions() throws Exception {
+    long capacitySize = 64 * 1024;
+    int writeThreads = 3;
+    int writerQLen = 64;
+    int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
+
+    // Setup: Create a bucket cache with lower capacity
+    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
+      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence",
+      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);
+
+    // Create three cache keys: two blocks from a cold data file and one block from a hot file.
+    // hStoreFiles.get(3) is a cold data file, while hStoreFiles.get(0) is a hot file.
+    Set<BlockCacheKey> cacheKeys = new HashSet<>();
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA));
+
+    // Create dummy data to be cached and fill the cache completely.
+    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);
+
+    int blocksIter = 0;
+    for (BlockCacheKey key : cacheKeys) {
+      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
+      // Ensure that the block is persisted to the file.
+      Waiter.waitFor(defaultConf, 1000000, 100,
+        () -> (bucketCache.getBackingMap().containsKey(key)));
+    }

Review Comment:
   Should we fail faster? Why does this timeout need to be so long?
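   
   For instance, a bound like the one below would fail within seconds instead of potentially
   waiting up to ~16 minutes. This is just a sketch: the 10_000 ms value is an arbitrary
   suggestion, and it keeps the (conf, timeout, interval, predicate) argument order that
   Waiter.waitFor already uses in this test.
   
       // Hypothetical tighter wait: give up after 10s rather than 1,000,000 ms.
       Waiter.waitFor(defaultConf, 10_000, 100,
         () -> bucketCache.getBackingMap().containsKey(key));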



##########
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java:
##########
@@ -245,6 +248,184 @@ public void testColdDataFiles() {
     }
   }
 
+  @Test
+  public void testPickColdDataFiles() {
+    Map<String, String> coldDataFiles = dataTieringManager.getColdFilesList();
+    assertEquals(1, coldDataFiles.size());
+    // hStoreFiles[3] is the cold file.
+    assert (coldDataFiles.containsKey(hStoreFiles.get(3).getFileInfo().getActiveFileName()));
+  }
+
+  /*
+   * Verify that both cold blocks are evicted when the bucket reaches its capacity. The hot file
+   * remains in the cache.
+   */
+  @Test
+  public void testBlockEvictions() throws Exception {
+    long capacitySize = 64 * 1024;
+    int writeThreads = 3;
+    int writerQLen = 64;
+    int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
+
+    // Setup: Create a bucket cache with lower capacity
+    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
+      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence",
+      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);
+
+    // Create three cache keys: two blocks from a cold data file and one block from a hot file.
+    // hStoreFiles.get(3) is a cold data file, while hStoreFiles.get(0) is a hot file.
+    Set<BlockCacheKey> cacheKeys = new HashSet<>();
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA));
+
+    // Create dummy data to be cached and fill the cache completely.
+    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);
+
+    int blocksIter = 0;
+    for (BlockCacheKey key : cacheKeys) {
+      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
+      // Ensure that the block is persisted to the file.
+      Waiter.waitFor(defaultConf, 1000000, 100,
+        () -> (bucketCache.getBackingMap().containsKey(key)));
+    }
+
+    // Verify that the bucket cache contains 3 blocks.
+    assertEquals(3, bucketCache.getBackingMap().keySet().size());
+
+    // Add an additional hot-data block into the cache, which should trigger the eviction.
+    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA);
+    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1);
+
+    bucketCache.cacheBlock(newKey, newBlock[0].getBlock());
+    Waiter.waitFor(defaultConf, 1000000, 100,
+      () -> (bucketCache.getBackingMap().containsKey(newKey)));
+
+    // Verify that the bucket cache now contains 2 hot blocks only.
+    // Both 8KB cold blocks will be evicted to make room for one 8KB block plus some additional
+    // space.
+    validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0);

Review Comment:
   Why is an eviction expected here? Cache capacity is 64KB, the cache already had three 8KB
   blocks, and we are now adding a fourth 8KB block, so total occupancy would be at 32KB, which
   should be OK, no?
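   
   For reference, a rough version of that arithmetic (a sketch; the 9216-byte slot size comes
   from this test's bucketSizes = { 8 * 1024 + 1024 }, and the 0.95 acceptable factor is an
   assumed BucketCache default):
   
       // Hypothetical occupancy check mirroring the reasoning above.
       long capacity = 64 * 1024;            // 65536 bytes
       long slotSize = 8 * 1024 + 1024;      // 9216 bytes per cached 8KB block
       long usedAfterFourth = 4 * slotSize;  // 36864 bytes
       double usage = (double) usedAfterFourth / capacity; // ~0.56
       // Even counting full bucket slots, usage stays well below a 0.95
       // acceptable factor, so an eviction here is not obviously expected.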



##########
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java:
##########
@@ -245,6 +248,184 @@ public void testColdDataFiles() {
     }
   }
 
+  @Test
+  public void testPickColdDataFiles() {
+    Map<String, String> coldDataFiles = dataTieringManager.getColdFilesList();
+    assertEquals(1, coldDataFiles.size());
+    // hStoreFiles[3] is the cold file.
+    assert (coldDataFiles.containsKey(hStoreFiles.get(3).getFileInfo().getActiveFileName()));
+  }
+
+  /*
+   * Verify that both cold blocks are evicted when the bucket reaches its capacity. The hot file
+   * remains in the cache.
+   */
+  @Test
+  public void testBlockEvictions() throws Exception {
+    long capacitySize = 64 * 1024;
+    int writeThreads = 3;
+    int writerQLen = 64;
+    int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
+
+    // Setup: Create a bucket cache with lower capacity
+    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
+      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence",
+      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);
+
+    // Create three cache keys: two blocks from a cold data file and one block from a hot file.
+    // hStoreFiles.get(3) is a cold data file, while hStoreFiles.get(0) is a hot file.
+    Set<BlockCacheKey> cacheKeys = new HashSet<>();
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA));
+
+    // Create dummy data to be cached and fill the cache completely.
+    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);
+
+    int blocksIter = 0;
+    for (BlockCacheKey key : cacheKeys) {
+      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
+      // Ensure that the block is persisted to the file.
+      Waiter.waitFor(defaultConf, 1000000, 100,
+        () -> (bucketCache.getBackingMap().containsKey(key)));
+    }
+
+    // Verify that the bucket cache contains 3 blocks.
+    assertEquals(3, bucketCache.getBackingMap().keySet().size());
+
+    // Add an additional hot-data block into the cache, which should trigger the eviction.
+    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA);
+    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1);
+
+    bucketCache.cacheBlock(newKey, newBlock[0].getBlock());
+    Waiter.waitFor(defaultConf, 1000000, 100,
+      () -> (bucketCache.getBackingMap().containsKey(newKey)));
+
+    // Verify that the bucket cache now contains 2 hot blocks only.
+    // Both 8KB cold blocks will be evicted to make room for one 8KB block plus some additional
+    // space.
+    validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0);
+  }
+
+  /*
+   * Verify that two of the three cold blocks are evicted when the bucket reaches its capacity,
+   * but one cold block remains in the cache since the required space has been freed.
+   */
+  @Test
+  public void testBlockEvictionsAllColdBlocks() throws Exception {
+    long capacitySize = 64 * 1024;
+    int writeThreads = 3;
+    int writerQLen = 64;
+    int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
+
+    // Setup: Create a bucket cache with lower capacity
+    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
+      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence",
+      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);
+
+    // Create three cache keys with three cold data blocks.
+    // hStoreFiles.get(3) is a cold data file.
+    Set<BlockCacheKey> cacheKeys = new HashSet<>();
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 16384, true, BlockType.DATA));
+
+    // Create dummy data to be cached and fill the cache completely.
+    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);
+
+    int blocksIter = 0;
+    for (BlockCacheKey key : cacheKeys) {
+      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
+      // Ensure that the block is persisted to the file.
+      Waiter.waitFor(defaultConf, 1000000, 100,
+        () -> (bucketCache.getBackingMap().containsKey(key)));
+    }
+
+    // Verify that the bucket cache contains 3 blocks.
+    assertEquals(3, bucketCache.getBackingMap().keySet().size());
+
+    // Add an additional hot-data block into the cache, which should trigger the eviction.
+    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA);
+    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1);
+
+    bucketCache.cacheBlock(newKey, newBlock[0].getBlock());
+    Waiter.waitFor(defaultConf, 1000000, 100,
+      () -> (bucketCache.getBackingMap().containsKey(newKey)));
+
+    // Verify that the bucket cache now contains 1 cold block and a newly added hot block.
+    validateBlocks(bucketCache.getBackingMap().keySet(), 2, 1, 1);
+  }
+
+  /*
+   * Verify that a hot block is evicted along with a cold block when the bucket reaches its
+   * capacity.
+   */
+  @Test
+  public void testBlockEvictionsHotBlocks() throws Exception {
+    long capacitySize = 64 * 1024;
+    int writeThreads = 3;
+    int writerQLen = 64;
+    int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
+
+    // Setup: Create a bucket cache with lower capacity
+    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
+      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence",

Review Comment:
   The comment says it's creating a lower-capacity cache, yet it's using the same 64KB as the
   previous tests.
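   
   If a genuinely smaller cache is the intent, the setup could shrink it explicitly, e.g.
   (a sketch; 32KB is an arbitrary illustrative value):
   
       // Hypothetical: make the capacity actually lower than in the other
       // tests, so the "lower capacity" comment matches the setup.
       long capacitySize = 32 * 1024; // vs. 64 * 1024 used elsewhere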



##########
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java:
##########
@@ -245,6 +248,184 @@ public void testColdDataFiles() {
     }
   }
 
+  @Test
+  public void testPickColdDataFiles() {
+    Map<String, String> coldDataFiles = dataTieringManager.getColdFilesList();
+    assertEquals(1, coldDataFiles.size());
+    // hStoreFiles[3] is the cold file.
+    assert (coldDataFiles.containsKey(hStoreFiles.get(3).getFileInfo().getActiveFileName()));
+  }
+
+  /*
+   * Verify that both cold blocks are evicted when the bucket reaches its capacity. The hot file
+   * remains in the cache.
+   */
+  @Test
+  public void testBlockEvictions() throws Exception {
+    long capacitySize = 64 * 1024;
+    int writeThreads = 3;
+    int writerQLen = 64;
+    int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
+
+    // Setup: Create a bucket cache with lower capacity
+    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
+      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence",
+      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);
+
+    // Create three cache keys: two blocks from a cold data file and one block from a hot file.
+    // hStoreFiles.get(3) is a cold data file, while hStoreFiles.get(0) is a hot file.
+    Set<BlockCacheKey> cacheKeys = new HashSet<>();
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA));
+
+    // Create dummy data to be cached and fill the cache completely.
+    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);
+
+    int blocksIter = 0;
+    for (BlockCacheKey key : cacheKeys) {
+      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
+      // Ensure that the block is persisted to the file.
+      Waiter.waitFor(defaultConf, 1000000, 100,
+        () -> (bucketCache.getBackingMap().containsKey(key)));
+    }
+
+    // Verify that the bucket cache contains 3 blocks.
+    assertEquals(3, bucketCache.getBackingMap().keySet().size());
+
+    // Add an additional hot-data block into the cache, which should trigger the eviction.
+    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA);
+    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1);
+
+    bucketCache.cacheBlock(newKey, newBlock[0].getBlock());
+    Waiter.waitFor(defaultConf, 1000000, 100,
+      () -> (bucketCache.getBackingMap().containsKey(newKey)));
+
+    // Verify that the bucket cache now contains 2 hot blocks only.
+    // Both 8KB cold blocks will be evicted to make room for one 8KB block plus some additional
+    // space.
+    validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0);
+  }
+
+  /*
+   * Verify that two of the three cold blocks are evicted when the bucket reaches its capacity,
+   * but one cold block remains in the cache since the required space has been freed.
+   */
+  @Test
+  public void testBlockEvictionsAllColdBlocks() throws Exception {
+    long capacitySize = 64 * 1024;
+    int writeThreads = 3;
+    int writerQLen = 64;
+    int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
+
+    // Setup: Create a bucket cache with lower capacity
+    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
+      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence",
+      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);
+
+    // Create three cache keys with three cold data blocks.
+    // hStoreFiles.get(3) is a cold data file.
+    Set<BlockCacheKey> cacheKeys = new HashSet<>();
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 16384, true, BlockType.DATA));
+
+    // Create dummy data to be cached and fill the cache completely.
+    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);
+
+    int blocksIter = 0;
+    for (BlockCacheKey key : cacheKeys) {
+      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
+      // Ensure that the block is persisted to the file.
+      Waiter.waitFor(defaultConf, 1000000, 100,
+        () -> (bucketCache.getBackingMap().containsKey(key)));
+    }
+
+    // Verify that the bucket cache contains 3 blocks.
+    assertEquals(3, bucketCache.getBackingMap().keySet().size());
+
+    // Add an additional hot-data block into the cache, which should trigger the eviction.
+    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA);
+    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1);
+
+    bucketCache.cacheBlock(newKey, newBlock[0].getBlock());
+    Waiter.waitFor(defaultConf, 1000000, 100,
+      () -> (bucketCache.getBackingMap().containsKey(newKey)));
+

Review Comment:
   Similar to the previous test: why do we expect evictions when the cache is at less than 50%
   usage?
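   
   If no eviction is actually warranted at this occupancy, perhaps the stronger check is the
   opposite assertion, e.g. (a sketch):
   
       // Hypothetical: assert that all four cached blocks survive at ~56% slot usage.
       assertEquals(4, bucketCache.getBackingMap().keySet().size());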



##########
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java:
##########
@@ -245,6 +248,184 @@ public void testColdDataFiles() {
     }
   }
 
+  @Test
+  public void testPickColdDataFiles() {
+    Map<String, String> coldDataFiles = dataTieringManager.getColdFilesList();
+    assertEquals(1, coldDataFiles.size());
+    // hStoreFiles[3] is the cold file.
+    assert (coldDataFiles.containsKey(hStoreFiles.get(3).getFileInfo().getActiveFileName()));
+  }
+
+  /*
+   * Verify that both cold blocks are evicted when the bucket reaches its capacity. The hot file
+   * remains in the cache.
+   */
+  @Test
+  public void testBlockEvictions() throws Exception {
+    long capacitySize = 64 * 1024;
+    int writeThreads = 3;
+    int writerQLen = 64;
+    int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
+
+    // Setup: Create a bucket cache with lower capacity
+    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
+      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence",
+      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);
+
+    // Create three cache keys: two blocks from a cold data file and one block from a hot file.
+    // hStoreFiles.get(3) is a cold data file, while hStoreFiles.get(0) is a hot file.
+    Set<BlockCacheKey> cacheKeys = new HashSet<>();
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA));
+
+    // Create dummy data to be cached and fill the cache completely.
+    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);
+
+    int blocksIter = 0;
+    for (BlockCacheKey key : cacheKeys) {
+      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
+      // Ensure that the block is persisted to the file.
+      Waiter.waitFor(defaultConf, 1000000, 100,
+        () -> (bucketCache.getBackingMap().containsKey(key)));
+    }
+
+    // Verify that the bucket cache contains 3 blocks.
+    assertEquals(3, bucketCache.getBackingMap().keySet().size());
+
+    // Add an additional hot-data block into the cache, which should trigger the eviction.
+    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA);
+    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1);
+
+    bucketCache.cacheBlock(newKey, newBlock[0].getBlock());
+    Waiter.waitFor(defaultConf, 1000000, 100,
+      () -> (bucketCache.getBackingMap().containsKey(newKey)));
+
+    // Verify that the bucket cache now contains 2 hot blocks only.
+    // Both 8KB cold blocks will be evicted to make room for one 8KB block plus some additional
+    // space.
+    validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0);
+  }
+
+  /*
+   * Verify that two of the three cold blocks are evicted when the bucket reaches its capacity,
+   * but one cold block remains in the cache since the required space has been freed.
+   */
+  @Test
+  public void testBlockEvictionsAllColdBlocks() throws Exception {
+    long capacitySize = 64 * 1024;
+    int writeThreads = 3;
+    int writerQLen = 64;
+    int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
+
+    // Setup: Create a bucket cache with lower capacity
+    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
+      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence",
+      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);
+
+    // Create three cache keys with three cold data blocks.
+    // hStoreFiles.get(3) is a cold data file.
+    Set<BlockCacheKey> cacheKeys = new HashSet<>();
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 16384, true, BlockType.DATA));
+
+    // Create dummy data to be cached and fill the cache completely.
+    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);
+
+    int blocksIter = 0;
+    for (BlockCacheKey key : cacheKeys) {
+      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
+      // Ensure that the block is persisted to the file.
+      Waiter.waitFor(defaultConf, 1000000, 100,
+        () -> (bucketCache.getBackingMap().containsKey(key)));
+    }
+
+    // Verify that the bucket cache contains 3 blocks.
+    assertEquals(3, bucketCache.getBackingMap().keySet().size());
+
+    // Add an additional hot-data block into the cache, which should trigger the eviction.
+    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA);
+    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1);
+
+    bucketCache.cacheBlock(newKey, newBlock[0].getBlock());
+    Waiter.waitFor(defaultConf, 1000000, 100,
+      () -> (bucketCache.getBackingMap().containsKey(newKey)));
+
+    // Verify that the bucket cache now contains 1 cold block and a newly added hot block.
+    validateBlocks(bucketCache.getBackingMap().keySet(), 2, 1, 1);
+  }
+
+  /*
+   * Verify that a hot block is evicted along with a cold block when the bucket reaches its
+   * capacity.
+   */
+  @Test
+  public void testBlockEvictionsHotBlocks() throws Exception {
+    long capacitySize = 64 * 1024;
+    int writeThreads = 3;
+    int writerQLen = 64;
+    int[] bucketSizes = new int[] { 8 * 1024 + 1024 };
+
+    // Setup: Create a bucket cache with lower capacity
+    BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize,
+      8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence",
+      DEFAULT_ERROR_TOLERATION_DURATION, defaultConf);
+
+    // Create three cache keys: two hot data blocks and one cold data block.
+    // hStoreFiles.get(0) is a hot data file and hStoreFiles.get(3) is a cold data file.
+    Set<BlockCacheKey> cacheKeys = new HashSet<>();
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 8192, true, BlockType.DATA));
+    cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA));
+
+    // Create dummy data to be cached and fill the cache completely.
+    CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3);
+
+    int blocksIter = 0;
+    for (BlockCacheKey key : cacheKeys) {
+      bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock());
+      // Ensure that the block is persisted to the file.
+      Waiter.waitFor(defaultConf, 1000000, 100,
+        () -> (bucketCache.getBackingMap().containsKey(key)));
+    }
+
+    // Verify that the bucket cache contains 3 blocks.
+    assertEquals(3, bucketCache.getBackingMap().keySet().size());
+
+    // Add an additional block, which should evict the only cold block along with one hot block.
+    BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA);
+    CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1);
+
+    bucketCache.cacheBlock(newKey, newBlock[0].getBlock());
+    Waiter.waitFor(defaultConf, 1000000, 100,
+      () -> (bucketCache.getBackingMap().containsKey(newKey)));

Review Comment:
   Similar to the previous tests: do we really expect evictions at less than 50% usage?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
