This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-3
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-3 by this push:
     new 84b1b3c41e6 HBASE-29862 Test case 
TestClearRegionBlockCache#testClearBlockCache failed (#7697)
84b1b3c41e6 is described below

commit 84b1b3c41e6e5c043db6b6b0d3e0331b22a445cc
Author: Peng Lu <[email protected]>
AuthorDate: Fri Feb 27 23:01:24 2026 +0800

    HBASE-29862 Test case TestClearRegionBlockCache#testClearBlockCache failed 
(#7697)
    
    Signed-off-by: Duo Zhang <[email protected]>
    Reviewed-by: Liu Xiao <[email protected]>
    (cherry picked from commit 41d415545ace820b351e344fe8ddc15f29e66011)
---
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 27 ++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index b8e63ad62c8..4839494ca62 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -1959,9 +1959,22 @@ public class BucketCache implements BlockCache, HeapSize 
{
   }
 
   private Set<BlockCacheKey> getAllCacheKeysForFile(String hfileName, long 
init, long end) {
+    Set<BlockCacheKey> cacheKeys = new HashSet<>();
+    // At this moment, some bucket entries may be in the WriterThread queue 
and not yet put into
+    // the backingMap. So, when executing this method, we should check both 
the RAMCache and
+    // the backingMap to ensure all cache keys are obtained.
+    // For more details, please refer to HBASE-29862.
+    Set<BlockCacheKey> ramCacheKeySet = 
ramCache.getRamBlockCacheKeysForHFile(hfileName);
+    for (BlockCacheKey key : ramCacheKeySet) {
+      if (key.getOffset() >= init && key.getOffset() <= end) {
+        cacheKeys.add(key);
+      }
+    }
+
     // These keys are just for comparison and are short lived, so we need only 
file name and offset
-    return blocksByHFile.subSet(new BlockCacheKey(hfileName, init), true,
-      new BlockCacheKey(hfileName, end), true);
+    cacheKeys.addAll(blocksByHFile.subSet(new BlockCacheKey(hfileName, init), 
true,
+      new BlockCacheKey(hfileName, end), true));
+    return cacheKeys;
   }
 
   /**
@@ -2356,6 +2369,16 @@ public class BucketCache implements BlockCache, HeapSize 
{
       return delegate.keySet().stream().filter(key -> 
key.getHfileName().equals(fileName))
         .findFirst().isPresent();
     }
+
+    public Set<BlockCacheKey> getRamBlockCacheKeysForHFile(String fileName) {
+      Set<BlockCacheKey> ramCacheKeySet = new HashSet<>();
+      for (BlockCacheKey blockCacheKey : delegate.keySet()) {
+        if (blockCacheKey.getHfileName().equals(fileName)) {
+          ramCacheKeySet.add(blockCacheKey);
+        }
+      }
+      return ramCacheKeySet;
+    }
   }
 
   public Map<BlockCacheKey, BucketEntry> getBackingMap() {

Reply via email to