This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2.6 by this push:
new b12685927db HBASE-29862 Test case
TestClearRegionBlockCache#testClearBlockCache failed (#7697)
b12685927db is described below
commit b12685927db169cf4b8095a61363dbb6f1c2d2a6
Author: Peng Lu <[email protected]>
AuthorDate: Fri Feb 27 23:01:24 2026 +0800
HBASE-29862 Test case TestClearRegionBlockCache#testClearBlockCache failed
(#7697)
Signed-off-by: Duo Zhang <[email protected]>
Reviewed-by: Liu Xiao <[email protected]>
(cherry picked from commit 41d415545ace820b351e344fe8ddc15f29e66011)
---
.../hadoop/hbase/io/hfile/bucket/BucketCache.java | 27 ++++++++++++++++++++--
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 2a602f3e0b0..780de5bfa4e 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -1947,9 +1947,22 @@ public class BucketCache implements BlockCache, HeapSize
{
}
private Set<BlockCacheKey> getAllCacheKeysForFile(String hfileName, long
init, long end) {
+ Set<BlockCacheKey> cacheKeys = new HashSet<>();
+ // At this moment, some bucket entries may be in the WriterThread queue,
and not yet put into
+ // the backingMap. So, when executing this method, we should check both
the RAMCache and
+ // backingMap to ensure all CacheKeys are obtained.
+ // For more details, please refer to HBASE-29862.
+ Set<BlockCacheKey> ramCacheKeySet =
ramCache.getRamBlockCacheKeysForHFile(hfileName);
+ for (BlockCacheKey key : ramCacheKeySet) {
+ if (key.getOffset() >= init && key.getOffset() <= end) {
+ cacheKeys.add(key);
+ }
+ }
+
// These keys are just for comparison and are short lived, so we need only
file name and offset
- return blocksByHFile.subSet(new BlockCacheKey(hfileName, init), true,
- new BlockCacheKey(hfileName, end), true);
+ cacheKeys.addAll(blocksByHFile.subSet(new BlockCacheKey(hfileName, init),
true,
+ new BlockCacheKey(hfileName, end), true));
+ return cacheKeys;
}
/**
@@ -2329,6 +2342,16 @@ public class BucketCache implements BlockCache, HeapSize
{
return delegate.keySet().stream().filter(key ->
key.getHfileName().equals(fileName))
.findFirst().isPresent();
}
+
+ public Set<BlockCacheKey> getRamBlockCacheKeysForHFile(String fileName) {
+ Set<BlockCacheKey> ramCacheKeySet = new HashSet<>();
+ for (BlockCacheKey blockCacheKey : delegate.keySet()) {
+ if (blockCacheKey.getHfileName().equals(fileName)) {
+ ramCacheKeySet.add(blockCacheKey);
+ }
+ }
+ return ramCacheKeySet;
+ }
}
public Map<BlockCacheKey, BucketEntry> getBackingMap() {