This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2 by this push:
new d170ba9ac17 HBASE-29862 Test case TestClearRegionBlockCache#testClearBlockCache failed (#7697)
d170ba9ac17 is described below
commit d170ba9ac170fdd15811e920aa0f97344bd1b805
Author: Peng Lu <[email protected]>
AuthorDate: Fri Feb 27 23:01:24 2026 +0800
HBASE-29862 Test case TestClearRegionBlockCache#testClearBlockCache failed (#7697)
Signed-off-by: Duo Zhang <[email protected]>
Reviewed-by: Liu Xiao <[email protected]>
(cherry picked from commit 41d415545ace820b351e344fe8ddc15f29e66011)
---
.../hadoop/hbase/io/hfile/bucket/BucketCache.java | 27 ++++++++++++++++++++--
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index eeeb204fc2d..1c9915b3239 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -1951,9 +1951,22 @@ public class BucketCache implements BlockCache, HeapSize {
   }
 
   private Set<BlockCacheKey> getAllCacheKeysForFile(String hfileName, long init, long end) {
+    Set<BlockCacheKey> cacheKeys = new HashSet<>();
+    // At this moment, Some Bucket Entries may be in the WriterThread queue, and not yet put into
+    // the backingMap. So, when executing this method, we should check both the RAMCache and
+    // backingMap to ensure all CacheKeys are obtained.
+    // For more details, please refer to HBASE-29862.
+    Set<BlockCacheKey> ramCacheKeySet = ramCache.getRamBlockCacheKeysForHFile(hfileName);
+    for (BlockCacheKey key : ramCacheKeySet) {
+      if (key.getOffset() >= init && key.getOffset() <= end) {
+        cacheKeys.add(key);
+      }
+    }
+
     // These keys are just for comparison and are short lived, so we need only file name and offset
-    return blocksByHFile.subSet(new BlockCacheKey(hfileName, init), true,
-      new BlockCacheKey(hfileName, end), true);
+    cacheKeys.addAll(blocksByHFile.subSet(new BlockCacheKey(hfileName, init), true,
+      new BlockCacheKey(hfileName, end), true));
+    return cacheKeys;
   }
 
   /**
@@ -2348,6 +2361,16 @@ public class BucketCache implements BlockCache, HeapSize {
       return delegate.keySet().stream().filter(key -> key.getHfileName().equals(fileName))
         .findFirst().isPresent();
     }
+
+    public Set<BlockCacheKey> getRamBlockCacheKeysForHFile(String fileName) {
+      Set<BlockCacheKey> ramCacheKeySet = new HashSet<>();
+      for (BlockCacheKey blockCacheKey : delegate.keySet()) {
+        if (blockCacheKey.getHfileName().equals(fileName)) {
+          ramCacheKeySet.add(blockCacheKey);
+        }
+      }
+      return ramCacheKeySet;
+    }
   }
 
   public Map<BlockCacheKey, BucketEntry> getBackingMap() {