This is an automated email from the ASF dual-hosted git repository.
wchevreuil pushed a commit to branch branch-2.6
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2.6 by this push:
new 1602c531b24 HBASE-28657 Backport HBASE-28246 Expose region cached size
over JMX metrics and report in the RS UI (#5565) (#5983)
1602c531b24 is described below
commit 1602c531b245b4d455b48161757cde2ec3d1930b
Author: szucsvillo <[email protected]>
AuthorDate: Mon Jun 17 11:43:52 2024 +0200
HBASE-28657 Backport HBASE-28246 Expose region cached size over JMX metrics
and report in the RS UI (#5565) (#5983)
Signed-off-by: Peter Somogyi <[email protected]>
---
.../regionserver/MetricsRegionServerSource.java | 2 +
.../hbase/regionserver/MetricsRegionWrapper.java | 5 ++
.../regionserver/MetricsRegionSourceImpl.java | 4 +
.../regionserver/TestMetricsRegionSourceImpl.java | 5 ++
.../hbase/tmpl/regionserver/RegionListTmpl.jamon | 3 +
.../apache/hadoop/hbase/io/hfile/BlockCache.java | 23 +++--
.../hadoop/hbase/io/hfile/BlockCacheKey.java | 15 ++++
.../hadoop/hbase/io/hfile/CombinedBlockCache.java | 11 ++-
.../hadoop/hbase/io/hfile/HFileReaderImpl.java | 2 +-
.../hadoop/hbase/io/hfile/bucket/BucketCache.java | 97 +++++++++++-----------
.../hadoop/hbase/regionserver/HRegionServer.java | 25 +++---
.../regionserver/MetricsRegionWrapperImpl.java | 17 +++-
.../io/hfile/TestPrefetchWithBucketCache.java | 27 +++++-
.../regionserver/MetricsRegionWrapperStub.java | 5 ++
14 files changed, 166 insertions(+), 75 deletions(-)
diff --git
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 5220f2d82b2..75269e57181 100644
---
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -632,4 +632,6 @@ public interface MetricsRegionServerSource extends
BaseSource, JvmPauseMonitorSo
String SCANNER_LEASE_EXPIRED_COUNT = "scannerLeaseExpiredCount";
String SCANNER_LEASE_EXPIRED_COUNT_DESC =
"Count of scanners which were expired due to scanner lease timeout";
+ String CURRENT_REGION_CACHE_RATIO = "currentRegionCacheRatio";
+ String CURRENT_REGION_CACHE_RATIO_DESC = "The percentage of caching
completed for this region.";
}
diff --git
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
index 3115603aabf..4d8a028d89b 100644
---
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
+++
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
@@ -65,6 +65,11 @@ public interface MetricsRegionWrapper {
*/
long getStoreFileSize();
+ /**
+ * Gets the current cache % ratio for this region.
+ */
+ float getCurrentRegionCacheRatio();
+
/**
* Get the total number of read requests that have been issued against this
region
*/
diff --git
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
index 0c20456e8cb..92ecaa58088 100644
---
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
+++
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -233,6 +233,10 @@ public class MetricsRegionSourceImpl implements
MetricsRegionSource {
this.regionWrapper.getNumReferenceFiles());
mrb.addGauge(Interns.info(regionNamePrefix +
MetricsRegionServerSource.STOREFILE_SIZE,
MetricsRegionServerSource.STOREFILE_SIZE_DESC),
this.regionWrapper.getStoreFileSize());
+ mrb.addGauge(
+ Interns.info(regionNamePrefix +
MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO,
+ MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO_DESC),
+ this.regionWrapper.getCurrentRegionCacheRatio());
mrb.addCounter(
Interns.info(regionNamePrefix +
MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT,
MetricsRegionSource.COMPACTIONS_COMPLETED_DESC),
diff --git
a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
index 3fe116a11a7..2c8205085d1 100644
---
a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
+++
b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
@@ -116,6 +116,11 @@ public class TestMetricsRegionSourceImpl {
return 0;
}
+ @Override
+ public float getCurrentRegionCacheRatio() {
+ return 0;
+ }
+
@Override
public long getReadRequestCount() {
return 0;
diff --git
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index 0df4d2763b0..e77318437e0 100644
---
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -38,6 +38,7 @@
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
org.apache.hadoop.hbase.client.RegionReplicaUtil;
org.apache.hadoop.hbase.regionserver.MetricsRegionWrapper;
+ org.apache.hadoop.util.StringUtils;
org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
</%import>
<%if (onlineRegions != null && onlineRegions.size() > 0) %>
@@ -172,6 +173,7 @@
<th>Bloom Size</th>
<th>Data Locality</th>
<th>Len Of Biggest Cell</th>
+ <th>% Cached</th>
</tr>
</thead>
@@ -237,6 +239,7 @@
<td><% bloomSizeStr %></td>
<td><% load.getDataLocality() %></td>
<td><% String.format("%,1d", lenOfBiggestCellInRegion) %></td>
+ <td><%
StringUtils.formatPercent(load.getCurrentRegionCachedRatio(), 2) %></td>
</%if>
</tr>
</%for>
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index ed9e7dee5c1..5b11035ebe7 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -154,21 +154,16 @@ public interface BlockCache extends Iterable<CachedBlock>
{
/**
* Notifies the cache implementation that the given file has been fully
cached (all its blocks
* made into the cache).
- * @param fileName the file that has been completely cached.
+ * @param fileName the file that has been completely cached.
+ * @param totalBlockCount the total number of blocks cached for this file.
+ * @param dataBlockCount the number of DATA type blocks cached.
+ * @param size the size, in bytes, cached.
*/
default void notifyFileCachingCompleted(Path fileName, int totalBlockCount,
int dataBlockCount,
long size) {
// noop
}
- /**
- * Notifies the cache implementation that the given file had a block evicted
- * @param fileName the file had a block evicted.
- */
- default void notifyFileBlockEvicted(String fileName) {
- // noop
- }
-
/**
* Checks whether there's enough space left in the cache to accommodate the
passed block. This
* method may not be overridden by all implementing classes. In such cases,
the returned Optional
@@ -230,4 +225,14 @@ public interface BlockCache extends Iterable<CachedBlock> {
default Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
return Optional.empty();
}
+
+ /**
+ * Returns an Optional containing a map of regions and how much of each
region has been
+ * cached so far.
+ * @return empty optional if this method is not supported, otherwise the
returned optional
+ * contains a map of the current per-region cached amounts.
+ */
+ default Optional<Map<String, Long>> getRegionCachedInfo() {
+ return Optional.empty();
+ }
}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
index 1cfdc5868be..bf22d38e373 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.io.hfile;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.yetus.audience.InterfaceAudience;
@@ -31,6 +32,7 @@ public class BlockCacheKey implements HeapSize,
java.io.Serializable {
private final long offset;
private BlockType blockType;
private final boolean isPrimaryReplicaBlock;
+ private Path filePath;
/**
* Construct a new BlockCacheKey
@@ -49,6 +51,14 @@ public class BlockCacheKey implements HeapSize,
java.io.Serializable {
this.blockType = blockType;
}
+ public BlockCacheKey(Path hfilePath, long offset, boolean isPrimaryReplica,
BlockType blockType) {
+ this.filePath = hfilePath;
+ this.isPrimaryReplicaBlock = isPrimaryReplica;
+ this.hfileName = hfilePath.getName();
+ this.offset = offset;
+ this.blockType = blockType;
+ }
+
@Override
public int hashCode() {
return hfileName.hashCode() * 127 + (int) (offset ^ (offset >>> 32));
@@ -102,4 +112,9 @@ public class BlockCacheKey implements HeapSize,
java.io.Serializable {
public void setBlockType(BlockType blockType) {
this.blockType = blockType;
}
+
+ public Path getFilePath() {
+ return filePath;
+ }
+
}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index 2af21947598..3a2d4ccc25d 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -429,6 +429,11 @@ public class CombinedBlockCache implements
ResizableBlockCache, HeapSize {
return this.l2Cache.getFullyCachedFiles();
}
+ @Override
+ public Optional<Map<String, Long>> getRegionCachedInfo() {
+ return l2Cache.getRegionCachedInfo();
+ }
+
@Override
public void setMaxSize(long size) {
this.l1Cache.setMaxSize(size);
@@ -457,12 +462,6 @@ public class CombinedBlockCache implements
ResizableBlockCache, HeapSize {
}
- @Override
- public void notifyFileBlockEvicted(String fileName) {
- l1Cache.notifyFileBlockEvicted(fileName);
- l1Cache.notifyFileBlockEvicted(fileName);
- }
-
@Override
public Optional<Boolean> blockFitsIntoTheCache(HFileBlock block) {
if (isMetaBlock(block.getBlockType())) {
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index caf875a89d6..ace662414f4 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1290,7 +1290,7 @@ public abstract class HFileReaderImpl implements
HFile.Reader, Configurable {
// from doing).
BlockCacheKey cacheKey =
- new BlockCacheKey(name, dataBlockOffset, this.isPrimaryReplicaReader(),
expectedBlockType);
+ new BlockCacheKey(path, dataBlockOffset, this.isPrimaryReplicaReader(),
expectedBlockType);
Attributes attributes = Attributes.of(BLOCK_CACHE_KEY_KEY,
cacheKey.toString());
boolean cacheable = cacheBlock && cacheIfCompactionsOff();
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index c8111522c65..643f3d8d93d 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -160,7 +160,7 @@ public class BucketCache implements BlockCache, HeapSize {
* Map of region -> total size of the region prefetched on this region
server. This is the total
* size of hFiles for this region prefetched on this region server
*/
- final Map<String, Long> regionCachedSizeMap = new ConcurrentHashMap<>();
+ final Map<String, Long> regionCachedSize = new ConcurrentHashMap<>();
private BucketCachePersister cachePersister;
@@ -334,7 +334,7 @@ public class BucketCache implements BlockCache, HeapSize {
fullyCachedFiles.clear();
backingMapValidated.set(true);
bucketAllocator = new BucketAllocator(capacity, bucketSizes);
- regionCachedSizeMap.clear();
+ regionCachedSize.clear();
}
} else {
bucketAllocator = new BucketAllocator(capacity, bucketSizes);
@@ -606,7 +606,7 @@ public class BucketCache implements BlockCache, HeapSize {
// the cache map state might differ from the actual cache. If we reach
this block,
// we should remove the cache key entry from the backing map
backingMap.remove(key);
- removeFileFromPrefetch(key.getHfileName());
+ fileNotFullyCached(key.getHfileName());
LOG.debug("Failed to fetch block for cache key: {}.", key, hioex);
} catch (IOException ioex) {
LOG.error("Failed reading block " + key + " from bucket cache", ioex);
@@ -631,7 +631,7 @@ public class BucketCache implements BlockCache, HeapSize {
if (decrementBlockNumber) {
this.blockNumber.decrement();
if (ioEngine.isPersistent()) {
- removeFileFromPrefetch(cacheKey.getHfileName());
+ fileNotFullyCached(cacheKey.getHfileName());
}
}
if (evictedByEvictionProcess) {
@@ -642,6 +642,42 @@ public class BucketCache implements BlockCache, HeapSize {
}
}
+ private void fileNotFullyCached(String hfileName) {
+ // Update the regionCachedSize map before removing the file from
fullyCachedFiles
+ if (fullyCachedFiles.containsKey(hfileName)) {
+ Pair<String, Long> regionEntry = fullyCachedFiles.get(hfileName);
+ String regionEncodedName = regionEntry.getFirst();
+ long filePrefetchSize = regionEntry.getSecond();
+ LOG.debug("Removing file {} for region {}", hfileName,
regionEncodedName);
+ regionCachedSize.computeIfPresent(regionEncodedName, (rn, pf) -> pf -
filePrefetchSize);
+ // If all the blocks for a region are evicted from the cache, remove the
entry for that region
+ if (
+ regionCachedSize.containsKey(regionEncodedName)
+ && regionCachedSize.get(regionEncodedName) == 0
+ ) {
+ regionCachedSize.remove(regionEncodedName);
+ }
+ }
+ fullyCachedFiles.remove(hfileName);
+ }
+
+ public void fileCacheCompleted(Path filePath, long size) {
+ Pair<String, Long> pair = new Pair<>();
+ // sets the region name
+ String regionName = filePath.getParent().getParent().getName();
+ pair.setFirst(regionName);
+ pair.setSecond(size);
+ fullyCachedFiles.put(filePath.getName(), pair);
+ }
+
+ private void updateRegionCachedSize(Path filePath, long cachedSize) {
+ if (filePath != null) {
+ String regionName = filePath.getParent().getParent().getName();
+ regionCachedSize.merge(regionName, cachedSize,
+ (previousSize, newBlockSize) -> previousSize + newBlockSize);
+ }
+ }
+
/**
* Free the {{@link BucketEntry} actually,which could only be invoked when
the
* {@link BucketEntry#refCnt} becoming 0.
@@ -1074,6 +1110,7 @@ public class BucketCache implements BlockCache, HeapSize {
protected void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry)
{
BucketEntry previousEntry = backingMap.put(key, bucketEntry);
blocksByHFile.add(key);
+ updateRegionCachedSize(key.getFilePath(), bucketEntry.getLength());
if (previousEntry != null && previousEntry != bucketEntry) {
previousEntry.withWriteLock(offsetLock, () -> {
blockEvicted(key, previousEntry, false, false);
@@ -1295,8 +1332,9 @@ public class BucketCache implements BlockCache, HeapSize {
return ioEngine.isPersistent() && persistencePath != null;
}
- public Map<String, Long> getRegionCachedInfo() {
- return Collections.unmodifiableMap(regionCachedSizeMap);
+ @Override
+ public Optional<Map<String, Long>> getRegionCachedInfo() {
+ return Optional.of(Collections.unmodifiableMap(regionCachedSize));
}
/**
@@ -1333,17 +1371,17 @@ public class BucketCache implements BlockCache,
HeapSize {
}
private void updateRegionSizeMapWhileRetrievingFromFile() {
- // Update the regionCachedSizeMap with the region size while restarting
the region server
+ // Update the regionCachedSize with the region size while restarting the
region server
if (LOG.isDebugEnabled()) {
LOG.debug("Updating region size map after retrieving cached file list");
dumpPrefetchList();
}
- regionCachedSizeMap.clear();
+ regionCachedSize.clear();
fullyCachedFiles.forEach((hFileName, hFileSize) -> {
// Get the region name for each file
String regionEncodedName = hFileSize.getFirst();
long cachedFileSize = hFileSize.getSecond();
- regionCachedSizeMap.merge(regionEncodedName, cachedFileSize,
+ regionCachedSize.merge(regionEncodedName, cachedFileSize,
(oldpf, fileSize) -> oldpf + fileSize);
});
}
@@ -1448,7 +1486,7 @@ public class BucketCache implements BlockCache, HeapSize {
} catch (IOException e1) {
LOG.debug("Check for key {} failed. Evicting.",
keyEntry.getKey());
evictBlock(keyEntry.getKey());
- removeFileFromPrefetch(keyEntry.getKey().getHfileName());
+ fileNotFullyCached(keyEntry.getKey().getHfileName());
}
}
backingMapValidated.set(true);
@@ -1505,7 +1543,7 @@ public class BucketCache implements BlockCache, HeapSize {
this.backingMap.clear();
this.blocksByHFile.clear();
this.fullyCachedFiles.clear();
- this.regionCachedSizeMap.clear();
+ this.regionCachedSize.clear();
}
}
@@ -1605,7 +1643,7 @@ public class BucketCache implements BlockCache, HeapSize {
*/
@Override
public int evictBlocksByHfileName(String hfileName) {
- removeFileFromPrefetch(hfileName);
+ fileNotFullyCached(hfileName);
Set<BlockCacheKey> keySet = getAllCacheKeysForFile(hfileName);
int numEvicted = 0;
for (BlockCacheKey key : keySet) {
@@ -2089,11 +2127,6 @@ public class BucketCache implements BlockCache, HeapSize
{
}
}
- @Override
- public void notifyFileBlockEvicted(String fileName) {
- removeFileFromPrefetch(fileName);
- }
-
@Override
public Optional<Boolean> blockFitsIntoTheCache(HFileBlock block) {
long currentUsed = bucketAllocator.getUsedSize();
@@ -2122,34 +2155,4 @@ public class BucketCache implements BlockCache, HeapSize
{
}
}
-
- private void removeFileFromPrefetch(String hfileName) {
- // Update the regionPrefetchedSizeMap before removing the file from
prefetchCompleted
- if (fullyCachedFiles.containsKey(hfileName)) {
- Pair<String, Long> regionEntry = fullyCachedFiles.get(hfileName);
- String regionEncodedName = regionEntry.getFirst();
- long filePrefetchSize = regionEntry.getSecond();
- LOG.debug("Removing file {} for region {}", hfileName,
regionEncodedName);
- regionCachedSizeMap.computeIfPresent(regionEncodedName, (rn, pf) -> pf -
filePrefetchSize);
- // If all the blocks for a region are evicted from the cache, remove the
entry for that region
- if (
- regionCachedSizeMap.containsKey(regionEncodedName)
- && regionCachedSizeMap.get(regionEncodedName) == 0
- ) {
- regionCachedSizeMap.remove(regionEncodedName);
- }
- }
- fullyCachedFiles.remove(hfileName);
- }
-
- public void fileCacheCompleted(Path filePath, long size) {
- Pair<String, Long> pair = new Pair<>();
- // sets the region name
- String regionName = filePath.getParent().getParent().getName();
- pair.setFirst(regionName);
- pair.setSecond(size);
- fullyCachedFiles.put(filePath.getName(), pair);
- regionCachedSizeMap.merge(regionName, size, (oldpf, fileSize) -> oldpf +
fileSize);
- }
-
}
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 2b219898869..c50d964ca50 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1517,11 +1517,15 @@ public class HRegionServer extends Thread
serverLoad.addCoprocessors(coprocessorBuilder.setName(coprocessor).build());
}
}
- computeIfPersistentBucketCache(bc -> {
- bc.getRegionCachedInfo().forEach((regionName, prefetchSize) -> {
- serverLoad.putRegionCachedInfo(regionName, roundSize(prefetchSize,
unitMB));
+
+ getBlockCache().ifPresent(cache -> {
+ cache.getRegionCachedInfo().ifPresent(regionCachedInfo -> {
+ regionCachedInfo.forEach((regionName, prefetchSize) -> {
+ serverLoad.putRegionCachedInfo(regionName, roundSize(prefetchSize,
unitMB));
+ });
});
});
+
serverLoad.setReportStartTime(reportStartTime);
serverLoad.setReportEndTime(reportEndTime);
if (this.infoServer != null) {
@@ -1904,13 +1908,14 @@ public class HRegionServer extends Thread
int totalStaticBloomSizeKB = roundSize(totalStaticBloomSize, unitKB);
int regionSizeMB = roundSize(totalRegionSize, unitMB);
final MutableFloat currentRegionCachedRatio = new MutableFloat(0.0f);
- computeIfPersistentBucketCache(bc -> {
- if (bc.getRegionCachedInfo().containsKey(regionEncodedName)) {
- currentRegionCachedRatio.setValue(regionSizeMB == 0
- ? 0.0f
- : (float) roundSize(bc.getRegionCachedInfo().get(regionEncodedName),
unitMB)
- / regionSizeMB);
- }
+ getBlockCache().ifPresent(bc -> {
+ bc.getRegionCachedInfo().ifPresent(regionCachedInfo -> {
+ if (regionCachedInfo.containsKey(regionEncodedName)) {
+ currentRegionCachedRatio.setValue(regionSizeMB == 0
+ ? 0.0f
+ : (float) roundSize(regionCachedInfo.get(regionEncodedName),
unitMB) / regionSizeMB);
+ }
+ });
});
HDFSBlocksDistribution hdfsBd = r.getHDFSBlocksDistribution();
diff --git
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
index 1402512fdc3..bce961e8f27 100644
---
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
+++
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
@@ -26,6 +26,7 @@ import java.util.OptionalLong;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
+import org.apache.commons.lang3.mutable.MutableLong;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -62,6 +63,8 @@ public class MetricsRegionWrapperImpl implements
MetricsRegionWrapper, Closeable
private ScheduledFuture<?> regionMetricsUpdateTask;
+ private float currentRegionCacheRatio;
+
public MetricsRegionWrapperImpl(HRegion region) {
this.region = region;
this.executor =
CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
@@ -121,6 +124,10 @@ public class MetricsRegionWrapperImpl implements
MetricsRegionWrapper, Closeable
return storeFileSize;
}
+ public float getCurrentRegionCacheRatio() {
+ return currentRegionCacheRatio;
+ }
+
@Override
public long getStoreRefCount() {
return storeRefCount;
@@ -310,7 +317,15 @@ public class MetricsRegionWrapperImpl implements
MetricsRegionWrapper, Closeable
readsOnlyFromMemstore.put(store.getColumnFamilyName(), tempVal);
}
}
-
+ MutableLong regionCachedAmount = new MutableLong(0);
+ region.getBlockCache().getRegionCachedInfo().ifPresent(regionCacheRatio
-> regionCachedAmount
+
.addAndGet(regionCacheRatio.getOrDefault(region.getRegionInfo().getEncodedName(),
0L)));
+ if (tempStoreFileSize > 0) {
+ LOG.debug("Region {}, had cached {} bytes from a total of {}",
+ region.getRegionInfo().getEncodedName(),
regionCachedAmount.getValue(),
+ tempStoreFileSize);
+ currentRegionCacheRatio = regionCachedAmount.floatValue() /
tempStoreFileSize;
+ }
numStoreFiles = tempNumStoreFiles;
storeRefCount = tempStoreRefCount;
maxCompactedStoreFileRefCount = tempMaxCompactedStoreFileRefCount;
diff --git
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java
index addea8297df..db8f2213d0c 100644
---
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java
+++
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java
@@ -32,6 +32,7 @@ import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
+import org.apache.commons.lang3.mutable.MutableLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -185,6 +186,30 @@ public class TestPrefetchWithBucketCache {
assertTrue(bc.getStats().getEvictedCount() > 200);
}
+ @Test
+ public void testPrefetchMetricProgress() throws Exception {
+ conf.setLong(BUCKET_CACHE_SIZE_KEY, 200);
+ blockCache = BlockCacheFactory.createBlockCache(conf);
+ cacheConf = new CacheConfig(conf, blockCache);
+ Path storeFile = writeStoreFile("testPrefetchMetricsProgress", 100);
+ // Prefetches the file blocks
+ LOG.debug("First read should prefetch the blocks.");
+ readStoreFile(storeFile);
+ String regionName = storeFile.getParent().getParent().getName();
+ BucketCache bc =
BucketCache.getBucketCacheFromCacheConfig(cacheConf).get();
+ MutableLong regionCachedSize = new MutableLong(0);
+ // Our file should have 6 DATA blocks. We should wait for all of them to
be cached
+ long waitedTime = Waiter.waitFor(conf, 300, () -> {
+ if (bc.getBackingMap().size() > 0) {
+ long currentSize = bc.getRegionCachedInfo().get().get(regionName);
+ assertTrue(regionCachedSize.getValue() <= currentSize);
+ LOG.debug("Logging progress of region caching: {}", currentSize);
+ regionCachedSize.setValue(currentSize);
+ }
+ return bc.getBackingMap().size() == 6;
+ });
+ }
+
private void readStoreFile(Path storeFilePath) throws Exception {
readStoreFile(storeFilePath, (r, o) -> {
HFileBlock block = null;
@@ -216,6 +241,7 @@ public class TestPrefetchWithBucketCache {
Thread.sleep(1000);
}
long offset = 0;
+ long sizeForDataBlocks = 0;
while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
HFileBlock block = readFunction.apply(reader, offset);
BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
offset);
@@ -276,5 +302,4 @@ public class TestPrefetchWithBucketCache {
return keyType;
}
}
-
}
diff --git
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
index a99212cb9de..0995b0faee0 100644
---
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
+++
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
@@ -82,6 +82,11 @@ public class MetricsRegionWrapperStub implements
MetricsRegionWrapper {
return 104;
}
+ @Override
+ public float getCurrentRegionCacheRatio() {
+ return 0;
+ }
+
@Override
public long getReadRequestCount() {
return 105;