This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-27389-rebase
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 993326e147d6e35a0e4be571ebeab29c54fbac0d
Author: Rahul Agarkar <[email protected]>
AuthorDate: Tue Aug 29 15:36:23 2023 +0530

    HBASE-27998 Enhance region metrics to include prefetch ratio for each… 
(#5342)
    
    Signed-off-by: Wellington Chevreuil <[email protected]>
---
 .../org/apache/hadoop/hbase/RegionMetrics.java     |  6 ++
 .../apache/hadoop/hbase/RegionMetricsBuilder.java  | 38 +++++++++-
 .../org/apache/hadoop/hbase/ServerMetrics.java     |  6 ++
 .../apache/hadoop/hbase/ServerMetricsBuilder.java  | 20 ++++-
 .../src/main/protobuf/PrefetchPersistence.proto    | 36 ---------
 .../src/main/protobuf/server/ClusterStatus.proto   | 11 +++
 .../main/protobuf/server/io/BucketCacheEntry.proto |  8 +-
 .../apache/hadoop/hbase/io/hfile/BlockCache.java   |  3 +-
 .../hadoop/hbase/io/hfile/CombinedBlockCache.java  |  3 +-
 .../hadoop/hbase/io/hfile/HFilePreadReader.java    |  6 +-
 .../hadoop/hbase/io/hfile/PrefetchProtoUtils.java  | 53 --------------
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  | 85 +++++++++++++++++++---
 .../hbase/io/hfile/bucket/BucketProtoUtils.java    | 26 ++++++-
 .../hadoop/hbase/regionserver/HRegionServer.java   | 40 +++++++++-
 .../org/apache/hadoop/hbase/TestServerMetrics.java | 18 +++--
 .../hbase/master/TestRegionsRecoveryChore.java     | 14 ++++
 16 files changed, 251 insertions(+), 122 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
index 47b36a7a151..b029d028856 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
@@ -138,4 +138,10 @@ public interface RegionMetrics {
 
   /** Returns the compaction state of this region */
   CompactionState getCompactionState();
+
+  /** Returns the total size of the hfiles in the region */
+  Size getRegionSizeMB();
+
+  /** Returns the current cached ratio of this region on this server */
+  float getCurrentRegionCachedRatio();
 }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
index 43b3a17aac1..d3361693079 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
@@ -80,7 +80,8 @@ public final class RegionMetricsBuilder {
           ClusterStatusProtos.StoreSequenceId::getSequenceId)))
       .setUncompressedStoreFileSize(
         new Size(regionLoadPB.getStoreUncompressedSizeMB(), 
Size.Unit.MEGABYTE))
-      .build();
+      .setRegionSizeMB(new Size(regionLoadPB.getRegionSizeMB(), 
Size.Unit.MEGABYTE))
+      
.setCurrentRegionCachedRatio(regionLoadPB.getCurrentRegionCachedRatio()).build();
   }
 
   private static List<ClusterStatusProtos.StoreSequenceId>
@@ -120,7 +121,8 @@ public final class RegionMetricsBuilder {
       
.addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId()))
       .setStoreUncompressedSizeMB(
         (int) 
regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))
-      .build();
+      .setRegionSizeMB((int) 
regionMetrics.getRegionSizeMB().get(Size.Unit.MEGABYTE))
+      
.setCurrentRegionCachedRatio(regionMetrics.getCurrentRegionCachedRatio()).build();
   }
 
   public static RegionMetricsBuilder newBuilder(byte[] name) {
@@ -154,6 +156,8 @@ public final class RegionMetricsBuilder {
   private long blocksLocalWithSsdWeight;
   private long blocksTotalWeight;
   private CompactionState compactionState;
+  private Size regionSizeMB = Size.ZERO;
+  private float currentRegionCachedRatio;
 
   private RegionMetricsBuilder(byte[] name) {
     this.name = name;
@@ -289,6 +293,16 @@ public final class RegionMetricsBuilder {
     return this;
   }
 
+  public RegionMetricsBuilder setRegionSizeMB(Size value) {
+    this.regionSizeMB = value;
+    return this;
+  }
+
+  public RegionMetricsBuilder setCurrentRegionCachedRatio(float value) {
+    this.currentRegionCachedRatio = value;
+    return this;
+  }
+
   public RegionMetrics build() {
     return new RegionMetricsImpl(name, storeCount, storeFileCount, 
storeRefCount,
       maxCompactedStoreFileRefCount, compactingCellCount, compactedCellCount, 
storeFileSize,
@@ -296,7 +310,7 @@ public final class RegionMetricsBuilder {
       uncompressedStoreFileSize, writeRequestCount, readRequestCount, 
cpRequestCount,
       filteredReadRequestCount, completedSequenceId, storeSequenceIds, 
dataLocality,
       lastMajorCompactionTimestamp, dataLocalityForSsd, blocksLocalWeight, 
blocksLocalWithSsdWeight,
-      blocksTotalWeight, compactionState);
+      blocksTotalWeight, compactionState, regionSizeMB, 
currentRegionCachedRatio);
   }
 
   private static class RegionMetricsImpl implements RegionMetrics {
@@ -327,6 +341,8 @@ public final class RegionMetricsBuilder {
     private final long blocksLocalWithSsdWeight;
     private final long blocksTotalWeight;
     private final CompactionState compactionState;
+    private final Size regionSizeMB;
+    private final float currentRegionCachedRatio;
 
     RegionMetricsImpl(byte[] name, int storeCount, int storeFileCount, int 
storeRefCount,
       int maxCompactedStoreFileRefCount, final long compactingCellCount, long 
compactedCellCount,
@@ -336,7 +352,7 @@ public final class RegionMetricsBuilder {
       long filteredReadRequestCount, long completedSequenceId, Map<byte[], 
Long> storeSequenceIds,
       float dataLocality, long lastMajorCompactionTimestamp, float 
dataLocalityForSsd,
       long blocksLocalWeight, long blocksLocalWithSsdWeight, long 
blocksTotalWeight,
-      CompactionState compactionState) {
+      CompactionState compactionState, Size regionSizeMB, float 
currentRegionCachedRatio) {
       this.name = Preconditions.checkNotNull(name);
       this.storeCount = storeCount;
       this.storeFileCount = storeFileCount;
@@ -364,6 +380,8 @@ public final class RegionMetricsBuilder {
       this.blocksLocalWithSsdWeight = blocksLocalWithSsdWeight;
       this.blocksTotalWeight = blocksTotalWeight;
       this.compactionState = compactionState;
+      this.regionSizeMB = regionSizeMB;
+      this.currentRegionCachedRatio = currentRegionCachedRatio;
     }
 
     @Override
@@ -501,6 +519,16 @@ public final class RegionMetricsBuilder {
       return compactionState;
     }
 
+    @Override
+    public Size getRegionSizeMB() {
+      return regionSizeMB;
+    }
+
+    @Override
+    public float getCurrentRegionCachedRatio() {
+      return currentRegionCachedRatio;
+    }
+
     @Override
     public String toString() {
       StringBuilder sb =
@@ -541,6 +569,8 @@ public final class RegionMetricsBuilder {
       Strings.appendKeyValue(sb, "blocksLocalWithSsdWeight", 
blocksLocalWithSsdWeight);
       Strings.appendKeyValue(sb, "blocksTotalWeight", blocksTotalWeight);
       Strings.appendKeyValue(sb, "compactionState", compactionState);
+      Strings.appendKeyValue(sb, "regionSizeMB", regionSizeMB);
+      Strings.appendKeyValue(sb, "currentRegionCachedRatio", 
currentRegionCachedRatio);
       return sb.toString();
     }
   }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
index 2684886ba3d..2cf55a1abdc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java
@@ -106,4 +106,10 @@ public interface ServerMetrics {
   @Nullable
   List<ServerTask> getTasks();
 
+  /**
+   * Returns the region cache information for the regions hosted on this server
+   * @return map of region encoded name and the size of the region cached on 
this region server
+   *         rounded to MB
+   */
+  Map<String, Integer> getRegionCachedInfo();
 }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
index 7a0312f22fd..c7aea21e845 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
@@ -85,6 +85,7 @@ public final class ServerMetricsBuilder {
         : null)
       
.setTasks(serverLoadPB.getTasksList().stream().map(ProtobufUtil::getServerTask)
         .collect(Collectors.toList()))
+      .setRegionCachedInfo(serverLoadPB.getRegionCachedInfoMap())
       .setReportTimestamp(serverLoadPB.getReportEndTime())
       
.setLastReportTimestamp(serverLoadPB.getReportStartTime()).setVersionNumber(versionNumber)
       .setVersion(version).build();
@@ -111,6 +112,7 @@ public final class ServerMetricsBuilder {
         
.map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList()))
       .addAllTasks(
         
metrics.getTasks().stream().map(ProtobufUtil::toServerTask).collect(Collectors.toList()))
+      .putAllRegionCachedInfo(metrics.getRegionCachedInfo())
       .setReportStartTime(metrics.getLastReportTimestamp())
       .setReportEndTime(metrics.getReportTimestamp());
     if (metrics.getReplicationLoadSink() != null) {
@@ -142,6 +144,7 @@ public final class ServerMetricsBuilder {
   private long reportTimestamp = EnvironmentEdgeManager.currentTime();
   private long lastReportTimestamp = 0;
   private final List<ServerTask> tasks = new ArrayList<>();
+  private Map<String, Integer> regionCachedInfo = new HashMap<>();
 
   private ServerMetricsBuilder(ServerName serverName) {
     this.serverName = serverName;
@@ -232,11 +235,16 @@ public final class ServerMetricsBuilder {
     return this;
   }
 
+  public ServerMetricsBuilder setRegionCachedInfo(Map<String, Integer> value) {
+    this.regionCachedInfo = value;
+    return this;
+  }
+
   public ServerMetrics build() {
     return new ServerMetricsImpl(serverName, versionNumber, version, 
requestCountPerSecond,
       requestCount, readRequestCount, writeRequestCount, usedHeapSize, 
maxHeapSize, infoServerPort,
       sources, sink, regionStatus, coprocessorNames, reportTimestamp, 
lastReportTimestamp,
-      userMetrics, tasks);
+      userMetrics, tasks, regionCachedInfo);
   }
 
   private static class ServerMetricsImpl implements ServerMetrics {
@@ -259,13 +267,15 @@ public final class ServerMetricsBuilder {
     private final long lastReportTimestamp;
     private final Map<byte[], UserMetrics> userMetrics;
     private final List<ServerTask> tasks;
+    private final Map<String, Integer> regionCachedInfo;
 
     ServerMetricsImpl(ServerName serverName, int versionNumber, String version,
       long requestCountPerSecond, long requestCount, long readRequestsCount,
       long writeRequestsCount, Size usedHeapSize, Size maxHeapSize, int 
infoServerPort,
       List<ReplicationLoadSource> sources, ReplicationLoadSink sink,
       Map<byte[], RegionMetrics> regionStatus, Set<String> coprocessorNames, 
long reportTimestamp,
-      long lastReportTimestamp, Map<byte[], UserMetrics> userMetrics, 
List<ServerTask> tasks) {
+      long lastReportTimestamp, Map<byte[], UserMetrics> userMetrics, 
List<ServerTask> tasks,
+      Map<String, Integer> regionCachedInfo) {
       this.serverName = Preconditions.checkNotNull(serverName);
       this.versionNumber = versionNumber;
       this.version = version;
@@ -284,6 +294,7 @@ public final class ServerMetricsBuilder {
       this.reportTimestamp = reportTimestamp;
       this.lastReportTimestamp = lastReportTimestamp;
       this.tasks = tasks;
+      this.regionCachedInfo = regionCachedInfo;
     }
 
     @Override
@@ -386,6 +397,11 @@ public final class ServerMetricsBuilder {
       return tasks;
     }
 
+    @Override
+    public Map<String, Integer> getRegionCachedInfo() {
+      return Collections.unmodifiableMap(regionCachedInfo);
+    }
+
     @Override
     public String toString() {
       int storeCount = 0;
diff --git a/hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto 
b/hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto
deleted file mode 100644
index a024b94baa6..00000000000
--- a/hbase-protocol-shaded/src/main/protobuf/PrefetchPersistence.proto
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-syntax = "proto2";
-
-package hbase.pb;
-
-option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
-option java_outer_classname = "PersistentPrefetchProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-option optimize_for = SPEED;
-
-
-message PrefetchedHfileName {
-  map<string, RegionFileSizeMap> prefetched_files = 1;
-}
-
-message RegionFileSizeMap {
-  required string region_name = 1;
-  required uint64 region_prefetch_size = 2;
-}
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto 
b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto
index 28cc5a865c2..58fd3c8d2a5 100644
--- a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto
@@ -177,6 +177,12 @@ message RegionLoad {
     MAJOR = 2;
     MAJOR_AND_MINOR = 3;
   }
+
+  /** Total region size in MB */
+  optional uint32 region_size_MB = 28;
+
+  /** Current region cache ratio on this server */
+  optional float current_region_cached_ratio = 29;
 }
 
 message UserLoad {
@@ -315,6 +321,11 @@ message ServerLoad {
    * The active monitored tasks
    */
   repeated ServerTask tasks = 15;
+
+  /**
+   * The metrics for region cached on this region server
+   */
+  map<string, uint32> regionCachedInfo = 16;
 }
 
 message LiveServerInfo {
diff --git 
a/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto 
b/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto
index ae1980fe51e..80fc10ada78 100644
--- a/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/server/io/BucketCacheEntry.proto
@@ -32,7 +32,7 @@ message BucketCacheEntry {
   map<int32, string> deserializers = 4;
   required BackingMap backing_map = 5;
   optional bytes checksum = 6;
-  map<string, bool> prefetched_files = 7;
+  map<string, RegionFileSizeMap> cached_files = 7;
 }
 
 message BackingMap {
@@ -81,3 +81,9 @@ enum BlockPriority {
   multi = 1;
   memory = 2;
 }
+
+message RegionFileSizeMap {
+  required string region_name = 1;
+  required uint64 region_cached_size = 2;
+}
+
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index e480c9b5789..91ebaaabd42 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io.hfile;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Optional;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -167,7 +168,7 @@ public interface BlockCache extends Iterable<CachedBlock> {
   /**
    * Returns the list of fully cached files
    */
-  default Optional<Map<String, Boolean>> getFullyCachedFiles() {
+  default Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
     return Optional.empty();
   }
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index a421dfc83aa..1e0fe770929 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -22,6 +22,7 @@ import java.util.Map;
 import java.util.Optional;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -431,7 +432,7 @@ public class CombinedBlockCache implements 
ResizableBlockCache, HeapSize {
    * Returns the list of fully cached files
    */
   @Override
-  public Optional<Map<String, Boolean>> getFullyCachedFiles() {
+  public Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
     return this.l2Cache.getFullyCachedFiles();
   }
 
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
index c9768bab034..7cdbd5aff48 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java
@@ -115,7 +115,8 @@ public class HFilePreadReader extends HFileReaderImpl {
                 block.release();
               }
             }
-            bucketCacheOptional.ifPresent(bc -> 
bc.fileCacheCompleted(path.getName()));
+            final long fileSize = offset;
+            bucketCacheOptional.ifPresent(bc -> bc.fileCacheCompleted(path, 
fileSize));
           } catch (IOException e) {
             // IOExceptions are probably due to region closes (relocation, 
etc.)
             if (LOG.isTraceEnabled()) {
@@ -132,8 +133,7 @@ public class HFilePreadReader extends HFileReaderImpl {
                 LOG.warn("Close prefetch stream reader failed, path: " + path, 
e);
               }
             }
-            String regionName = getRegionName(path);
-            PrefetchExecutor.complete(regionName, path, offset);
+            PrefetchExecutor.complete(path);
           }
         }
       });
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java
deleted file mode 100644
index df67e4429a2..00000000000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/PrefetchProtoUtils.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile;
-
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.hadoop.hbase.util.Pair;
-
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.PersistentPrefetchProtos;
-
-final class PrefetchProtoUtils {
-  private PrefetchProtoUtils() {
-  }
-
-  static PersistentPrefetchProtos.PrefetchedHfileName
-    toPB(Map<String, Pair<String, Long>> prefetchedHfileNames) {
-    Map<String, PersistentPrefetchProtos.RegionFileSizeMap> tmpMap = new 
HashMap<>();
-    prefetchedHfileNames.forEach((hFileName, regionPrefetchMap) -> {
-      PersistentPrefetchProtos.RegionFileSizeMap tmpRegionFileSize =
-        PersistentPrefetchProtos.RegionFileSizeMap.newBuilder()
-          .setRegionName(regionPrefetchMap.getFirst())
-          .setRegionPrefetchSize(regionPrefetchMap.getSecond()).build();
-      tmpMap.put(hFileName, tmpRegionFileSize);
-    });
-    return 
PersistentPrefetchProtos.PrefetchedHfileName.newBuilder().putAllPrefetchedFiles(tmpMap)
-      .build();
-  }
-
-  static Map<String, Pair<String, Long>>
-    fromPB(Map<String, PersistentPrefetchProtos.RegionFileSizeMap> 
prefetchHFileNames) {
-    Map<String, Pair<String, Long>> hFileMap = new HashMap<>();
-    prefetchHFileNames.forEach((hFileName, regionPrefetchMap) -> {
-      hFileMap.put(hFileName,
-        new Pair<>(regionPrefetchMap.getRegionName(), 
regionPrefetchMap.getRegionPrefetchSize()));
-    });
-    return hFileMap;
-  }
-}
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index e3d74038308..eeed85ef0c3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -25,6 +25,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -51,6 +52,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.Consumer;
 import java.util.function.Function;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.TableName;
@@ -79,6 +81,7 @@ import org.apache.hadoop.hbase.util.IdReadWriteLock;
 import org.apache.hadoop.hbase.util.IdReadWriteLockStrongRef;
 import org.apache.hadoop.hbase.util.IdReadWriteLockWithObjectPool;
 import 
org.apache.hadoop.hbase.util.IdReadWriteLockWithObjectPool.ReferenceType;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -151,8 +154,17 @@ public class BucketCache implements BlockCache, HeapSize {
 
   private AtomicBoolean backingMapValidated = new AtomicBoolean(false);
 
-  /** Set of files for which prefetch is completed */
-  final Map<String, Boolean> fullyCachedFiles = new ConcurrentHashMap<>();
+  /**
+   * Map of hFile name -> (region encoded name, cached file size). Tracks all files that have
+   * completed prefetch, together with the region they belong to and the total size cached for
+   * that region.
+   */
+  final Map<String, Pair<String, Long>> fullyCachedFiles = new 
ConcurrentHashMap<>();
+  /**
+   * Map of region -> total size of the region prefetched on this region 
server. This is the total
+   * size of hFiles for this region prefetched on this region server
+   */
+  final Map<String, Long> regionCachedSizeMap = new ConcurrentHashMap<>();
 
   private BucketCachePersister cachePersister;
 
@@ -546,7 +558,6 @@ public class BucketCache implements BlockCache, HeapSize {
     } else {
       this.blockNumber.increment();
       this.heapSize.add(cachedItem.heapSize());
-      blocksByHFile.add(cacheKey);
     }
   }
 
@@ -636,14 +647,11 @@ public class BucketCache implements BlockCache, HeapSize {
       cacheStats.evicted(bucketEntry.getCachedTime(), cacheKey.isPrimary());
     }
     if (ioEngine.isPersistent()) {
-      fullyCachedFiles.remove(cacheKey.getHfileName());
+      removeFileFromPrefetch(cacheKey.getHfileName());
       setCacheInconsistent(true);
     }
   }
 
-  public void fileCacheCompleted(String fileName) {
-    fullyCachedFiles.put(fileName, true);
-  }
 
   /**
    * Free the {{@link BucketEntry} actually,which could only be invoked when 
the
@@ -1300,6 +1308,10 @@ public class BucketCache implements BlockCache, HeapSize 
{
     return ioEngine.isPersistent() && persistencePath != null;
   }
 
+  public Map<String, Long> getRegionCachedInfo() {
+    return Collections.unmodifiableMap(regionCachedSizeMap);
+  }
+
   /**
    * @see #persistToFile()
    */
@@ -1337,6 +1349,29 @@ public class BucketCache implements BlockCache, HeapSize 
{
     }
   }
 
+  private void updateRegionSizeMapWhileRetrievingFromFile() {
+    // Update the regionCachedSizeMap with the region size while restarting 
the region server
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Updating region size map after retrieving cached file list");
+      dumpPrefetchList();
+    }
+    regionCachedSizeMap.clear();
+    fullyCachedFiles.forEach((hFileName, hFileSize) -> {
+      // Get the region name for each file
+      String regionEncodedName = hFileSize.getFirst();
+      long cachedFileSize = hFileSize.getSecond();
+      regionCachedSizeMap.merge(regionEncodedName, cachedFileSize,
+        (oldpf, fileSize) -> oldpf + fileSize);
+    });
+  }
+
+  private void dumpPrefetchList() {
+    for (Map.Entry<String, Pair<String, Long>> outerEntry : 
fullyCachedFiles.entrySet()) {
+      LOG.debug("Cached File Entry:<{},<{},{}>>", outerEntry.getKey(),
+        outerEntry.getValue().getFirst(), outerEntry.getValue().getSecond());
+    }
+  }
+
   /**
    * Create an input stream that deletes the file after reading it. Use in 
try-with-resources to
    * avoid this pattern where an exception thrown from a finally block may 
mask earlier exceptions:
@@ -1401,7 +1436,7 @@ public class BucketCache implements BlockCache, HeapSize {
     backingMap = BucketProtoUtils.fromPB(proto.getDeserializersMap(), 
proto.getBackingMap(),
       this::createRecycler);
     fullyCachedFiles.clear();
-    fullyCachedFiles.putAll(proto.getPrefetchedFilesMap());
+    
fullyCachedFiles.putAll(BucketProtoUtils.fromPB(proto.getCachedFilesMap()));
     if (proto.hasChecksum()) {
       try {
         ((PersistentIOEngine) 
ioEngine).verifyFileIntegrity(proto.getChecksum().toByteArray(),
@@ -1444,6 +1479,7 @@ public class BucketCache implements BlockCache, HeapSize {
       LOG.info("Persistent file is old format, it does not support verifying 
file integrity!");
       backingMapValidated.set(true);
     }
+    updateRegionSizeMapWhileRetrievingFromFile();
     verifyCapacityAndClasses(proto.getCacheCapacity(), proto.getIoClass(), 
proto.getMapClass());
   }
 
@@ -1581,7 +1617,7 @@ public class BucketCache implements BlockCache, HeapSize {
    */
   @Override
   public int evictBlocksByHfileName(String hfileName) {
-    this.fullyCachedFiles.remove(hfileName);
+    removeFileFromPrefetch(hfileName);
     Set<BlockCacheKey> keySet = blocksByHFile.subSet(new 
BlockCacheKey(hfileName, Long.MIN_VALUE),
       true, new BlockCacheKey(hfileName, Long.MAX_VALUE), true);
 
@@ -1966,7 +2002,7 @@ public class BucketCache implements BlockCache, HeapSize {
   }
 
   @Override
-  public Optional<Map<String, Boolean>> getFullyCachedFiles() {
+  public Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
     return Optional.of(fullyCachedFiles);
   }
 
@@ -1985,4 +2021,33 @@ public class BucketCache implements BlockCache, HeapSize 
{
     return Optional.empty();
   }
 
+  private void removeFileFromPrefetch(String hfileName) {
+    // Update regionCachedSizeMap before removing the file from fullyCachedFiles
+    if (fullyCachedFiles.containsKey(hfileName)) {
+      Pair<String, Long> regionEntry = fullyCachedFiles.get(hfileName);
+      String regionEncodedName = regionEntry.getFirst();
+      long filePrefetchSize = regionEntry.getSecond();
+      LOG.debug("Removing file {} for region {}", hfileName, 
regionEncodedName);
+      regionCachedSizeMap.computeIfPresent(regionEncodedName, (rn, pf) -> pf - 
filePrefetchSize);
+      // If all the blocks for a region are evicted from the cache, remove the 
entry for that region
+      if (
+        regionCachedSizeMap.containsKey(regionEncodedName)
+          && regionCachedSizeMap.get(regionEncodedName) == 0
+      ) {
+        regionCachedSizeMap.remove(regionEncodedName);
+      }
+    }
+    fullyCachedFiles.remove(hfileName);
+  }
+
+  public void fileCacheCompleted(Path filePath, long size) {
+    Pair<String, Long> pair = new Pair<>();
+    // sets the region name
+    String regionName = filePath.getParent().getParent().getName();
+    pair.setFirst(regionName);
+    pair.setSecond(size);
+    fullyCachedFiles.put(filePath.getName(), pair);
+    regionCachedSizeMap.merge(regionName, size, (oldpf, fileSize) -> oldpf + 
fileSize);
+  }
+
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
index 8830e5d3255..7cc5050506e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.function.Function;
@@ -28,6 +29,7 @@ import org.apache.hadoop.hbase.io.hfile.BlockPriority;
 import org.apache.hadoop.hbase.io.hfile.BlockType;
 import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
@@ -45,7 +47,7 @@ final class BucketProtoUtils {
       .setIoClass(cache.ioEngine.getClass().getName())
       .setMapClass(cache.backingMap.getClass().getName())
       .putAllDeserializers(CacheableDeserializerIdManager.save())
-      .putAllPrefetchedFiles(cache.fullyCachedFiles)
+      .putAllCachedFiles(toCachedPB(cache.fullyCachedFiles))
       .setBackingMap(BucketProtoUtils.toPB(cache.backingMap))
       .setChecksum(ByteString
         .copyFrom(((PersistentIOEngine) 
cache.ioEngine).calculateChecksum(cache.getAlgorithm())))
@@ -185,4 +187,26 @@ final class BucketProtoUtils {
         throw new Error("Unrecognized BlockType.");
     }
   }
+
+  static Map<String, BucketCacheProtos.RegionFileSizeMap>
+    toCachedPB(Map<String, Pair<String, Long>> prefetchedHfileNames) {
+    Map<String, BucketCacheProtos.RegionFileSizeMap> tmpMap = new HashMap<>();
+    prefetchedHfileNames.forEach((hfileName, regionPrefetchMap) -> {
+      BucketCacheProtos.RegionFileSizeMap tmpRegionFileSize =
+        
BucketCacheProtos.RegionFileSizeMap.newBuilder().setRegionName(regionPrefetchMap.getFirst())
+          .setRegionCachedSize(regionPrefetchMap.getSecond()).build();
+      tmpMap.put(hfileName, tmpRegionFileSize);
+    });
+    return tmpMap;
+  }
+
+  static Map<String, Pair<String, Long>>
+    fromPB(Map<String, BucketCacheProtos.RegionFileSizeMap> 
prefetchHFileNames) {
+    Map<String, Pair<String, Long>> hfileMap = new HashMap<>();
+    prefetchHFileNames.forEach((hfileName, regionPrefetchMap) -> {
+      hfileMap.put(hfileName,
+        new Pair<>(regionPrefetchMap.getRegionName(), 
regionPrefetchMap.getRegionCachedSize()));
+    });
+    return hfileMap;
+  }
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 85721a35497..3042a2eae45 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -65,10 +65,12 @@ import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Consumer;
 import java.util.stream.Collectors;
 import javax.management.MalformedObjectNameException;
 import javax.servlet.http.HttpServlet;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.mutable.MutableFloat;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -108,7 +110,9 @@ import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.http.InfoServer;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
+import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;
 import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.RpcClient;
@@ -239,6 +243,9 @@ public class HRegionServer extends 
HBaseServerBase<RSRpcServices>
 
   private static final Logger LOG = 
LoggerFactory.getLogger(HRegionServer.class);
 
+  int unitMB = 1024 * 1024;
+  int unitKB = 1024;
+
   /**
    * For testing only! Set to true to skip notifying region assignment to 
master .
    */
@@ -1211,6 +1218,11 @@ public class HRegionServer extends 
HBaseServerBase<RSRpcServices>
         
serverLoad.addCoprocessors(coprocessorBuilder.setName(coprocessor).build());
       }
     }
+    computeIfPersistentBucketCache(bc -> {
+      bc.getRegionCachedInfo().forEach((regionName, prefetchSize) -> {
+        serverLoad.putRegionCachedInfo(regionName, roundSize(prefetchSize, 
unitMB));
+      });
+    });
     serverLoad.setReportStartTime(reportStartTime);
     serverLoad.setReportEndTime(reportEndTime);
     if (this.infoServer != null) {
@@ -1510,6 +1522,15 @@ public class HRegionServer extends 
HBaseServerBase<RSRpcServices>
     }
   }
 
+  private void computeIfPersistentBucketCache(Consumer<BucketCache> 
computation) {
+    if (blockCache instanceof CombinedBlockCache) {
+      BlockCache l2 = ((CombinedBlockCache) blockCache).getSecondLevelCache();
+      if (l2 instanceof BucketCache && ((BucketCache) l2).isCachePersistent()) 
{
+        computation.accept((BucketCache) l2);
+      }
+    }
+  }
+
   /**
    * @param r               Region to get RegionLoad for.
    * @param regionLoadBldr  the RegionLoad.Builder, can be null
@@ -1519,6 +1540,7 @@ public class HRegionServer extends 
HBaseServerBase<RSRpcServices>
   RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder 
regionLoadBldr,
     RegionSpecifier.Builder regionSpecifier) throws IOException {
     byte[] name = r.getRegionInfo().getRegionName();
+    String regionEncodedName = r.getRegionInfo().getEncodedName();
     int stores = 0;
     int storefiles = 0;
     int storeRefCount = 0;
@@ -1531,6 +1553,7 @@ public class HRegionServer extends 
HBaseServerBase<RSRpcServices>
     long totalStaticBloomSize = 0L;
     long totalCompactingKVs = 0L;
     long currentCompactedKVs = 0L;
+    long totalRegionSize = 0L;
     List<HStore> storeList = r.getStores();
     stores += storeList.size();
     for (HStore store : storeList) {
@@ -1542,6 +1565,7 @@ public class HRegionServer extends 
HBaseServerBase<RSRpcServices>
         Math.max(maxCompactedStoreFileRefCount, 
currentMaxCompactedStoreFileRefCount);
       storeUncompressedSize += store.getStoreSizeUncompressed();
       storefileSize += store.getStorefilesSize();
+      totalRegionSize += store.getHFilesSize();
       // TODO: storefileIndexSizeKB is same with rootLevelIndexSizeKB?
       storefileIndexSize += store.getStorefilesRootLevelIndexSize();
       CompactionProgress progress = store.getCompactionProgress();
@@ -1554,9 +1578,6 @@ public class HRegionServer extends 
HBaseServerBase<RSRpcServices>
       totalStaticBloomSize += store.getTotalStaticBloomSize();
     }
 
-    int unitMB = 1024 * 1024;
-    int unitKB = 1024;
-
     int memstoreSizeMB = roundSize(r.getMemStoreDataSize(), unitMB);
     int storeUncompressedSizeMB = roundSize(storeUncompressedSize, unitMB);
     int storefileSizeMB = roundSize(storefileSize, unitMB);
@@ -1564,6 +1585,16 @@ public class HRegionServer extends 
HBaseServerBase<RSRpcServices>
     int rootLevelIndexSizeKB = roundSize(rootLevelIndexSize, unitKB);
     int totalStaticIndexSizeKB = roundSize(totalStaticIndexSize, unitKB);
     int totalStaticBloomSizeKB = roundSize(totalStaticBloomSize, unitKB);
+    int regionSizeMB = roundSize(totalRegionSize, unitMB);
+    final MutableFloat currentRegionCachedRatio = new MutableFloat(0.0f);
+    computeIfPersistentBucketCache(bc -> {
+      if (bc.getRegionCachedInfo().containsKey(regionEncodedName)) {
+        currentRegionCachedRatio.setValue(regionSizeMB == 0
+          ? 0.0f
+          : (float) roundSize(bc.getRegionCachedInfo().get(regionEncodedName), 
unitMB)
+            / regionSizeMB);
+      }
+    });
 
     HDFSBlocksDistribution hdfsBd = r.getHDFSBlocksDistribution();
     float dataLocality = 
hdfsBd.getBlockLocalityIndex(serverName.getHostname());
@@ -1594,7 +1625,8 @@ public class HRegionServer extends 
HBaseServerBase<RSRpcServices>
       
.setDataLocalityForSsd(dataLocalityForSsd).setBlocksLocalWeight(blocksLocalWeight)
       
.setBlocksLocalWithSsdWeight(blocksLocalWithSsdWeight).setBlocksTotalWeight(blocksTotalWeight)
       
.setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad(r.getCompactionState()))
-      .setLastMajorCompactionTs(r.getOldestHfileTs(true));
+      
.setLastMajorCompactionTs(r.getOldestHfileTs(true)).setRegionSizeMB(regionSizeMB)
+      .setCurrentRegionCachedRatio(currentRegionCachedRatio.floatValue());
     r.setCompleteSequenceId(regionLoadBldr);
     return regionLoadBldr.build();
   }
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java
index 8bcf3e600f8..8dfb8b1a463 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerMetrics.java
@@ -60,6 +60,10 @@ public class TestServerMetrics {
       metrics.getRegionMetrics().values().stream().mapToLong(v -> 
v.getCpRequestCount()).sum());
     assertEquals(300, metrics.getRegionMetrics().values().stream()
       .mapToLong(v -> v.getFilteredReadRequestCount()).sum());
+    assertEquals(2, metrics.getRegionMetrics().values().stream()
+      .mapToLong(v -> (long) v.getCurrentRegionCachedRatio()).count());
+    assertEquals(150, metrics.getRegionMetrics().values().stream()
+      .mapToDouble(v -> v.getRegionSizeMB().get(Size.Unit.MEGABYTE)).sum(), 0);
   }
 
   @Test
@@ -99,12 +103,14 @@ public class TestServerMetrics {
       
ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
         
.setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
         
.setFilteredReadRequestsCount(100).setStorefileIndexSizeKB(42).setRootIndexSizeKB(201)
-        
.setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE).build();
-    ClusterStatusProtos.RegionLoad rlTwo = 
ClusterStatusProtos.RegionLoad.newBuilder()
-      
.setRegionSpecifier(rSpecTwo).setStores(3).setStorefiles(13).setStoreUncompressedSizeMB(23)
-      
.setStorefileSizeMB(300).setFilteredReadRequestsCount(200).setStorefileIndexSizeKB(40)
-      .setRootIndexSizeKB(303).setReadRequestsCount(Integer.MAX_VALUE)
-      
.setWriteRequestsCount(Integer.MAX_VALUE).setCpRequestsCount(100).build();
+        
.setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE)
+        .setRegionSizeMB(100).setCurrentRegionCachedRatio(0.9f).build();
+    ClusterStatusProtos.RegionLoad rlTwo =
+      
ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
+        
.setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
+        
.setFilteredReadRequestsCount(200).setStorefileIndexSizeKB(40).setRootIndexSizeKB(303)
+        
.setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE)
+        
.setCpRequestsCount(100).setRegionSizeMB(50).setCurrentRegionCachedRatio(1.0f).build();
 
     ClusterStatusProtos.ServerLoad sl = 
ClusterStatusProtos.ServerLoad.newBuilder()
       .addRegionLoads(rlOne).addRegionLoads(rlTwo).build();
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
index 594db4d7c30..31fcf9fd47f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
@@ -400,6 +400,10 @@ public class TestRegionsRecoveryChore {
         return null;
       }
 
+      @Override
+      public Map<String, Integer> getRegionCachedInfo() {
+        return new HashMap<>();
+      }
     };
     return serverMetrics;
   }
@@ -541,6 +545,16 @@ public class TestRegionsRecoveryChore {
       public CompactionState getCompactionState() {
         return null;
       }
+
+      @Override
+      public Size getRegionSizeMB() {
+        return null;
+      }
+
+      @Override
+      public float getCurrentRegionCachedRatio() {
+        return 0.0f;
+      }
     };
     return regionMetrics;
   }


Reply via email to