virajjasani commented on a change in pull request #600: HBASE-22460 : Reopen regions with very high Store Ref Counts
URL: https://github.com/apache/hbase/pull/600#discussion_r326124979
 
 

 ##########
 File path: hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionsRecoveryChore.java
 ##########
 @@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterMetrics;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.hbase.ServerMetrics;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils;
+
+/**
+ * This chore, every time it runs, will try to recover regions with high store ref count
+ * by reopening them
+ */
+@InterfaceAudience.Private
+public class RegionsRecoveryChore extends ScheduledChore {
+
+  private static final Logger LOG = LoggerFactory.getLogger(RegionsRecoveryChore.class);
+
+  private static final String REGIONS_RECOVERY_CHORE_NAME = "RegionsRecoveryChore";
+
+  private static final String REGIONS_RECOVERY_INTERVAL =
+    "hbase.master.regions.recovery.interval";
+  private static final String STORE_REF_COUNT_THRESHOLD = "hbase.regions.recovery.store.count";
+
+  private static final int DEFAULT_REGIONS_RECOVERY_INTERVAL = 1200 * 1000; // Default 20 minutes
+  private static final int DEFAULT_STORE_REF_COUNT_THRESHOLD = 256;
+
+  private static final String ERROR_REOPEN_REGIONS_MSG =
+    "Error reopening regions with high storeRefCount. ";
+
+  private final HMaster hMaster;
+  private final int storeRefCountThreshold;
+
+  private static final PerClientRandomNonceGenerator NONCE_GENERATOR =
+    PerClientRandomNonceGenerator.get();
+
+  /**
+   * Construct RegionsRecoveryChore with provided params
+   *
+   * @param stopper When {@link Stoppable#isStopped()} is true, this chore will cancel and cleanup
+   * @param configuration The configuration params to be used
+   * @param hMaster HMaster instance to initiate region reopening
+   */
+  RegionsRecoveryChore(final Stoppable stopper, final Configuration configuration,
+      final HMaster hMaster) {
+
+    super(REGIONS_RECOVERY_CHORE_NAME, stopper, configuration.getInt(REGIONS_RECOVERY_INTERVAL,
+      DEFAULT_REGIONS_RECOVERY_INTERVAL));
+    this.hMaster = hMaster;
+    this.storeRefCountThreshold = configuration.getInt(STORE_REF_COUNT_THRESHOLD,
+      DEFAULT_STORE_REF_COUNT_THRESHOLD);
+
+  }
+
+  @Override
+  protected void chore() {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Starting up Regions Recovery by reopening regions based on 
storeRefCount...");
+    }
+    try {
+      final ClusterMetrics clusterMetrics = hMaster.getClusterMetrics();
+      final Map<ServerName, ServerMetrics> serverMetricsMap =
+        clusterMetrics.getLiveServerMetrics();
+      final Map<TableName, List<byte[]>> tableToReopenRegionsMap =
+        getTableToRegionsByRefCount(serverMetricsMap);
+      if (MapUtils.isNotEmpty(tableToReopenRegionsMap)) {
+        tableToReopenRegionsMap.forEach((tableName, regionNames) -> {
+          try {
+            LOG.warn("Reopening regions due to high refCount. TableName: {} , 
noOfRegions: {}",
+              tableName, regionNames.size());
+            hMaster.reopenRegions(tableName, regionNames, NONCE_GENERATOR.getNonceGroup(),
+              NONCE_GENERATOR.newNonce());
+          } catch (IOException e) {
+            LOG.error("{} tableName: {}, regionNames: {}", 
ERROR_REOPEN_REIONS_MSG,
+              tableName, regionNames, e);
+          }
+        });
+      }
+    } catch (Exception e) {
+      LOG.error("Error while reopening regions based on storeRefCount 
threshold", e);
+    }
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Exiting Regions Recovery by reopening regions based on 
storeRefCount...");
+    }
+  }
+
+  private Map<TableName, List<byte[]>> getTableToRegionsByRefCount(
+      final Map<ServerName, ServerMetrics> serverMetricsMap) {
+
+    final Map<TableName, List<byte[]>> tableToReopenRegionsMap = new HashMap<>();
+    for (ServerMetrics serverMetrics : serverMetricsMap.values()) {
+      Map<byte[], RegionMetrics> regionMetricsMap = serverMetrics.getRegionMetrics();
+      for (RegionMetrics regionMetrics : regionMetricsMap.values()) {
+        // For each region, each store file can have different ref counts
+        // We need to find maximum of all such ref counts and if that max count
+        // is beyond a threshold value, we should reopen the region.
+        // Here, we take max ref count of all stores and not the cumulative count
+        // of all stores.
+        final int maxStoreRefCount = regionMetrics.getMaxStoreRefCount();
+        if (maxStoreRefCount > storeRefCountThreshold) {
 
 Review comment:
   Sure, better to exclude META? In your opinion, what default count (instead of 256) would be ideal?
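   
   A rough sketch of the META exclusion discussed here, for illustration only
   (it assumes RegionInfo.getTable(byte[]) and TableName.isMetaTableName(TableName)
   are available to resolve the owning table from the region name; the exact
   placement inside getTableToRegionsByRefCount is not part of this PR):
   
       for (RegionMetrics regionMetrics : regionMetricsMap.values()) {
         final byte[] regionName = regionMetrics.getRegionName();
         // Resolve the owning table and skip hbase:meta so the chore
         // never schedules the META region for reopening.
         final TableName tableName = RegionInfo.getTable(regionName);
         if (TableName.isMetaTableName(tableName)) {
           continue;
         }
         final int maxStoreRefCount = regionMetrics.getMaxStoreRefCount();
         if (maxStoreRefCount > storeRefCountThreshold) {
           // Collect the region under its table, creating the list lazily
           tableToReopenRegionsMap
             .computeIfAbsent(tableName, table -> new ArrayList<>())
             .add(regionName);
         }
       }
   
   As for the default: 256 is a judgment call either way; since the patch already
   exposes it via hbase.regions.recovery.store.count, operators can tune it per
   workload whatever default we settle on.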
