saintstack commented on a change in pull request #978: HBASE-22285 A normalizer which merges small size regions with adjacen…
URL: https://github.com/apache/hbase/pull/978#discussion_r362260193
 
 

 ##########
 File path: 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/BaseNormalizer.java
 ##########
 @@ -0,0 +1,214 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.normalizer;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Size;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.MasterSwitchType;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.MasterRpcServices;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public abstract class BaseNormalizer implements RegionNormalizer {
+  private static final Logger LOG = LoggerFactory.getLogger(BaseNormalizer.class);
+  protected MasterServices masterServices;
+  protected MasterRpcServices masterRpcServices;
+
+  /**
+   * Set the master service.
+   * @param masterServices inject instance of MasterServices
+   */
+  @Override
+  public void setMasterServices(MasterServices masterServices) {
+    this.masterServices = masterServices;
+  }
+
+  @Override
+  public void setMasterRpcServices(MasterRpcServices masterRpcServices) {
+    this.masterRpcServices = masterRpcServices;
+  }
+
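+  /**
+   * @return size of the region's store files in MB, or -1 if the size cannot be determined
+   */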
+  protected long getRegionSize(RegionInfo hri) {
+    ServerName sn = masterServices.getAssignmentManager().getRegionStates().
+      getRegionServerOfRegion(hri);
+    RegionMetrics regionLoad = masterServices.getServerManager().getLoad(sn).
+      getRegionMetrics().get(hri.getRegionName());
+    if (regionLoad == null) {
+      LOG.debug(hri.getRegionNameAsString() + " was not found in RegionsLoad");
+      return -1;
+    }
+    return (long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE);
+  }
+
+  protected boolean isMergeEnabled() {
+    boolean mergeEnabled = true;
+    try {
+      mergeEnabled = masterRpcServices
+        .isSplitOrMergeEnabled(null,
+          RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.MERGE))
+        .getEnabled();
+    } catch (ServiceException e) {
+      LOG.warn("Unable to determine whether merge is enabled", e);
+    }
+    return mergeEnabled;
+  }
+
+  protected boolean isSplitEnabled() {
+    boolean splitEnabled = true;
+    try {
+      splitEnabled = masterRpcServices
+          .isSplitOrMergeEnabled(null,
+            RequestConverter.buildIsSplitOrMergeEnabledRequest(MasterSwitchType.SPLIT))
+          .getEnabled();
+    } catch (ServiceException se) {
+      LOG.warn("Unable to determine whether split is enabled", se);
+    }
+    return splitEnabled;
+  }
+
+  /**
+   * Computes the average region size for the given regions, honoring
+   * {@link org.apache.hadoop.hbase.client.TableDescriptor#getNormalizerTargetRegionCount()}
+   * and {@link org.apache.hadoop.hbase.client.TableDescriptor#getNormalizerTargetRegionSize()}
+   * when set. Callers must make sure all regions belong to the same table.
+   * @param tableRegions regions of a single table
+   * @return average region size in MB
+   */
+  protected double getAvgRegionSize(List<RegionInfo> tableRegions) {
+    long totalSizeMb = 0;
+    int actualRegionCnt = 0;
+    for (RegionInfo hri : tableRegions) {
+      long regionSize = getRegionSize(hri);
+      // skip regions whose size is unknown or reported as 0 MB when computing the average.
+      if (regionSize > 0) {
+        actualRegionCnt++;
+        totalSizeMb += regionSize;
+      }
+    }
+    TableName table = tableRegions.get(0).getTable();
+    int targetRegionCount = -1;
+    long targetRegionSize = -1;
+    try {
+      TableDescriptor tableDescriptor = masterServices.getTableDescriptors().get(table);
+      if (tableDescriptor != null) {
+        targetRegionCount = tableDescriptor.getNormalizerTargetRegionCount();
+        targetRegionSize = tableDescriptor.getNormalizerTargetRegionSize();
+        LOG.debug("Table {}: target region count is {}, target region size is {}", table,
+          targetRegionCount, targetRegionSize);
+      }
+    } catch (IOException e) {
+      LOG.warn("Cannot get the target region count or target region size of table {}; "
+        + "defaulting both to -1", table, e);
+    }
+
+    double avgRegionSize;
+    if (targetRegionSize > 0) {
+      avgRegionSize = targetRegionSize;
+    } else if (targetRegionCount > 0) {
+      avgRegionSize = totalSizeMb / (double) targetRegionCount;
+    } else {
+      avgRegionSize = actualRegionCnt == 0 ? 0 : totalSizeMb / (double) actualRegionCnt;
+    }
+
+    LOG.debug("Table {}, total aggregated regions size: {}", table, totalSizeMb);
+    LOG.debug("Table {}, average region size: {}", table, avgRegionSize);
+    return avgRegionSize;
+  }
+
+  /**
+   * Computes the merge plans that should be executed for this table to converge
+   * its average region size towards the target average or target region count.
+   * @param table table to compute merge plans for
+   * @return list of merge normalization plans
+   */
+  protected List<NormalizationPlan> getMergeNormalizationPlan(TableName table) {
+    List<NormalizationPlan> plans = new ArrayList<>();
+    List<RegionInfo> tableRegions =
+        masterServices.getAssignmentManager().getRegionStates().getRegionsOfTable(table);
+    double avgRegionSize = getAvgRegionSize(tableRegions);
+    LOG.debug("Table {}, average region size: {}", table, avgRegionSize);
+    LOG.debug("Computing normalization plan for table: {}, number of regions: {}", table,
+      tableRegions.size());
+
+    int candidateIdx = 0;
+    while (candidateIdx < tableRegions.size()) {
+      if (candidateIdx == tableRegions.size() - 1) {
+        break;
+      }
+      RegionInfo hri = tableRegions.get(candidateIdx);
+      long regionSize = getRegionSize(hri);
+      RegionInfo hri2 = tableRegions.get(candidateIdx + 1);
+      long regionSize2 = getRegionSize(hri2);
+      if (regionSize >= 0 && regionSize2 >= 0 && regionSize + regionSize2 < avgRegionSize) {
+        // at least one of the two regions should be older than MIN_REGION_DURATION days
+        plans.add(new MergeNormalizationPlan(hri, hri2));
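+        // hri2 is consumed by this plan; together with the increment at the bottom of
+        // the loop, the index advances past both merged regions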
+        candidateIdx++;
+      } else {
+        LOG.debug("Skipping region {} of table {} with size {}", hri.getRegionId(), table,
+          regionSize);
+      }
+      candidateIdx++;
+    }
+    return plans;
+  }
+
+  /**
+   * Computes the split plans that should be executed for this table to converge
+   * its average region size towards the target average or target region count.
+   * @param table table to compute split plans for
+   * @return list of split normalization plans
+   */
+  protected List<NormalizationPlan> getSplitNormalizationPlan(TableName table) {
+    List<NormalizationPlan> plans = new ArrayList<>();
+    List<RegionInfo> tableRegions =
+        masterServices.getAssignmentManager().getRegionStates().getRegionsOfTable(table);
+    double avgRegionSize = getAvgRegionSize(tableRegions);
+    LOG.debug("Table {}, average region size: {}", table, avgRegionSize);
+
+    int candidateIdx = 0;
+    while (candidateIdx < tableRegions.size()) {
+      RegionInfo hri = tableRegions.get(candidateIdx);
+      long regionSize = getRegionSize(hri);
+      // if the region is more than twice the average size we split it; split
+      // is a higher-priority normalization action than merge.
+      if (regionSize > 2 * avgRegionSize) {
+        LOG.info("Table {}, large region {} has size {}, more than twice avg size, splitting",
+          table, hri.getRegionNameAsString(), regionSize);
+        plans.add(new SplitNormalizationPlan(hri, null));
+      }
+      candidateIdx++;
+    }
+    return plans;
+  }
+}
 
 Review comment:
   Could this class be a utility class that implementations make use of, rather than an abstract class each implementation must extend?
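   For illustration, a minimal sketch of the utility-class shape this suggests; the class name `NormalizerUtils` and the choice to pass `MasterServices` in as a parameter are assumptions for the example, not something the PR defines:

   ```java
   import org.apache.hadoop.hbase.RegionMetrics;
   import org.apache.hadoop.hbase.ServerName;
   import org.apache.hadoop.hbase.Size;
   import org.apache.hadoop.hbase.client.RegionInfo;
   import org.apache.hadoop.hbase.master.MasterServices;

   /**
    * Hypothetical sketch only: the shared helpers as static methods that take their
    * collaborators as parameters, so a normalizer implementation can call them
    * without extending a common abstract base class.
    */
   public final class NormalizerUtils {
     private NormalizerUtils() {
     }

     /** Returns the region's store file size in MB, or -1 if it cannot be determined. */
     public static long getRegionSize(MasterServices masterServices, RegionInfo hri) {
       ServerName sn = masterServices.getAssignmentManager().getRegionStates()
         .getRegionServerOfRegion(hri);
       RegionMetrics regionLoad = masterServices.getServerManager().getLoad(sn)
         .getRegionMetrics().get(hri.getRegionName());
       return regionLoad == null ? -1
         : (long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE);
     }
   }
   ```

   A normalizer implementation would then call `NormalizerUtils.getRegionSize(masterServices, hri)` rather than inheriting it.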

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services
