This is an automated email from the ASF dual-hosted git repository.

junegunn pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2 by this push:
     new 633527e16b6 HBASE-29279 Allow throttling alter operation via table configuration (#7100)
633527e16b6 is described below

commit 633527e16b62536f7148a531a3de45d5106d5c8b
Author: Junegunn Choi <juneg...@apache.org>
AuthorDate: Fri Jun 13 23:04:00 2025 +0900

    HBASE-29279 Allow throttling alter operation via table configuration (#7100)
---
 .../master/procedure/ModifyTableProcedure.java     | 15 ++-------
 .../procedure/ReopenTableRegionsProcedure.java     | 36 +++++++++++++++++++---
 .../TestReopenTableRegionsProcedureBatching.java   | 36 ++++++++++++++++++++++
 3 files changed, 69 insertions(+), 18 deletions(-)

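For context, this change lets operators throttle region reopens during an alter by setting the two progressive-batch properties on the table itself; per the javadoc added below, the table descriptor is checked first and the global configuration is the fallback. A minimal sketch using the standard HBase client API (the connection setup, table name, and chosen values are illustrative assumptions, not part of this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class ThrottledAlterExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("my_table"); // hypothetical table
      TableDescriptor current = admin.getDescriptor(table);
      // Per-table values take precedence over the global configuration
      // (see ReopenTableRegionsProcedure.throttled in the diff below).
      TableDescriptor throttled = TableDescriptorBuilder.newBuilder(current)
        .setValue("hbase.reopen.table.regions.progressive.batch.backoff.ms", "200")
        .setValue("hbase.reopen.table.regions.progressive.batch.size.max", "7")
        .build();
      // Subsequent alters of this table reopen regions in progressively
      // growing batches, capped at 7, with a 200 ms pause between batches.
      admin.modifyTable(throttled);
    }
  }
}
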
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 57cd09ef5fa..d35a49169a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -17,11 +17,6 @@
  */
 package org.apache.hadoop.hbase.master.procedure;
 
-import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT;
-import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY;
-import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_DISABLED;
-import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY;
-
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
@@ -30,7 +25,6 @@ import java.util.List;
 import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ConcurrentTableModificationException;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseIOException;
@@ -231,13 +225,8 @@ public class ModifyTableProcedure extends AbstractStateMachineTableProcedure<Mod
           break;
         case MODIFY_TABLE_REOPEN_ALL_REGIONS:
           if (isTableEnabled(env)) {
-            Configuration conf = env.getMasterConfiguration();
-            long backoffMillis = conf.getLong(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY,
-              PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT);
-            int batchSizeMax =
-              conf.getInt(PROGRESSIVE_BATCH_SIZE_MAX_KEY, PROGRESSIVE_BATCH_SIZE_MAX_DISABLED);
-            addChildProcedure(
-              new ReopenTableRegionsProcedure(getTableName(), backoffMillis, batchSizeMax));
+            addChildProcedure(ReopenTableRegionsProcedure.throttled(env.getMasterConfiguration(),
+              env.getMasterServices().getTableDescriptors().get(getTableName())));
           }
           setNextState(ModifyTableState.MODIFY_TABLE_ASSIGN_NEW_REPLICAS);
           break;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java
index 2752179d9bb..03f04792af5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java
@@ -22,9 +22,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Optional;
 import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.conf.ConfigKey;
 import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
 import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
@@ -55,12 +59,11 @@ public class ReopenTableRegionsProcedure
   private static final Logger LOG = LoggerFactory.getLogger(ReopenTableRegionsProcedure.class);
 
   public static final String PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY =
-    "hbase.reopen.table.regions.progressive.batch.backoff.ms";
+    ConfigKey.LONG("hbase.reopen.table.regions.progressive.batch.backoff.ms");
   public static final long PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT = 0L;
   public static final String PROGRESSIVE_BATCH_SIZE_MAX_KEY =
-    "hbase.reopen.table.regions.progressive.batch.size.max";
+    ConfigKey.INT("hbase.reopen.table.regions.progressive.batch.size.max");
   public static final int PROGRESSIVE_BATCH_SIZE_MAX_DISABLED = -1;
-  private static final int PROGRESSIVE_BATCH_SIZE_MAX_DEFAULT_VALUE = Integer.MAX_VALUE;
 
   // this minimum prevents a max which would break this procedure
   private static final int MINIMUM_BATCH_SIZE_MAX = 1;
@@ -83,6 +86,23 @@ public class ReopenTableRegionsProcedure
   private long regionsReopened = 0;
   private long batchesProcessed = 0;
 
+  /**
+   * Create a new ReopenTableRegionsProcedure respecting the throttling configuration for the table.
+   * First check the table descriptor, then fall back to the global configuration. Only used in
+   * ModifyTableProcedure.
+   */
+  public static ReopenTableRegionsProcedure throttled(final Configuration conf,
+    final TableDescriptor desc) {
+    long backoffMillis = Optional.ofNullable(desc.getValue(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY))
+      .map(Long::parseLong).orElseGet(() -> conf.getLong(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY,
+        PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT));
+    int batchSizeMax = Optional.ofNullable(desc.getValue(PROGRESSIVE_BATCH_SIZE_MAX_KEY))
+      .map(Integer::parseInt).orElseGet(
+        () -> conf.getInt(PROGRESSIVE_BATCH_SIZE_MAX_KEY, PROGRESSIVE_BATCH_SIZE_MAX_DISABLED));
+
+    return new ReopenTableRegionsProcedure(desc.getTableName(), backoffMillis, batchSizeMax);
+  }
+
   public ReopenTableRegionsProcedure() {
     this(null);
   }
@@ -96,12 +116,12 @@ public class ReopenTableRegionsProcedure
       PROGRESSIVE_BATCH_SIZE_MAX_DISABLED);
   }
 
-  public ReopenTableRegionsProcedure(final TableName tableName, long reopenBatchBackoffMillis,
+  ReopenTableRegionsProcedure(final TableName tableName, long reopenBatchBackoffMillis,
     int reopenBatchSizeMax) {
     this(tableName, Collections.emptyList(), reopenBatchBackoffMillis, reopenBatchSizeMax);
   }
 
-  public ReopenTableRegionsProcedure(final TableName tableName, final List<byte[]> regionNames,
+  private ReopenTableRegionsProcedure(final TableName tableName, final List<byte[]> regionNames,
     long reopenBatchBackoffMillis, int reopenBatchSizeMax) {
     this.tableName = tableName;
     this.regionNames = regionNames;
@@ -137,6 +157,12 @@ public class ReopenTableRegionsProcedure
     return batchesProcessed;
   }
 
+  @RestrictedApi(explanation = "Should only be called in tests", link = "",
+      allowedOnPath = ".*/src/test/.*")
+  long getReopenBatchBackoffMillis() {
+    return reopenBatchBackoffMillis;
+  }
+
   @RestrictedApi(explanation = "Should only be called internally or in tests", link = "",
       allowedOnPath = ".*(/src/test/.*|ReopenTableRegionsProcedure).java")
   protected int progressBatchSize() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBatching.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBatching.java
index 32b1d38b280..2cdb0e9624c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBatching.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureBatching.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hbase.master.procedure;
 
+import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT;
+import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY;
+import static org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -26,9 +29,11 @@ import java.util.Set;
 import java.util.stream.Collectors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@@ -160,6 +165,37 @@ public class TestReopenTableRegionsProcedureBatching {
     }
   }
 
+  @Test
+  public void testBackoffConfigurationFromTableDescriptor() {
+    Configuration conf = HBaseConfiguration.create();
+    TableDescriptorBuilder tbd = TableDescriptorBuilder.newBuilder(TABLE_NAME);
+
+    // Default (no batching, no backoff)
+    ReopenTableRegionsProcedure proc = ReopenTableRegionsProcedure.throttled(conf, tbd.build());
+    assertEquals(PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT, proc.getReopenBatchBackoffMillis());
+    assertEquals(Integer.MAX_VALUE, proc.progressBatchSize());
+
+    // From Configuration (backoff: 100ms, max: 6)
+    conf.setLong(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, 100);
+    conf.setInt(PROGRESSIVE_BATCH_SIZE_MAX_KEY, 6);
+    proc = ReopenTableRegionsProcedure.throttled(conf, tbd.build());
+    assertEquals(100, proc.getReopenBatchBackoffMillis());
+    assertEquals(2, proc.progressBatchSize());
+    assertEquals(4, proc.progressBatchSize());
+    assertEquals(6, proc.progressBatchSize());
+    assertEquals(6, proc.progressBatchSize());
+
+    // From TableDescriptor (backoff: 200ms, max: 7)
+    tbd.setValue(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, "200");
+    tbd.setValue(PROGRESSIVE_BATCH_SIZE_MAX_KEY, "7");
+    proc = ReopenTableRegionsProcedure.throttled(conf, tbd.build());
+    assertEquals(200, proc.getReopenBatchBackoffMillis());
+    assertEquals(2, proc.progressBatchSize());
+    assertEquals(4, proc.progressBatchSize());
+    assertEquals(7, proc.progressBatchSize());
+    assertEquals(7, proc.progressBatchSize());
+  }
+
   private void confirmBatchSize(int expectedBatchSize, Set<StuckRegion> stuckRegions,
     ReopenTableRegionsProcedure proc) {
     while (true) {
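
The asserted sequence in the test above (2, 4, then the cap) implies the batch size roughly doubles each round until it reaches the configured max. A standalone sketch of that progression, inferred from the asserted values rather than copied from ReopenTableRegionsProcedure.progressBatchSize():

public class ProgressiveBatchSketch {
  // Hypothetical helper; the start-at-2-then-double behavior is inferred
  // from the test's asserted sequence (2, 4, 7, 7 with max = 7).
  static int nextBatchSize(int current, int max) {
    long doubled = current <= 0 ? 2 : 2L * current; // start at 2, then double
    return (int) Math.min(doubled, max); // cap at the configured max
  }

  public static void main(String[] args) {
    int size = 0;
    for (int i = 0; i < 4; i++) {
      size = nextBatchSize(size, 7);
      System.out.println(size); // prints 2, 4, 7, 7
    }
  }
}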
