This is an automated email from the ASF dual-hosted git repository.
cconnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/master by this push:
new 6e14c22aae2 HBASE-29782 Expose public Admin API to reopen table regions without moving (#7563)
6e14c22aae2 is described below
commit 6e14c22aae2b433d4c173f26c886310b9d2d517f
Author: Alex Hughes <[email protected]>
AuthorDate: Tue Jan 6 21:10:51 2026 +0000
HBASE-29782 Expose public Admin API to reopen table regions without moving (#7563)
* HBASE-29782 Add reopen regions functionality to admin
* Add integration tests
* HBASE-29782 Remove unnecessary comment
* HBASE-29782 Spotless
* HBASE-29782 Add integration test
* HBASE-29782 Add comment about testing constructors
* HBASE-29782 Spotless
---------
Co-authored-by: Alex Hughes <[email protected]>
---
.../java/org/apache/hadoop/hbase/client/Admin.java | 41 ++
.../hadoop/hbase/client/AdminOverAsyncAdmin.java | 11 +
.../org/apache/hadoop/hbase/client/AsyncAdmin.java | 18 +
.../hadoop/hbase/client/AsyncHBaseAdmin.java | 10 +
.../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 29 ++
.../hbase/shaded/protobuf/RequestConverter.java | 26 ++
.../src/main/protobuf/server/master/Master.proto | 18 +
.../org/apache/hadoop/hbase/master/HMaster.java | 48 +++
.../hadoop/hbase/master/MasterRpcServices.java | 25 ++
.../procedure/ReopenTableRegionsProcedure.java | 167 +++++---
.../TestReopenTableRegionsIntegration.java | 335 ++++++++++++++++
...ReopenTableRegionsProcedureSpecificRegions.java | 442 +++++++++++++++++++++
.../hbase/rsgroup/VerifyingRSGroupAdmin.java | 11 +
.../hadoop/hbase/thrift2/client/ThriftAdmin.java | 10 +
14 files changed, 1133 insertions(+), 58 deletions(-)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 1c08ec3b26f..65b3abcd413 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1077,6 +1077,47 @@ public interface Admin extends Abortable, Closeable {
*/
Future<Void> modifyTableAsync(TableDescriptor td, boolean reopenRegions)
throws IOException;
+ /**
+ * Reopen all regions of a table. This is useful after calling
+ * {@link #modifyTableAsync(TableDescriptor, boolean)} with reopenRegions=false to gradually roll
+ * out table descriptor changes to regions. Regions are reopened in-place (no move).
+ * @param tableName table whose regions to reopen
+ * @throws IOException if a remote or network exception occurs
+ */
+ default void reopenTableRegions(TableName tableName) throws IOException {
+ get(reopenTableRegionsAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+ }
+
+ /**
+ * Reopen specific regions of a table. Useful for canary testing table descriptor changes on a
+ * subset of regions before rolling out to the entire table.
+ * @param tableName table whose regions to reopen
+ * @param regions specific regions to reopen
+ * @throws IOException if a remote or network exception occurs
+ */
+ default void reopenTableRegions(TableName tableName, List<RegionInfo> regions)
+ throws IOException {
+ get(reopenTableRegionsAsync(tableName, regions), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
+ }
+
+ /**
+ * Asynchronously reopen all regions of a table.
+ * @param tableName table whose regions to reopen
+ * @return Future for tracking completion
+ * @throws IOException if a remote or network exception occurs
+ */
+ Future<Void> reopenTableRegionsAsync(TableName tableName) throws IOException;
+
+ /**
+ * Asynchronously reopen specific regions of a table.
+ * @param tableName table whose regions to reopen
+ * @param regions specific regions to reopen
+ * @return Future for tracking completion
+ * @throws IOException if a remote or network exception occurs
+ */
+ Future<Void> reopenTableRegionsAsync(TableName tableName, List<RegionInfo> regions)
+ throws IOException;
+
/**
* Change the store file tracker of the given table.
* @param tableName the table you want to change
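For context, a minimal sketch of the gradual-rollout pattern these blocking methods enable, assuming an already-open Connection named conn and a modified TableDescriptor named newTd (illustration only, exception handling omitted; not part of the patch):

    try (Admin admin = conn.getAdmin()) {
      // Persist the descriptor change without reopening any regions yet.
      admin.modifyTableAsync(newTd, false).get();
      // Later, roll the change out by reopening every region of the table in place.
      admin.reopenTableRegions(tableName);
    }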
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
index e6bf6c3d28e..7117fd4fd33 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
@@ -511,6 +511,17 @@ class AdminOverAsyncAdmin implements Admin {
return admin.modifyTable(td, reopenRegions);
}
+ @Override
+ public Future<Void> reopenTableRegionsAsync(TableName tableName) throws IOException {
+ return admin.reopenTableRegions(tableName).toCompletableFuture();
+ }
+
+ @Override
+ public Future<Void> reopenTableRegionsAsync(TableName tableName, List<RegionInfo> regions)
+ throws IOException {
+ return admin.reopenTableRegions(tableName, regions).toCompletableFuture();
+ }
+
@Override
public Future<Void> modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT)
throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index ec0556f20ac..56211cedc49 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -213,6 +213,24 @@ public interface AsyncAdmin {
*/
CompletableFuture<Void> modifyTable(TableDescriptor desc, boolean reopenRegions);
+ /**
+ * Reopen all regions of a table. This is useful after calling
+ * {@link #modifyTable(TableDescriptor, boolean)} with reopenRegions=false to gradually roll out
+ * table descriptor changes to regions. Regions are reopened in-place (no move).
+ * @param tableName table whose regions to reopen
+ * @return CompletableFuture that completes when all regions have been reopened
+ */
+ CompletableFuture<Void> reopenTableRegions(TableName tableName);
+
+ /**
+ * Reopen specific regions of a table. Useful for canary testing table descriptor changes on a
+ * subset of regions before rolling out to the entire table.
+ * @param tableName table whose regions to reopen
+ * @param regions specific regions to reopen
+ * @return CompletableFuture that completes when specified regions have been reopened
+ */
+ CompletableFuture<Void> reopenTableRegions(TableName tableName, List<RegionInfo> regions);
+
/**
* Change the store file tracker of the given table.
* @param tableName the table you want to change
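Similarly, a hedged sketch of the canary flow the second overload is aimed at, via the async client (assumes an AsyncConnection named asyncConn and that the first listed region is an acceptable canary; illustration only):

    AsyncAdmin admin = asyncConn.getAdmin();
    List<RegionInfo> regions = admin.getRegions(tableName).join();
    // Canary: reopen a single region first so the new descriptor can be observed in isolation.
    admin.reopenTableRegions(tableName, regions.subList(0, 1)).join();
    // Once the canary region looks healthy, reopen the remaining regions (or the whole table).
    admin.reopenTableRegions(tableName).join();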
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index b1fb2be1354..8132b184809 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -161,6 +161,16 @@ class AsyncHBaseAdmin implements AsyncAdmin {
return wrap(rawAdmin.modifyTable(desc, reopenRegions));
}
+ @Override
+ public CompletableFuture<Void> reopenTableRegions(TableName tableName) {
+ return wrap(rawAdmin.reopenTableRegions(tableName));
+ }
+
+ @Override
+ public CompletableFuture<Void> reopenTableRegions(TableName tableName, List<RegionInfo> regions) {
+ return wrap(rawAdmin.reopenTableRegions(tableName, regions));
+ }
+
@Override
public CompletableFuture<Void> modifyTableStoreFileTracker(TableName tableName, String dstSFT) {
return wrap(rawAdmin.modifyTableStoreFileTracker(tableName, dstSFT));
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 83780a4a121..a3c177577df 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -263,6 +263,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineReg
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ReopenTableRegionsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ReopenTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersRequest;
@@ -754,6 +756,21 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
new ModifyTableProcedureBiConsumer(this, desc.getTableName()));
}
+ @Override
+ public CompletableFuture<Void> reopenTableRegions(TableName tableName) {
+ return reopenTableRegions(tableName, Collections.emptyList());
+ }
+
+ @Override
+ public CompletableFuture<Void> reopenTableRegions(TableName tableName, List<RegionInfo> regions) {
+ List<byte[]> regionNames = regions.stream().map(RegionInfo::getRegionName).toList();
+ return this.<ReopenTableRegionsRequest, ReopenTableRegionsResponse> procedureCall(tableName,
+ RequestConverter.buildReopenTableRegionsRequest(tableName, regionNames, ng.getNonceGroup(),
+ ng.newNonce()),
+ (s, c, req, done) -> s.reopenTableRegions(c, req, done), (resp) -> resp.getProcId(),
+ new ReopenTableRegionsProcedureBiConsumer(this, tableName));
+ }
+
@Override
public CompletableFuture<Void> modifyTableStoreFileTracker(TableName tableName, String dstSFT) {
return this.<ModifyTableStoreFileTrackerRequest,
@@ -2833,6 +2850,18 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
}
}
+ private static class ReopenTableRegionsProcedureBiConsumer extends TableProcedureBiConsumer {
+
+ ReopenTableRegionsProcedureBiConsumer(AsyncAdmin admin, TableName tableName) {
+ super(tableName);
+ }
+
+ @Override
+ String getOperationType() {
+ return "REOPEN_TABLE_REGIONS";
+ }
+ }
+
private static class ModifyTableStoreFileTrackerProcedureBiConsumer
extends TableProcedureBiConsumer {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 37fdb1ba6fe..61d8da6e144 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -139,6 +139,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeR
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ReopenTableRegionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
@@ -1128,6 +1129,31 @@ public final class RequestConverter {
return builder.build();
}
+ /**
+ * Creates a protocol buffer ReopenTableRegionsRequest
+ * @param tableName table whose regions to reopen
+ * @param regionNames specific regions to reopen (empty = all regions)
+ * @param nonceGroup nonce group
+ * @param nonce nonce
+ * @return a ReopenTableRegionsRequest
+ */
+ public static ReopenTableRegionsRequest buildReopenTableRegionsRequest(final TableName tableName,
+ final List<byte[]> regionNames, final long nonceGroup, final long nonce) {
+ ReopenTableRegionsRequest.Builder builder = ReopenTableRegionsRequest.newBuilder();
+ builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
+
+ if (regionNames != null && !regionNames.isEmpty()) {
+ for (byte[] regionName : regionNames) {
+ builder.addRegionNames(UnsafeByteOperations.unsafeWrap(regionName));
+ }
+ }
+
+ builder.setNonceGroup(nonceGroup);
+ builder.setNonce(nonce);
+
+ return builder.build();
+ }
+
public static ModifyTableStoreFileTrackerRequest buildModifyTableStoreFileTrackerRequest(
final TableName tableName, final String dstSFT, final long nonceGroup, final long nonce) {
ModifyTableStoreFileTrackerRequest.Builder builder =
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto
index 6dd6ee723b0..f475d26060d 100644
--- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto
@@ -211,6 +211,17 @@ message ModifyTableResponse {
optional uint64 proc_id = 1;
}
+message ReopenTableRegionsRequest {
+ required TableName table_name = 1;
+ repeated bytes region_names = 2; // empty = all regions
+ optional uint64 nonce_group = 3 [default = 0];
+ optional uint64 nonce = 4 [default = 0];
+}
+
+message ReopenTableRegionsResponse {
+ optional uint64 proc_id = 1;
+}
+
message FlushTableRequest {
required TableName table_name = 1;
repeated bytes column_family = 2;
@@ -910,6 +921,13 @@ service MasterService {
rpc ModifyTable(ModifyTableRequest)
returns(ModifyTableResponse);
+ /**
+ * Reopen regions of a table. Regions are reopened in-place without moving.
+ * Useful for rolling out table descriptor changes after modifyTable(reopenRegions=false).
+ */
+ rpc ReopenTableRegions(ReopenTableRegionsRequest)
+ returns(ReopenTableRegionsResponse);
+
/** Creates a new table asynchronously */
rpc CreateTable(CreateTableRequest)
returns(CreateTableResponse);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 22d3ab69b51..9e7cfa4dba6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -4251,6 +4251,54 @@ public class HMaster extends HBaseServerBase<MasterRpcServices> implements Maste
}
+ /**
+ * Reopen regions provided in the argument. Applies throttling to the procedure to avoid
+ * overwhelming the system. This is used by the reopenTableRegions methods in the Admin API via
+ * HMaster.
+ * @param tableName The current table name
+ * @param regionNames The region names of the regions to reopen
+ * @param nonceGroup Identifier for the source of the request, a client or process
+ * @param nonce A unique identifier for this operation from the client or process identified
+ * by <code>nonceGroup</code> (the source must ensure each operation gets a
+ * unique id).
+ * @return procedure Id
+ * @throws IOException if reopening region fails while running procedure
+ */
+ long reopenRegionsThrottled(final TableName tableName, final List<byte[]> regionNames,
+ final long nonceGroup, final long nonce) throws IOException {
+
+ checkInitialized();
+
+ if (!tableStateManager.isTablePresent(tableName)) {
+ throw new TableNotFoundException(tableName);
+ }
+
+ return MasterProcedureUtil
+ .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
+ @Override
+ protected void run() throws IOException {
+ ReopenTableRegionsProcedure proc;
+ if (regionNames.isEmpty()) {
+ proc = ReopenTableRegionsProcedure.throttled(getConfiguration(),
+ getTableDescriptors().get(tableName));
+ } else {
+ proc = ReopenTableRegionsProcedure.throttled(getConfiguration(),
+ getTableDescriptors().get(tableName), regionNames);
+ }
+
+ LOG.info("{} throttled reopening {} regions for table {}",
getClientIdAuditPrefix(),
+ regionNames.isEmpty() ? "all" : regionNames.size(), tableName);
+
+ submitProcedure(proc);
+ }
+
+ @Override
+ protected String getDescription() {
+ return "Throttled ReopenTableRegionsProcedure for " + tableName;
+ }
+ });
+ }
+
@Override
public ReplicationPeerManager getReplicationPeerManager() {
return replicationPeerManager;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index e9e0f970ef8..e9ca086863f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -320,6 +320,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineReg
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RegionSpecifierAndState;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ReopenTableRegionsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ReopenTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RollAllWALWritersRequest;
@@ -1554,6 +1556,29 @@ public class MasterRpcServices extends HBaseRpcServicesBase<HMaster>
}
}
+ @Override
+ public ReopenTableRegionsResponse reopenTableRegions(RpcController controller,
+ ReopenTableRegionsRequest request) throws ServiceException {
+ try {
+ server.checkInitialized();
+
+ final TableName tableName = ProtobufUtil.toTableName(request.getTableName());
+ final List<byte[]> regionNames = request.getRegionNamesList().stream()
+ .map(ByteString::toByteArray).collect(Collectors.toList());
+
+ LOG.info("Reopening regions for table={}, regionCount={}", tableName,
+ regionNames.isEmpty() ? "all" : regionNames.size());
+
+ long procId = server.reopenRegionsThrottled(tableName, regionNames, request.getNonceGroup(),
+ request.getNonce());
+
+ return ReopenTableRegionsResponse.newBuilder().setProcId(procId).build();
+
+ } catch (IOException ioe) {
+ throw new ServiceException(ioe);
+ }
+ }
+
@Override
public ModifyTableStoreFileTrackerResponse modifyTableStoreFileTracker(RpcController controller,
ModifyTableStoreFileTrackerRequest req) throws ServiceException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java
index 6e3491a24a8..00d1b1ad29b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.java
@@ -25,8 +25,10 @@ import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.conf.ConfigKey;
import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
@@ -89,7 +91,7 @@ public class ReopenTableRegionsProcedure
/**
* Create a new ReopenTableRegionsProcedure respecting the throttling configuration for the table.
* First check the table descriptor, then fall back to the global configuration. Only used in
- * ModifyTableProcedure.
+ * ModifyTableProcedure and in {@link HMaster#reopenRegionsThrottled}.
*/
public static ReopenTableRegionsProcedure throttled(final Configuration conf,
final TableDescriptor desc) {
@@ -103,6 +105,24 @@ public class ReopenTableRegionsProcedure
return new ReopenTableRegionsProcedure(desc.getTableName(), backoffMillis, batchSizeMax);
}
+ /**
+ * Create a new ReopenTableRegionsProcedure for specific regions, respecting the throttling
+ * configuration for the table. First check the table descriptor, then fall back to the global
+ * configuration. Only used in {@link HMaster#reopenRegionsThrottled}.
+ */
+ public static ReopenTableRegionsProcedure throttled(final Configuration conf,
+ final TableDescriptor desc, final List<byte[]> regionNames) {
+ long backoffMillis = Optional.ofNullable(desc.getValue(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY))
+ .map(Long::parseLong).orElseGet(() -> conf.getLong(PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY,
+ PROGRESSIVE_BATCH_BACKOFF_MILLIS_DEFAULT));
+ int batchSizeMax = Optional.ofNullable(desc.getValue(PROGRESSIVE_BATCH_SIZE_MAX_KEY))
+ .map(Integer::parseInt).orElseGet(
+ () -> conf.getInt(PROGRESSIVE_BATCH_SIZE_MAX_KEY, PROGRESSIVE_BATCH_SIZE_MAX_DISABLED));
+
+ return new ReopenTableRegionsProcedure(desc.getTableName(), regionNames, backoffMillis,
+ batchSizeMax);
+ }
+
public ReopenTableRegionsProcedure() {
this(null);
}
@@ -116,12 +136,18 @@ public class ReopenTableRegionsProcedure
PROGRESSIVE_BATCH_SIZE_MAX_DISABLED);
}
- ReopenTableRegionsProcedure(final TableName tableName, long reopenBatchBackoffMillis,
+ /**
+ * Visible for testing purposes - prefer the above methods to construct
+ */
+ public ReopenTableRegionsProcedure(final TableName tableName, long reopenBatchBackoffMillis,
int reopenBatchSizeMax) {
this(tableName, Collections.emptyList(), reopenBatchBackoffMillis, reopenBatchSizeMax);
}
- private ReopenTableRegionsProcedure(final TableName tableName, final List<byte[]> regionNames,
+ /**
+ * Visible for testing purposes - prefer the above methods to construct
+ */
+ public ReopenTableRegionsProcedure(final TableName tableName, final List<byte[]> regionNames,
long reopenBatchBackoffMillis, int reopenBatchSizeMax) {
this.tableName = tableName;
this.regionNames = regionNames;
@@ -190,67 +216,78 @@ public class ReopenTableRegionsProcedure
@Override
protected Flow executeFromState(MasterProcedureEnv env, ReopenTableRegionsState state)
throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
- switch (state) {
- case REOPEN_TABLE_REGIONS_GET_REGIONS:
- if (!isTableEnabled(env)) {
- LOG.info("Table {} is disabled, give up reopening its regions",
tableName);
- return Flow.NO_MORE_STATE;
- }
- List<HRegionLocation> tableRegions =
- env.getAssignmentManager().getRegionStates().getRegionsOfTableForReopen(tableName);
- regions = getRegionLocationsForReopen(tableRegions);
- setNextState(ReopenTableRegionsState.REOPEN_TABLE_REGIONS_REOPEN_REGIONS);
- return Flow.HAS_MORE_STATE;
- case REOPEN_TABLE_REGIONS_REOPEN_REGIONS:
- // if we didn't finish reopening the last batch yet, let's keep trying until we do.
- // at that point, the batch will be empty and we can generate a new batch
- if (!regions.isEmpty() && currentRegionBatch.isEmpty()) {
- currentRegionBatch = regions.stream().limit(reopenBatchSize).collect(Collectors.toList());
- batchesProcessed++;
- }
- for (HRegionLocation loc : currentRegionBatch) {
- RegionStateNode regionNode =
- env.getAssignmentManager().getRegionStates().getRegionStateNode(loc.getRegion());
- // this possible, maybe the region has already been merged or split, see HBASE-20921
- if (regionNode == null) {
- continue;
+ try {
+ switch (state) {
+ case REOPEN_TABLE_REGIONS_GET_REGIONS:
+ if (!isTableEnabled(env)) {
+ LOG.info("Table {} is disabled, give up reopening its regions",
tableName);
+ return Flow.NO_MORE_STATE;
+ }
+ List<HRegionLocation> tableRegions =
+ env.getAssignmentManager().getRegionStates().getRegionsOfTableForReopen(tableName);
+ regions = getRegionLocationsForReopen(tableRegions);
+ setNextState(ReopenTableRegionsState.REOPEN_TABLE_REGIONS_REOPEN_REGIONS);
+ return Flow.HAS_MORE_STATE;
+ case REOPEN_TABLE_REGIONS_REOPEN_REGIONS:
+ // if we didn't finish reopening the last batch yet, let's keep trying until we do.
+ // at that point, the batch will be empty and we can generate a new batch
+ if (!regions.isEmpty() && currentRegionBatch.isEmpty()) {
+ currentRegionBatch =
+ regions.stream().limit(reopenBatchSize).collect(Collectors.toList());
+ batchesProcessed++;
}
- TransitRegionStateProcedure proc;
- regionNode.lock();
- try {
- if (regionNode.getProcedure() != null) {
+ for (HRegionLocation loc : currentRegionBatch) {
+ RegionStateNode regionNode =
+ env.getAssignmentManager().getRegionStates().getRegionStateNode(loc.getRegion());
+ // this possible, maybe the region has already been merged or split, see HBASE-20921
+ if (regionNode == null) {
continue;
}
- proc = TransitRegionStateProcedure.reopen(env, regionNode.getRegionInfo());
- regionNode.setProcedure(proc);
- } finally {
- regionNode.unlock();
+ TransitRegionStateProcedure proc;
+ regionNode.lock();
+ try {
+ if (regionNode.getProcedure() != null) {
+ continue;
+ }
+ proc = TransitRegionStateProcedure.reopen(env, regionNode.getRegionInfo());
+ regionNode.setProcedure(proc);
+ } finally {
+ regionNode.unlock();
+ }
+ addChildProcedure(proc);
+ regionsReopened++;
+ }
+ setNextState(ReopenTableRegionsState.REOPEN_TABLE_REGIONS_CONFIRM_REOPENED);
+ return Flow.HAS_MORE_STATE;
+ case REOPEN_TABLE_REGIONS_CONFIRM_REOPENED:
+ // update region lists based on what's been reopened
+ regions = filterReopened(env, regions);
+ currentRegionBatch = filterReopened(env, currentRegionBatch);
+
+ // existing batch didn't fully reopen, so try to resolve that first.
+ // since this is a retry, don't do the batch backoff
+ if (!currentRegionBatch.isEmpty()) {
+ return reopenIfSchedulable(env, currentRegionBatch, false);
}
- addChildProcedure(proc);
- regionsReopened++;
- }
- setNextState(ReopenTableRegionsState.REOPEN_TABLE_REGIONS_CONFIRM_REOPENED);
- return Flow.HAS_MORE_STATE;
- case REOPEN_TABLE_REGIONS_CONFIRM_REOPENED:
- // update region lists based on what's been reopened
- regions = filterReopened(env, regions);
- currentRegionBatch = filterReopened(env, currentRegionBatch);
-
- // existing batch didn't fully reopen, so try to resolve that first.
- // since this is a retry, don't do the batch backoff
- if (!currentRegionBatch.isEmpty()) {
- return reopenIfSchedulable(env, currentRegionBatch, false);
- }
- if (regions.isEmpty()) {
- return Flow.NO_MORE_STATE;
- }
+ if (regions.isEmpty()) {
+ return Flow.NO_MORE_STATE;
+ }
- // current batch is finished, schedule more regions
- return reopenIfSchedulable(env, regions, true);
- default:
- throw new UnsupportedOperationException("unhandled state=" + state);
+ // current batch is finished, schedule more regions
+ return reopenIfSchedulable(env, regions, true);
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ if (isRollbackSupported(state) || e instanceof DoNotRetryIOException) {
+ setFailure("master-reopen-table-regions", e);
+ } else {
+ LOG.warn("Retriable error trying to reopen regions for table={} (in
state={})", tableName,
+ state, e);
+ }
}
+ return Flow.HAS_MORE_STATE;
}
private List<HRegionLocation> filterReopened(MasterProcedureEnv env,
@@ -296,19 +333,33 @@ public class ReopenTableRegionsProcedure
}
private List<HRegionLocation>
- getRegionLocationsForReopen(List<HRegionLocation> tableRegionsForReopen) {
+ getRegionLocationsForReopen(List<HRegionLocation> tableRegionsForReopen) throws IOException {
List<HRegionLocation> regionsToReopen = new ArrayList<>();
if (
CollectionUtils.isNotEmpty(regionNames) && CollectionUtils.isNotEmpty(tableRegionsForReopen)
) {
+ List<byte[]> notFoundRegions = new ArrayList<>();
+
for (byte[] regionName : regionNames) {
+ boolean found = false;
for (HRegionLocation hRegionLocation : tableRegionsForReopen) {
if (Bytes.equals(regionName, hRegionLocation.getRegion().getRegionName())) {
regionsToReopen.add(hRegionLocation);
+ found = true;
break;
}
}
+ if (!found) {
+ notFoundRegions.add(regionName);
+ }
+ }
+
+ if (!notFoundRegions.isEmpty()) {
+ String regionNamesStr =
+ notFoundRegions.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", "));
+ throw new UnknownRegionException(
+ "The following regions do not belong to table " + tableName + ": " +
regionNamesStr);
}
} else {
regionsToReopen = tableRegionsForReopen;
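Because unknown region names now fail the procedure fast with UnknownRegionException (for example when a region was split or merged after the caller listed it), a client of the new API may want to refresh and retry; a hedged client-side sketch (staleRegions is an illustrative, possibly out-of-date list; not part of the patch):

    try {
      admin.reopenTableRegions(tableName, staleRegions);
    } catch (IOException e) {
      // The UnknownRegionException raised by the master surfaces through the blocking call;
      // fall back to reopening all regions of the table, which needs no region list.
      admin.reopenTableRegions(tableName);
    }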
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsIntegration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsIntegration.java
new file mode 100644
index 00000000000..8c7a1ec8028
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsIntegration.java
@@ -0,0 +1,335 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.MetricsRegionWrapperImpl;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MasterTests.class, MediumTests.class })
+public class TestReopenTableRegionsIntegration {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestReopenTableRegionsIntegration.class);
+
+ private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
+ private static final TableName TABLE_NAME = TableName.valueOf("testLazyUpdateReopen");
+ private static final byte[] CF = Bytes.toBytes("cf");
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ Configuration conf = UTIL.getConfiguration();
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void tearDown() throws Exception {
+ UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testLazyUpdateThenReopenUpdatesTableDescriptorHash() throws Exception {
+ // Step 1: Create table with column family and 3 regions
+ ColumnFamilyDescriptor cfd =
+ ColumnFamilyDescriptorBuilder.newBuilder(CF).setMaxVersions(1).build();
+
+ TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME).setColumnFamily(cfd)
+ .setMaxFileSize(100 * 1024 * 1024L).build();
+
+ UTIL.getAdmin().createTable(td, Bytes.toBytes("a"), Bytes.toBytes("z"), 3);
+ UTIL.waitTableAvailable(TABLE_NAME);
+
+ try {
+ // Step 2: Capture initial tableDescriptorHash from all regions
+ List<HRegion> regions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
+ assertEquals("Expected 3 regions", 3, regions.size());
+
+ Map<byte[], String> initialHashes = new HashMap<>();
+
+ for (HRegion region : regions) {
+ String hash;
+ try (MetricsRegionWrapperImpl wrapper = new MetricsRegionWrapperImpl(region)) {
+ hash = wrapper.getTableDescriptorHash();
+ }
+ initialHashes.put(region.getRegionInfo().getRegionName(), hash);
+ }
+
+ // Verify all regions have same hash
+ Set<String> uniqueHashes = new HashSet<>(initialHashes.values());
+ assertEquals("All regions should have same hash", 1,
uniqueHashes.size());
+ String initialHash = uniqueHashes.iterator().next();
+
+ // Step 3: Perform lazy table descriptor update
+ ColumnFamilyDescriptor newCfd =
+ ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(5).build();
+
+ TableDescriptor newTd = TableDescriptorBuilder.newBuilder(td).modifyColumnFamily(newCfd)
+ .setMaxFileSize(200 * 1024 * 1024L).build();
+
+ // Perform lazy update (reopenRegions = false)
+ UTIL.getAdmin().modifyTableAsync(newTd, false).get();
+
+ // Wait for modification to complete
+ UTIL.waitFor(30000, () -> {
+ try {
+ TableDescriptor currentTd = UTIL.getAdmin().getDescriptor(TABLE_NAME);
+ return currentTd.getMaxFileSize() == 200 * 1024 * 1024L;
+ } catch (Exception e) {
+ return false;
+ }
+ });
+
+ // Step 4: Verify tableDescriptorHash has NOT changed in region metrics
+ List<HRegion> regionsAfterLazyUpdate = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
+ for (HRegion region : regionsAfterLazyUpdate) {
+ String currentHash;
+ try (MetricsRegionWrapperImpl wrapper = new MetricsRegionWrapperImpl(region)) {
+ currentHash = wrapper.getTableDescriptorHash();
+ }
+ assertEquals("Hash should NOT change without region reopen",
+ initialHashes.get(region.getRegionInfo().getRegionName()), currentHash);
+ }
+
+ // Verify the table descriptor itself has changed
+ TableDescriptor currentTd = UTIL.getAdmin().getDescriptor(TABLE_NAME);
+ String newDescriptorHash = currentTd.getDescriptorHash();
+ assertNotEquals("Table descriptor should have new hash", initialHash,
newDescriptorHash);
+
+ // Step 5: Use new Admin API to reopen all regions
+ UTIL.getAdmin().reopenTableRegions(TABLE_NAME);
+
+ // Wait for all regions to be reopened
+ UTIL.waitFor(60000, () -> {
+ try {
+ List<HRegion> currentRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
+ if (currentRegions.size() != 3) {
+ return false;
+ }
+
+ // Check if all regions now have the new hash
+ for (HRegion region : currentRegions) {
+ String hash;
+ try (MetricsRegionWrapperImpl wrapper = new MetricsRegionWrapperImpl(region)) {
+ hash = wrapper.getTableDescriptorHash();
+ }
+ if (hash.equals(initialHash)) {
+ return false;
+ }
+ }
+ return true;
+ } catch (Exception e) {
+ return false;
+ }
+ });
+
+ // Step 6: Verify tableDescriptorHash HAS changed in all region metrics
+ List<HRegion> reopenedRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
+ assertEquals("Should still have 3 regions", 3, reopenedRegions.size());
+
+ for (HRegion region : reopenedRegions) {
+ String currentHash;
+ try (MetricsRegionWrapperImpl wrapper = new MetricsRegionWrapperImpl(region)) {
+ currentHash = wrapper.getTableDescriptorHash();
+ }
+ assertNotEquals("Hash SHOULD change after region reopen", initialHash,
currentHash);
+ assertEquals("Hash should match current table descriptor",
newDescriptorHash, currentHash);
+ }
+
+ // Verify all regions show the same new hash
+ Set<String> newHashes = new HashSet<>();
+ for (HRegion region : reopenedRegions) {
+ try (MetricsRegionWrapperImpl wrapper = new MetricsRegionWrapperImpl(region)) {
+ newHashes.add(wrapper.getTableDescriptorHash());
+ }
+ }
+ assertEquals("All regions should have same new hash", 1,
newHashes.size());
+
+ } finally {
+ UTIL.deleteTable(TABLE_NAME);
+ }
+ }
+
+ @Test
+ public void testLazyUpdateThenReopenSpecificRegions() throws Exception {
+ TableName tableName = TableName.valueOf("testSpecificRegionsReopen");
+
+ // Step 1: Create table with 5 regions
+ ColumnFamilyDescriptor cfd =
+ ColumnFamilyDescriptorBuilder.newBuilder(CF).setMaxVersions(1).build();
+
+ TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(cfd)
+ .setMaxFileSize(100 * 1024 * 1024L).build();
+
+ UTIL.getAdmin().createTable(td, Bytes.toBytes("a"), Bytes.toBytes("z"), 5);
+ UTIL.waitTableAvailable(tableName);
+
+ try {
+ // Step 2: Capture initial hashes
+ List<HRegion> regions = UTIL.getHBaseCluster().getRegions(tableName);
+ assertEquals("Expected 5 regions", 5, regions.size());
+
+ Map<byte[], String> initialHashes = new HashMap<>();
+
+ for (HRegion region : regions) {
+ String hash;
+ try (MetricsRegionWrapperImpl wrapper = new MetricsRegionWrapperImpl(region)) {
+ hash = wrapper.getTableDescriptorHash();
+ }
+ initialHashes.put(region.getRegionInfo().getRegionName(), hash);
+ }
+
+ String initialHash = initialHashes.values().iterator().next();
+
+ // Step 3: Perform lazy update
+ ColumnFamilyDescriptor newCfd =
+ ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(10).build();
+
+ TableDescriptor newTd = TableDescriptorBuilder.newBuilder(td).modifyColumnFamily(newCfd)
+ .setMaxFileSize(300 * 1024 * 1024L).build();
+
+ UTIL.getAdmin().modifyTableAsync(newTd, false).get();
+
+ UTIL.waitFor(30000, () -> {
+ try {
+ TableDescriptor currentTd = UTIL.getAdmin().getDescriptor(tableName);
+ return currentTd.getMaxFileSize() == 300 * 1024 * 1024L;
+ } catch (Exception e) {
+ return false;
+ }
+ });
+
+ String newDescriptorHash = UTIL.getAdmin().getDescriptor(tableName).getDescriptorHash();
+
+ // Step 4: Reopen only first 2 regions
+ List<RegionInfo> regionsToReopen = new ArrayList<>();
+ regionsToReopen.add(regions.get(0).getRegionInfo());
+ regionsToReopen.add(regions.get(1).getRegionInfo());
+
+ UTIL.getAdmin().reopenTableRegions(tableName, regionsToReopen);
+
+ // Wait for those regions to reopen
+ UTIL.waitFor(60000, () -> {
+ try {
+ List<HRegion> currentRegions = UTIL.getHBaseCluster().getRegions(tableName);
+ int newHashCount = 0;
+ for (HRegion region : currentRegions) {
+ String hash;
+ try (MetricsRegionWrapperImpl wrapper = new MetricsRegionWrapperImpl(region)) {
+ hash = wrapper.getTableDescriptorHash();
+ }
+ if (!hash.equals(initialHash)) {
+ newHashCount++;
+ }
+ }
+ return newHashCount >= 2;
+ } catch (Exception e) {
+ return false;
+ }
+ });
+
+ // Step 5: Verify only reopened regions have new hash
+ List<HRegion> regionsAfterFirstReopen = UTIL.getHBaseCluster().getRegions(tableName);
+ int newHashCount = 0;
+ int oldHashCount = 0;
+
+ for (HRegion region : regionsAfterFirstReopen) {
+ String currentHash;
+ try (MetricsRegionWrapperImpl wrapper = new MetricsRegionWrapperImpl(region)) {
+ currentHash = wrapper.getTableDescriptorHash();
+ }
+
+ if (currentHash.equals(newDescriptorHash)) {
+ newHashCount++;
+ } else if (currentHash.equals(initialHash)) {
+ oldHashCount++;
+ }
+ }
+
+ assertEquals("Should have 2 regions with new hash", 2, newHashCount);
+ assertEquals("Should have 3 regions with old hash", 3, oldHashCount);
+
+ // Step 6: Reopen remaining regions
+ List<RegionInfo> remainingRegions = new ArrayList<>();
+ for (int i = 2; i < regions.size(); i++) {
+ remainingRegions.add(regions.get(i).getRegionInfo());
+ }
+
+ UTIL.getAdmin().reopenTableRegions(tableName, remainingRegions);
+
+ // Wait for all regions to have new hash
+ UTIL.waitFor(60000, () -> {
+ try {
+ List<HRegion> currentRegions = UTIL.getHBaseCluster().getRegions(tableName);
+ for (HRegion region : currentRegions) {
+ String hash;
+ try (MetricsRegionWrapperImpl wrapper = new MetricsRegionWrapperImpl(region)) {
+ hash = wrapper.getTableDescriptorHash();
+ }
+ if (!hash.equals(newDescriptorHash)) {
+ return false;
+ }
+ }
+ return true;
+ } catch (Exception e) {
+ return false;
+ }
+ });
+
+ // Step 7: Verify all regions now have new hash
+ List<HRegion> finalRegions = UTIL.getHBaseCluster().getRegions(tableName);
+ for (HRegion region : finalRegions) {
+ String currentHash;
+ try (MetricsRegionWrapperImpl wrapper = new MetricsRegionWrapperImpl(region)) {
+ currentHash = wrapper.getTableDescriptorHash();
+ }
+
+ assertEquals("All regions should now have new hash",
newDescriptorHash, currentHash);
+ }
+
+ } finally {
+ UTIL.deleteTable(tableName);
+ }
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureSpecificRegions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureSpecificRegions.java
new file mode 100644
index 00000000000..a38ec490186
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestReopenTableRegionsProcedureSpecificRegions.java
@@ -0,0 +1,442 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.UnknownRegionException;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MasterTests.class, MediumTests.class })
+public class TestReopenTableRegionsProcedureSpecificRegions {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestReopenTableRegionsProcedureSpecificRegions.class);
+
+ private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();
+ private static final byte[] CF = Bytes.toBytes("cf");
+
+ private static SingleProcessHBaseCluster singleProcessHBaseCluster;
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ Configuration conf = UTIL.getConfiguration();
+ conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+ singleProcessHBaseCluster = UTIL.startMiniCluster(1);
+ }
+
+ @AfterClass
+ public static void tearDown() throws Exception {
+ UTIL.shutdownMiniCluster();
+ if (Objects.nonNull(singleProcessHBaseCluster)) {
+ singleProcessHBaseCluster.close();
+ }
+ }
+
+ private ProcedureExecutor<MasterProcedureEnv> getProcExec() {
+ return UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
+ }
+
+ @Test
+ public void testInvalidRegionNamesThrowsException() throws Exception {
+ TableName tableName = TableName.valueOf("TestInvalidRegions");
+ try (Table ignored = UTIL.createTable(tableName, CF)) {
+
+ List<RegionInfo> regions = UTIL.getAdmin().getRegions(tableName);
+ assertFalse("Table should have at least one region", regions.isEmpty());
+
+ List<byte[]> invalidRegionNames =
+ Collections.singletonList(Bytes.toBytes("non-existent-region-name"));
+
+ ReopenTableRegionsProcedure proc =
+ new ReopenTableRegionsProcedure(tableName, invalidRegionNames, 0L, Integer.MAX_VALUE);
+
+ long procId = getProcExec().submitProcedure(proc);
+ UTIL.waitFor(60000, proc::isFailed);
+
+ Throwable cause = ProcedureTestingUtility.getExceptionCause(proc);
+ assertTrue("Expected UnknownRegionException, got: " +
cause.getClass().getName(),
+ cause instanceof UnknownRegionException);
+ assertTrue("Error message should contain region name",
+ cause.getMessage().contains("non-existent-region-name"));
+ assertTrue("Error message should contain table name",
+ cause.getMessage().contains(tableName.getNameAsString()));
+ }
+ }
+
+ @Test
+ public void testMixedValidInvalidRegions() throws Exception {
+ TableName tableName = TableName.valueOf("TestMixedRegions");
+ try (Table ignored = UTIL.createTable(tableName, CF)) {
+
+ List<RegionInfo> actualRegions = UTIL.getAdmin().getRegions(tableName);
+ assertFalse("Table should have at least one region",
actualRegions.isEmpty());
+
+ List<byte[]> mixedRegionNames = new ArrayList<>();
+ mixedRegionNames.add(actualRegions.get(0).getRegionName());
+ mixedRegionNames.add(Bytes.toBytes("invalid-region-1"));
+ mixedRegionNames.add(Bytes.toBytes("invalid-region-2"));
+
+ ReopenTableRegionsProcedure proc =
+ new ReopenTableRegionsProcedure(tableName, mixedRegionNames, 0L, Integer.MAX_VALUE);
+
+ long procId = getProcExec().submitProcedure(proc);
+ UTIL.waitFor(60000, proc::isFailed);
+
+ Throwable cause = ProcedureTestingUtility.getExceptionCause(proc);
+ assertTrue("Expected UnknownRegionException", cause instanceof
UnknownRegionException);
+ assertTrue("Error message should contain first invalid region",
+ cause.getMessage().contains("invalid-region-1"));
+ assertTrue("Error message should contain second invalid region",
+ cause.getMessage().contains("invalid-region-2"));
+ }
+ }
+
+ @Test
+ public void testSpecificRegionsReopenWithThrottling() throws Exception {
+ TableName tableName = TableName.valueOf("TestSpecificThrottled");
+
+ TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF))
+ .setValue(ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, "100")
+ .setValue(ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY, "2").build();
+
+ UTIL.getAdmin().createTable(td, Bytes.toBytes("a"), Bytes.toBytes("z"), 5);
+
+ List<RegionInfo> allRegions = UTIL.getAdmin().getRegions(tableName);
+ assertEquals(5, allRegions.size());
+
+ List<byte[]> specificRegionNames =
+ allRegions.subList(0, 3).stream().map(RegionInfo::getRegionName).collect(Collectors.toList());
+
+ ReopenTableRegionsProcedure proc = ReopenTableRegionsProcedure.throttled(
+ UTIL.getConfiguration(), UTIL.getAdmin().getDescriptor(tableName), specificRegionNames);
+
+ long procId = getProcExec().submitProcedure(proc);
+ ProcedureTestingUtility.waitProcedure(getProcExec(), procId);
+
+ assertFalse("Procedure should succeed", proc.isFailed());
+ assertEquals("Should reopen exactly 3 regions", 3,
proc.getRegionsReopened());
+ assertTrue("Should process multiple batches with batch size 2",
+ proc.getBatchesProcessed() >= 2);
+ }
+
+ @Test
+ public void testEmptyRegionListReopensAll() throws Exception {
+ TableName tableName = TableName.valueOf("TestEmptyList");
+
+ TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build();
+
+ UTIL.getAdmin().createTable(td, Bytes.toBytes("a"), Bytes.toBytes("z"), 5);
+
+ List<RegionInfo> allRegions = UTIL.getAdmin().getRegions(tableName);
+ assertEquals(5, allRegions.size());
+
+ ReopenTableRegionsProcedure proc = ReopenTableRegionsProcedure
+ .throttled(UTIL.getConfiguration(), UTIL.getAdmin().getDescriptor(tableName));
+
+ long procId = getProcExec().submitProcedure(proc);
+ ProcedureTestingUtility.waitProcedure(getProcExec(), procId);
+
+ assertFalse("Procedure should succeed", proc.isFailed());
+ assertEquals("Should reopen all 5 regions", 5, proc.getRegionsReopened());
+ }
+
+ @Test
+ public void testDisabledTableSkipsReopen() throws Exception {
+ TableName tableName = TableName.valueOf("TestDisabledTable");
+ try (Table ignored = UTIL.createTable(tableName, CF)) {
+ UTIL.getAdmin().disableTable(tableName);
+
+ ReopenTableRegionsProcedure proc = ReopenTableRegionsProcedure
+ .throttled(UTIL.getConfiguration(), UTIL.getAdmin().getDescriptor(tableName));
+
+ long procId = getProcExec().submitProcedure(proc);
+ ProcedureTestingUtility.waitProcedure(getProcExec(), procId);
+
+ assertFalse("Procedure should succeed", proc.isFailed());
+ assertEquals("Should not reopen any regions for disabled table", 0,
+ proc.getRegionsReopened());
+ }
+ }
+
+ @Test
+ public void testReopenRegionsThrottledWithLargeTable() throws Exception {
+ TableName tableName = TableName.valueOf("TestLargeTable");
+
+ TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF))
+ .setValue(ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, "50")
+ .setValue(ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY, "3").build();
+
+ UTIL.getAdmin().createTable(td, Bytes.toBytes("a"), Bytes.toBytes("z"),
10);
+
+ List<RegionInfo> regions = UTIL.getAdmin().getRegions(tableName);
+ assertEquals(10, regions.size());
+
+ ReopenTableRegionsProcedure proc = ReopenTableRegionsProcedure
+ .throttled(UTIL.getConfiguration(), UTIL.getAdmin().getDescriptor(tableName));
+
+ long procId = getProcExec().submitProcedure(proc);
+ ProcedureTestingUtility.waitProcedure(getProcExec(), procId);
+
+ assertFalse("Procedure should succeed", proc.isFailed());
+ assertEquals("Should reopen all 10 regions", 10,
proc.getRegionsReopened());
+ assertTrue("Should process multiple batches", proc.getBatchesProcessed()
>= 4);
+ }
+
+ @Test
+ public void testConfigurationPrecedence() throws Exception {
+ TableName tableName = TableName.valueOf("TestConfigPrecedence");
+
+ Configuration conf = UTIL.getConfiguration();
+ conf.setLong(ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, 1000);
+ conf.setInt(ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY, 5);
+
+ TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF))
+ .setValue(ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, "2000")
+ .setValue(ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY, "2").build();
+
+ UTIL.getAdmin().createTable(td);
+
+ ReopenTableRegionsProcedure proc =
+ ReopenTableRegionsProcedure.throttled(conf, UTIL.getAdmin().getDescriptor(tableName));
+
+ assertEquals("Table descriptor config should override global config", 2000,
+ proc.getReopenBatchBackoffMillis());
+ }
+
+ @Test
+ public void testThrottledVsUnthrottled() throws Exception {
+ TableName tableName = TableName.valueOf("TestThrottledVsUnthrottled");
+
+ TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF))
+ .setValue(ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, "1000")
+ .setValue(ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY, "2").build();
+
+ UTIL.getAdmin().createTable(td, Bytes.toBytes("a"), Bytes.toBytes("z"), 5);
+
+ List<RegionInfo> regions = UTIL.getAdmin().getRegions(tableName);
+ List<byte[]> regionNames =
+ regions.stream().map(RegionInfo::getRegionName).collect(Collectors.toList());
+
+ ReopenTableRegionsProcedure unthrottledProc =
+ new ReopenTableRegionsProcedure(tableName, regionNames);
+ assertEquals("Unthrottled should use default (0ms)", 0,
+ unthrottledProc.getReopenBatchBackoffMillis());
+
+ ReopenTableRegionsProcedure throttledProc = ReopenTableRegionsProcedure
+ .throttled(UTIL.getConfiguration(), UTIL.getAdmin().getDescriptor(tableName), regionNames);
+ assertEquals("Throttled should use table config (1000ms)", 1000,
+ throttledProc.getReopenBatchBackoffMillis());
+ }
+
+ @Test
+ public void testExceptionInProcedureExecution() throws Exception {
+ TableName tableName = TableName.valueOf("TestExceptionInExecution");
+ try (Table ignored = UTIL.createTable(tableName, CF)) {
+
+ List<byte[]> invalidRegionNames =
+ Collections.singletonList(Bytes.toBytes("nonexistent-region"));
+
+ ReopenTableRegionsProcedure proc =
+ new ReopenTableRegionsProcedure(tableName, invalidRegionNames, 0L, Integer.MAX_VALUE);
+
+ long procId = getProcExec().submitProcedure(proc);
+ UTIL.waitFor(60000, () -> getProcExec().isFinished(procId));
+
+ Procedure<?> result = getProcExec().getResult(procId);
+ assertTrue("Procedure should have failed", result.isFailed());
+
+ Throwable cause = ProcedureTestingUtility.getExceptionCause(result);
+ assertTrue("Should be UnknownRegionException", cause instanceof
UnknownRegionException);
+ }
+ }
+
+ @Test
+ public void testSerializationWithRegionNames() throws Exception {
+ TableName tableName = TableName.valueOf("TestSerialization");
+ try (Table ignored = UTIL.createTable(tableName, CF)) {
+
+ List<RegionInfo> regions = UTIL.getAdmin().getRegions(tableName);
+ List<byte[]> regionNames =
+ regions.stream().map(RegionInfo::getRegionName).collect(Collectors.toList());
+
+ ReopenTableRegionsProcedure proc =
+ new ReopenTableRegionsProcedure(tableName, regionNames, 500L, 3);
+
+ long procId = getProcExec().submitProcedure(proc);
+ ProcedureTestingUtility.waitProcedure(getProcExec(), procId);
+
+ assertEquals("TableName should be preserved", tableName,
proc.getTableName());
+ assertEquals("Backoff should be preserved", 500L,
proc.getReopenBatchBackoffMillis());
+ }
+ }
+
+ @Test
+ public void testAllRegionsWithValidNames() throws Exception {
+ TableName tableName = TableName.valueOf("TestAllValidRegions");
+ try (Table ignored = UTIL.createTable(tableName, CF)) {
+
+ List<RegionInfo> actualRegions = UTIL.getAdmin().getRegions(tableName);
+ assertFalse("Table should have regions", actualRegions.isEmpty());
+
+ List<byte[]> validRegionNames =
+ actualRegions.stream().map(RegionInfo::getRegionName).collect(Collectors.toList());
+
+ ReopenTableRegionsProcedure proc =
+ new ReopenTableRegionsProcedure(tableName, validRegionNames, 0L, Integer.MAX_VALUE);
+
+ long procId = getProcExec().submitProcedure(proc);
+ ProcedureTestingUtility.waitProcedure(getProcExec(), procId);
+
+ assertFalse("Procedure should succeed with all valid regions",
proc.isFailed());
+ assertEquals("Should reopen all specified regions", actualRegions.size(),
+ proc.getRegionsReopened());
+ }
+ }
+
+ @Test
+ public void testSingleInvalidRegion() throws Exception {
+ TableName tableName = TableName.valueOf("TestSingleInvalid");
+ try (Table ignored = UTIL.createTable(tableName, CF)) {
+
+ List<byte[]> invalidRegionNames =
+ Collections.singletonList(Bytes.toBytes("totally-fake-region"));
+
+ ReopenTableRegionsProcedure proc =
+ new ReopenTableRegionsProcedure(tableName, invalidRegionNames, 0L, Integer.MAX_VALUE);
+
+ long procId = getProcExec().submitProcedure(proc);
+ UTIL.waitFor(60000, proc::isFailed);
+
+ Throwable cause = ProcedureTestingUtility.getExceptionCause(proc);
+ assertTrue("Expected UnknownRegionException", cause instanceof
UnknownRegionException);
+ assertTrue("Error message should list the invalid region",
+ cause.getMessage().contains("totally-fake-region"));
+ }
+ }
+
+ @Test
+ public void testRecoveryAfterValidationFailure() throws Exception {
+ TableName tableName = TableName.valueOf("TestRecoveryValidation");
+ try (Table ignored = UTIL.createTable(tableName, CF)) {
+
+ List<byte[]> invalidRegionNames =
+ Collections.singletonList(Bytes.toBytes("invalid-for-recovery"));
+
+ ReopenTableRegionsProcedure proc =
+ new ReopenTableRegionsProcedure(tableName, invalidRegionNames, 0L, Integer.MAX_VALUE);
+
+ ProcedureExecutor<MasterProcedureEnv> procExec = getProcExec();
+ long procId = procExec.submitProcedure(proc);
+
+ UTIL.waitFor(60000, () -> procExec.isFinished(procId));
+
+ Procedure<?> result = procExec.getResult(procId);
+ assertTrue("Procedure should fail validation", result.isFailed());
+
+ Throwable cause = ProcedureTestingUtility.getExceptionCause(result);
+ assertTrue("Should be UnknownRegionException", cause instanceof
UnknownRegionException);
+ assertTrue("Error should mention the invalid region",
+ cause.getMessage().contains("invalid-for-recovery"));
+ }
+ }
+
+ @Test
+ public void testEmptyTableWithNoRegions() throws Exception {
+ TableName tableName = TableName.valueOf("TestEmptyTable");
+
+ TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF)).build();
+
+ UTIL.getAdmin().createTable(td);
+
+ List<RegionInfo> regions = UTIL.getAdmin().getRegions(tableName);
+ int regionCount = regions.size();
+
+ ReopenTableRegionsProcedure proc = ReopenTableRegionsProcedure
+ .throttled(UTIL.getConfiguration(), UTIL.getAdmin().getDescriptor(tableName));
+
+ long procId = getProcExec().submitProcedure(proc);
+ ProcedureTestingUtility.waitProcedure(getProcExec(), procId);
+
+ assertFalse("Procedure should complete successfully even with no regions",
proc.isFailed());
+ assertEquals("Should handle empty table gracefully", regionCount,
proc.getRegionsReopened());
+ }
+
+ @Test
+ public void testConfigChangeDoesNotAffectRunningProcedure() throws Exception {
+ TableName tableName = TableName.valueOf("TestConfigChange");
+
+ TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(CF))
+ .setValue(ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, "1000")
+ .setValue(ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_SIZE_MAX_KEY, "2").build();
+
+ UTIL.getAdmin().createTable(td, Bytes.toBytes("a"), Bytes.toBytes("z"), 5);
+
+ ReopenTableRegionsProcedure proc = ReopenTableRegionsProcedure
+ .throttled(UTIL.getConfiguration(), UTIL.getAdmin().getDescriptor(tableName));
+
+ assertEquals("Initial config should be 1000ms", 1000L,
proc.getReopenBatchBackoffMillis());
+
+ TableDescriptor modifiedTd = TableDescriptorBuilder.newBuilder(td)
+ .setValue(ReopenTableRegionsProcedure.PROGRESSIVE_BATCH_BACKOFF_MILLIS_KEY, "5000").build();
+ UTIL.getAdmin().modifyTable(modifiedTd);
+
+ assertEquals("Running procedure should keep original config", 1000L,
+ proc.getReopenBatchBackoffMillis());
+
+ long procId = getProcExec().submitProcedure(proc);
+ ProcedureTestingUtility.waitProcedure(getProcExec(), procId);
+
+ assertFalse("Procedure should complete successfully", proc.isFailed());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java
index a59b2966b89..ad5cd5eda79 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java
@@ -431,6 +431,17 @@ public class VerifyingRSGroupAdmin implements Admin, Closeable {
return admin.modifyTableAsync(td, reopenRegions);
}
+ @Override
+ public Future<Void> reopenTableRegionsAsync(TableName tableName) throws IOException {
+ return admin.reopenTableRegionsAsync(tableName);
+ }
+
+ @Override
+ public Future<Void> reopenTableRegionsAsync(TableName tableName, List<RegionInfo> regions)
+ throws IOException {
+ return admin.reopenTableRegionsAsync(tableName, regions);
+ }
+
public void shutdown() throws IOException {
admin.shutdown();
}
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
index 3d5a7e502e0..76a8b41481b 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
@@ -750,6 +750,16 @@ public class ThriftAdmin implements Admin {
throw new NotImplementedException("modifyTableAsync not supported in
ThriftAdmin");
}
+ @Override
+ public Future<Void> reopenTableRegionsAsync(TableName tableName) {
+ throw new NotImplementedException("reopenTableRegionsAsync not supported
in ThriftAdmin");
+ }
+
+ @Override
+ public Future<Void> reopenTableRegionsAsync(TableName tableName, List<RegionInfo> regions) {
+ throw new NotImplementedException("reopenTableRegionsAsync not supported in ThriftAdmin");
+ }
+
@Override
public void shutdown() {
throw new NotImplementedException("shutdown not supported in ThriftAdmin");