This is an automated email from the ASF dual-hosted git repository.

andor pushed a commit to branch HBASE-29081_rebased
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 76ee9f12a024b612c1e736d6b4bbcbc09a9004fd
Author: Anuj Sharma <[email protected]>
AuthorDate: Tue Sep 16 00:49:05 2025 +0530

    HBASE-29328: Implement new HBase command: refresh_hfiles (#7149)
    
    * CDPD-84463 Add ruby shell commands for refresh_hfiles
    
    * [CDPD-84466] Add hbase-client API code to refresh_hfiles
    
    * CDPD-84465 Add protobuf messages for refresh_hfiles
    
    * Add refreshHFiles function in master RPC service and make a call to it
    
    * CDPD-82553 Add function in Region Server to refresh Hfiles
    
    * Add nonceGroup and nonce for the Master RPC request
    
    * Refactor code with proper name for function
    
    * Add region Server Procedure and callables
    
    * Remove the refreshHFiles function which was intended to be called as an RS RPC
    
    As we will be calling it through procedure framework
    
    * Remove the unwanted comments
    
    * Add line mistakenly removed in admin.proto
    
    * Correct the wrong comment in Event Types
    
    * Apply Spotless
    
    * Address the review comments having small code changes
    
    * Add separate function for master service caller
    
    * Add retry mechanism for refresh_hfiles, send exception if retry threshold
gets breached
    
    Also handle scenario in case the region is not online
    
    * Add tablename into RefreshHFilesTableProcedureStateData
    
    * CDPD-88507, CDPD-88508 Add procedure support for namespace as parameter
and no parameter
    
    * nit: Add meaningful name to method and remove comments
    
    * Return exception if user is updating system table or reserved namespaces
    
    * Send exception if tablename or namespace is invalid
    
    Also remove redundant TODOs
    
    * Add gatekeeper method to prevent execution of the command before master
initializes
    
    * Return exception in case both TABLE_NAME and NAMESPACE is provided in 
arguments
    
    * Run Spotless
    
    * Add unit tests for refreshHfiles Procedure and admin calls
    
    * Make the newly added HFiles available for reading immediately
    
    * Revert "Make the newly added HFiles available for reading immediately"
    
    This reverts commit c25cc9a070ab240a406756413d33236e98a7a630.
    
    * Address review comments
    
    * Create test base class to avoid code duplication
    
    * Add integration test which enable readonly mode before refresh
    
    * Added test rule and rebased the upstream
    
    * Apply spotless
---
 .../java/org/apache/hadoop/hbase/client/Admin.java |  23 +++
 .../hadoop/hbase/client/AdminOverAsyncAdmin.java   |  15 ++
 .../org/apache/hadoop/hbase/client/AsyncAdmin.java |  15 ++
 .../hadoop/hbase/client/AsyncHBaseAdmin.java       |  15 ++
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java    |  50 +++++
 .../src/main/protobuf/server/master/Master.proto   |  14 ++
 .../protobuf/server/master/MasterProcedure.proto   |  19 ++
 .../apache/hadoop/hbase/executor/EventType.java    |   8 +-
 .../apache/hadoop/hbase/executor/ExecutorType.java |   3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java    |  74 ++++++++
 .../hadoop/hbase/master/MasterRpcServices.java     |  20 ++
 .../procedure/RefreshHFilesRegionProcedure.java    | 208 +++++++++++++++++++++
 .../procedure/RefreshHFilesTableProcedure.java     | 165 ++++++++++++++++
 .../master/procedure/TableProcedureInterface.java  |   3 +-
 .../hadoop/hbase/master/procedure/TableQueue.java  |   1 +
 .../hadoop/hbase/regionserver/HRegionServer.java   |   4 +
 .../hbase/regionserver/RefreshHFilesCallable.java  |  69 +++++++
 .../apache/hadoop/hbase/TestRefreshHFilesBase.java | 161 ++++++++++++++++
 .../hbase/client/TestRefreshHFilesFromClient.java  | 139 ++++++++++++++
 .../procedure/TestRefreshHFilesProcedure.java      | 116 ++++++++++++
 ...TestRefreshHFilesProcedureWithReadOnlyConf.java | 124 ++++++++++++
 .../hbase/rsgroup/VerifyingRSGroupAdmin.java       |  15 ++
 hbase-shell/src/main/ruby/hbase/admin.rb           |  20 ++
 hbase-shell/src/main/ruby/shell.rb                 |   1 +
 .../src/main/ruby/shell/commands/refresh_hfiles.rb |  64 +++++++
 .../hadoop/hbase/thrift2/client/ThriftAdmin.java   |  15 ++
 26 files changed, 1358 insertions(+), 3 deletions(-)

diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 8622e1c8877..7370983d01a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2708,6 +2708,29 @@ public interface Admin extends Abortable, Closeable {
    */
   Long refreshMeta() throws IOException;
 
+  /**
+   * Refresh HFiles for the table
+   * @param tableName table to refresh HFiles for
+   * @return ID of the procedure started for refreshing HFiles
+   * @throws IOException if a remote or network exception occurs
+   */
+  Long refreshHFiles(final TableName tableName) throws IOException;
+
+  /**
+   * Refresh HFiles for all the tables under given namespace
+   * @param namespace Namespace for which we should call refresh HFiles for 
all tables under it
+   * @return ID of the procedure started for refreshing HFiles
+   * @throws IOException if a remote or network exception occurs
+   */
+  Long refreshHFiles(final String namespace) throws IOException;
+
+  /**
+   * Refresh HFiles for all the tables
+   * @return ID of the procedure started for refreshing HFiles
+   * @throws IOException if a remote or network exception occurs
+   */
+  Long refreshHFiles() throws IOException;
+
   @InterfaceAudience.Private
   void restoreBackupSystemTable(String snapshotName) throws IOException;
 }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
index 37d46feb41a..ca0ffb67329 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
@@ -1158,6 +1158,21 @@ class AdminOverAsyncAdmin implements Admin {
     return get(admin.refreshMeta());
   }
 
+  @Override
+  public Long refreshHFiles(final TableName tableName) throws IOException {
+    return get(admin.refreshHFiles(tableName));
+  }
+
+  @Override
+  public Long refreshHFiles(final String namespace) throws IOException {
+    return get(admin.refreshHFiles(namespace));
+  }
+
+  @Override
+  public Long refreshHFiles() throws IOException {
+    return get(admin.refreshHFiles());
+  }
+
   @Override
   public void restoreBackupSystemTable(String snapshotName) throws IOException 
{
     get(admin.restoreBackupSystemTable(snapshotName));
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index e1d3aadfed3..114d103ce03 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -1895,6 +1895,21 @@ public interface AsyncAdmin {
    */
   CompletableFuture<Long> refreshMeta();
 
+  /**
+   * Refresh HFiles for the table
+   */
+  CompletableFuture<Long> refreshHFiles(final TableName tableName);
+
+  /**
+   * Refresh HFiles for all the tables under given namespace
+   */
+  CompletableFuture<Long> refreshHFiles(final String namespace);
+
+  /**
+   * Refresh HFiles for all the tables
+   */
+  CompletableFuture<Long> refreshHFiles();
+
   @InterfaceAudience.Private
   CompletableFuture<Void> restoreBackupSystemTable(String snapshotName);
 }
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index 26abe68402c..4cf02d91f0e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -1026,6 +1026,21 @@ class AsyncHBaseAdmin implements AsyncAdmin {
     return wrap(rawAdmin.refreshMeta());
   }
 
+  @Override
+  public CompletableFuture<Long> refreshHFiles(final TableName tableName) {
+    return wrap(rawAdmin.refreshHFiles(tableName));
+  }
+
+  @Override
+  public CompletableFuture<Long> refreshHFiles(final String namespace) {
+    return wrap(rawAdmin.refreshHFiles(namespace));
+  }
+
+  @Override
+  public CompletableFuture<Long> refreshHFiles() {
+    return wrap(rawAdmin.refreshHFiles());
+  }
+
   @Override
   public CompletableFuture<Void> restoreBackupSystemTable(String snapshotName) 
{
     return wrap(rawAdmin.restoreBackupSystemTable(snapshotName));
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 9454d282653..58409251cef 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -263,6 +263,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineReg
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshHFilesRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshHFilesResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshMetaRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshMetaResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ReopenTableRegionsRequest;
@@ -4700,6 +4702,14 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
         new RestoreBackupSystemTableProcedureBiConsumer());
   }
 
+  private CompletableFuture<Long> internalRefershHFiles(RefreshHFilesRequest 
request) {
+    return this.<Long> newMasterCaller()
+      .action((controller, stub) -> this.<RefreshHFilesRequest, 
RefreshHFilesResponse, Long> call(
+        controller, stub, request, MasterService.Interface::refreshHFiles,
+        RefreshHFilesResponse::getProcId))
+      .call();
+  }
+
   @Override
   public CompletableFuture<Long> refreshMeta() {
     RefreshMetaRequest.Builder request = RefreshMetaRequest.newBuilder();
@@ -4710,4 +4720,44 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
         RefreshMetaResponse::getProcId))
       .call();
   }
+
+  @Override
+  public CompletableFuture<Long> refreshHFiles(final TableName tableName) {
+    if (tableName.isSystemTable()) {
+      LOG.warn("Refreshing HFiles for system table {} is not allowed", 
tableName.getNameAsString());
+      throw new IllegalArgumentException(
+        "Not allowed to refresh HFiles for system table '" + 
tableName.getNameAsString() + "'");
+    }
+    // Request builder
+    RefreshHFilesRequest.Builder request = RefreshHFilesRequest.newBuilder();
+    request.setTableName(ProtobufUtil.toProtoTableName(tableName));
+    request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce());
+    return internalRefershHFiles(request.build());
+  }
+
+  @Override
+  public CompletableFuture<Long> refreshHFiles(final String namespace) {
+    if (
+      namespace.equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)
+        || namespace.equals(NamespaceDescriptor.BACKUP_NAMESPACE_NAME_STR)
+    ) {
+      LOG.warn("Refreshing HFiles for reserve namespace {} is not allowed", 
namespace);
+      throw new IllegalArgumentException(
+        "Not allowed to refresh HFiles for reserve namespace '" + namespace + 
"'");
+    }
+    // Request builder
+    RefreshHFilesRequest.Builder request = RefreshHFilesRequest.newBuilder();
+    request.setNamespace(namespace);
+    request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce());
+    return internalRefershHFiles(request.build());
+  }
+
+  @Override
+  public CompletableFuture<Long> refreshHFiles() {
+    // Request builder
+    RefreshHFilesRequest.Builder request = RefreshHFilesRequest.newBuilder();
+    // Set nonce
+    request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce());
+    return internalRefershHFiles(request.build());
+  }
 }
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto 
b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto
index 40f6a1518f1..c774a93605a 100644
--- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto
@@ -807,6 +807,17 @@ message ModifyColumnStoreFileTrackerResponse {
   optional uint64 proc_id = 1;
 }
 
+message RefreshHFilesRequest {
+  optional TableName table_name = 1;
+  optional string namespace = 2;
+  optional uint64 nonce_group = 3 [default = 0];
+  optional uint64 nonce = 4 [default = 0];
+}
+
+message RefreshHFilesResponse {
+  optional uint64 proc_id = 1;
+}
+
 message FlushMasterStoreRequest {}
 message FlushMasterStoreResponse {}
 
@@ -1314,6 +1325,9 @@ service MasterService {
 
   rpc RefreshMeta(RefreshMetaRequest)
     returns(RefreshMetaResponse);
+
+  rpc RefreshHFiles(RefreshHFilesRequest)
+    returns(RefreshHFilesResponse);
 }
 
 // HBCK Service definitions.
diff --git 
a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto 
b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto
index aa79ff474c3..56086aed29e 100644
--- 
a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto
+++ 
b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto
@@ -876,3 +876,22 @@ enum RefreshMetaState {
 
 message RefreshMetaStateData {
 }
+
+enum RefreshHFilesTableProcedureState {
+  REFRESH_HFILES_PREPARE = 1;
+  REFRESH_HFILES_REFRESH_REGION = 2;
+  REFRESH_HFILES_FINISH = 3;
+}
+
+message RefreshHFilesTableProcedureStateData {
+  optional TableName table_name = 1;
+  optional string namespace_name = 2;
+}
+
+message RefreshHFilesRegionProcedureStateData {
+  required RegionInfo region = 1;
+}
+
+message RefreshHFilesRegionParameter {
+  required RegionInfo region = 1;
+}
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
index fee132b7a4d..90985923b72 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
@@ -309,7 +309,13 @@ public enum EventType {
    * RS log roll.<br>
    * RS_LOG_ROLL
    */
-  RS_LOG_ROLL(91, ExecutorType.RS_LOG_ROLL);
+  RS_LOG_ROLL(91, ExecutorType.RS_LOG_ROLL),
+
+  /**
+   * RS refresh hfiles for a region.<br>
+   * RS_REFRESH_HFILES
+   */
+  RS_REFRESH_HFILES(92, ExecutorType.RS_REFRESH_HFILES);
 
   private final int code;
   private final ExecutorType executor;
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
index 668cd701c0d..e2d357fbee6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
@@ -57,7 +57,8 @@ public enum ExecutorType {
   RS_SNAPSHOT_OPERATIONS(36),
   RS_FLUSH_OPERATIONS(37),
   RS_RELOAD_QUOTAS_OPERATIONS(38),
-  RS_LOG_ROLL(39);
+  RS_LOG_ROLL(39),
+  RS_REFRESH_HFILES(40);
 
   ExecutorType(int value) {
   }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index caca0fcef73..a6b64bd1021 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -84,6 +84,7 @@ import 
org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.PleaseRestartMasterException;
 import org.apache.hadoop.hbase.RegionMetrics;
@@ -170,6 +171,7 @@ import 
org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
 import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
+import org.apache.hadoop.hbase.master.procedure.RefreshHFilesTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.RefreshMetaProcedure;
 import org.apache.hadoop.hbase.master.procedure.ReloadQuotasProcedure;
 import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure;
@@ -4668,4 +4670,76 @@ public class HMaster extends 
HBaseServerBase<MasterRpcServices> implements Maste
         }
       });
   }
+
+  public Long refreshHfiles(final TableName tableName, final long nonceGroup, 
final long nonce)
+    throws IOException {
+    checkInitialized();
+
+    if (!tableDescriptors.exists(tableName)) {
+      LOG.info("RefreshHfilesProcedure failed because table {} does not exist",
+        tableName.getNameAsString());
+      throw new TableNotFoundException(tableName);
+    }
+
+    return MasterProcedureUtil
+      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, 
nonceGroup, nonce) {
+        @Override
+        protected void run() throws IOException {
+          LOG.info("Submitting RefreshHfilesTableProcedure for table {}",
+            tableName.getNameAsString());
+          submitProcedure(
+            new 
RefreshHFilesTableProcedure(procedureExecutor.getEnvironment(), tableName));
+        }
+
+        @Override
+        protected String getDescription() {
+          return "RefreshHfilesProcedure for a table";
+        }
+      });
+  }
+
+  public Long refreshHfiles(final String namespace, final long nonceGroup, 
final long nonce)
+    throws IOException {
+    checkInitialized();
+
+    try {
+      this.clusterSchemaService.getNamespace(namespace);
+    } catch (IOException e) {
+      LOG.info("RefreshHfilesProcedure failed because namespace {} does not 
exist", namespace);
+      throw new NamespaceNotFoundException(namespace);
+    }
+
+    return MasterProcedureUtil
+      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, 
nonceGroup, nonce) {
+        @Override
+        protected void run() throws IOException {
+          LOG.info("Submitting RefreshHfilesProcedure for namespace {}", 
namespace);
+          submitProcedure(
+            new 
RefreshHFilesTableProcedure(procedureExecutor.getEnvironment(), namespace));
+        }
+
+        @Override
+        protected String getDescription() {
+          return "RefreshHfilesProcedure for namespace";
+        }
+      });
+  }
+
+  public Long refreshHfiles(final long nonceGroup, final long nonce) throws 
IOException {
+    checkInitialized();
+
+    return MasterProcedureUtil
+      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, 
nonceGroup, nonce) {
+        @Override
+        protected void run() throws IOException {
+          LOG.info("Submitting RefreshHfilesProcedure for all tables");
+          submitProcedure(new 
RefreshHFilesTableProcedure(procedureExecutor.getEnvironment()));
+        }
+
+        @Override
+        protected String getDescription() {
+          return "RefreshHfilesProcedure for all tables";
+        }
+      });
+  }
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 472a8f86398..a020378d7e2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -3753,4 +3753,24 @@ public class MasterRpcServices extends 
HBaseRpcServicesBase<HMaster>
       throw new ServiceException(ioe);
     }
   }
+
+  @Override
+  public MasterProtos.RefreshHFilesResponse refreshHFiles(RpcController 
controller,
+    MasterProtos.RefreshHFilesRequest request) throws ServiceException {
+    try {
+      Long procId;
+      if (request.hasTableName()) {
+        procId = 
server.refreshHfiles(ProtobufUtil.toTableName(request.getTableName()),
+          request.getNonceGroup(), request.getNonce());
+      } else if (request.hasNamespace()) {
+        procId =
+          server.refreshHfiles(request.getNamespace(), 
request.getNonceGroup(), request.getNonce());
+      } else {
+        procId = server.refreshHfiles(request.getNonceGroup(), 
request.getNonce());
+      }
+      return 
MasterProtos.RefreshHFilesResponse.newBuilder().setProcId(procId).build();
+    } catch (IOException ioe) {
+      throw new ServiceException(ioe);
+    }
+  }
 }
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshHFilesRegionProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshHFilesRegionProcedure.java
new file mode 100644
index 00000000000..0680826a4c2
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshHFilesRegionProcedure.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.Optional;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
+import org.apache.hadoop.hbase.procedure2.FailedRemoteDispatchException;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
+import org.apache.hadoop.hbase.regionserver.RefreshHFilesCallable;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+
+/**
+ * A master-side procedure that handles refreshing HFiles (store files) for a 
specific region in
+ * HBase. It performs remote procedure dispatch to the RegionServer hosting 
the region and manages
+ * retries, suspensions, and timeouts as needed. This procedure ensures safe 
execution by verifying
+ * the region state, handling remote operation results, and applying retry 
mechanisms in case of
+ * failures. It gives the call to {@link RefreshHFilesCallable} which gets 
executed on region
+ * server.
+ */
+
[email protected]
+public class RefreshHFilesRegionProcedure extends Procedure<MasterProcedureEnv>
+  implements TableProcedureInterface,
+  RemoteProcedureDispatcher.RemoteProcedure<MasterProcedureEnv, ServerName> {
+  private static final Logger LOG = 
LoggerFactory.getLogger(RefreshHFilesRegionProcedure.class);
+  private RegionInfo region;
+  private ProcedureEvent<?> event;
+  private boolean dispatched;
+  private boolean succ;
+  private RetryCounter retryCounter;
+
+  public RefreshHFilesRegionProcedure() {
+  }
+
+  public RefreshHFilesRegionProcedure(RegionInfo region) {
+    this.region = region;
+  }
+
+  @Override
+  protected void deserializeStateData(ProcedureStateSerializer serializer) 
throws IOException {
+    MasterProcedureProtos.RefreshHFilesRegionProcedureStateData data =
+      
serializer.deserialize(MasterProcedureProtos.RefreshHFilesRegionProcedureStateData.class);
+    this.region = ProtobufUtil.toRegionInfo(data.getRegion());
+  }
+
+  @Override
+  protected void serializeStateData(ProcedureStateSerializer serializer) 
throws IOException {
+    MasterProcedureProtos.RefreshHFilesRegionProcedureStateData.Builder 
builder =
+      MasterProcedureProtos.RefreshHFilesRegionProcedureStateData.newBuilder();
+    builder.setRegion(ProtobufUtil.toRegionInfo(region));
+    serializer.serialize(builder.build());
+  }
+
+  @Override
+  protected boolean abort(MasterProcedureEnv env) {
+    return false;
+  }
+
+  @Override
+  protected void rollback(MasterProcedureEnv env) throws IOException, 
InterruptedException {
+    throw new UnsupportedOperationException();
+  }
+
+  private void setTimeoutForSuspend(MasterProcedureEnv env, String reason) {
+    if (retryCounter == null) {
+      retryCounter = 
ProcedureUtil.createRetryCounter(env.getMasterConfiguration());
+    }
+    long backoff = retryCounter.getBackoffTimeAndIncrementAttempts();
+    LOG.warn("{} can not run currently because {}, wait {} ms to retry", this, 
reason, backoff);
+    setTimeout(Math.toIntExact(backoff));
+    setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT);
+    skipPersistence();
+  }
+
+  @Override
+  protected Procedure<MasterProcedureEnv>[] execute(MasterProcedureEnv env)
+    throws ProcedureYieldException, ProcedureSuspendedException, 
InterruptedException {
+    if (dispatched) {
+      if (succ) {
+        return null;
+      }
+      dispatched = false;
+    }
+
+    RegionStates regionStates = env.getAssignmentManager().getRegionStates();
+    RegionStateNode regionNode = regionStates.getRegionStateNode(region);
+
+    if (regionNode.getProcedure() != null) {
+      setTimeoutForSuspend(env, String.format("region %s has a TRSP attached 
%s",
+        region.getRegionNameAsString(), regionNode.getProcedure()));
+      throw new ProcedureSuspendedException();
+    }
+
+    if (!regionNode.isInState(RegionState.State.OPEN)) {
+      LOG.warn("State of region {} is not OPEN. Skip {} ...", region, this);
+      setTimeoutForSuspend(env, String.format("region state of %s is %s",
+        region.getRegionNameAsString(), regionNode.getState()));
+      throw new ProcedureSuspendedException();
+    }
+
+    ServerName targetServer = regionNode.getRegionLocation();
+    if (targetServer == null) {
+      setTimeoutForSuspend(env,
+        String.format("target server of region %s is null", 
region.getRegionNameAsString()));
+      throw new ProcedureSuspendedException();
+    }
+
+    try {
+      env.getRemoteDispatcher().addOperationToNode(targetServer, this);
+      dispatched = true;
+      event = new ProcedureEvent<>(this);
+      event.suspendIfNotReady(this);
+      throw new ProcedureSuspendedException();
+    } catch (FailedRemoteDispatchException e) {
+      setTimeoutForSuspend(env, "Failed send request to " + targetServer);
+      throw new ProcedureSuspendedException();
+    }
+  }
+
+  @Override
+  public TableOperationType getTableOperationType() {
+    return TableOperationType.REFRESH_HFILES;
+  }
+
+  @Override
+  public TableName getTableName() {
+    return region.getTable();
+  }
+
+  @Override
+  public void remoteOperationFailed(MasterProcedureEnv env, 
RemoteProcedureException error) {
+    complete(env, error);
+  }
+
+  @Override
+  public void remoteOperationCompleted(MasterProcedureEnv env) {
+    complete(env, null);
+  }
+
+  @Override
+  public void remoteCallFailed(MasterProcedureEnv env, ServerName serverName, 
IOException e) {
+    complete(env, e);
+  }
+
+  private void complete(MasterProcedureEnv env, Throwable error) {
+    if (isFinished()) {
+      LOG.info("This procedure {} is already finished. Skip the rest of the 
processes",
+        this.getProcId());
+      return;
+    }
+    if (event == null) {
+      LOG.warn("procedure event for {} is null, maybe the procedure is created 
when recovery",
+        getProcId());
+      return;
+    }
+    if (error == null) {
+      succ = true;
+    }
+    event.wake(env.getProcedureScheduler());
+    event = null;
+  }
+
+  @Override
+  public Optional<RemoteProcedureDispatcher.RemoteOperation> 
remoteCallBuild(MasterProcedureEnv env,
+    ServerName serverName) {
+    MasterProcedureProtos.RefreshHFilesRegionParameter.Builder builder =
+      MasterProcedureProtos.RefreshHFilesRegionParameter.newBuilder();
+    builder.setRegion(ProtobufUtil.toRegionInfo(region));
+    return Optional
+      .of(new RSProcedureDispatcher.ServerOperation(this, getProcId(), 
RefreshHFilesCallable.class,
+        builder.build().toByteArray(), 
env.getMasterServices().getMasterActiveTime()));
+  }
+}
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshHFilesTableProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshHFilesTableProcedure.java
new file mode 100644
index 00000000000..cb225d23b0d
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshHFilesTableProcedure.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshHFilesTableProcedureState;
+
[email protected]
+public class RefreshHFilesTableProcedure
+  extends AbstractStateMachineTableProcedure<RefreshHFilesTableProcedureState> 
{
+  private static final Logger LOG = 
LoggerFactory.getLogger(RefreshHFilesTableProcedure.class);
+
+  private TableName tableName;
+  private String namespaceName;
+
+  public RefreshHFilesTableProcedure() {
+    super();
+  }
+
+  public RefreshHFilesTableProcedure(MasterProcedureEnv env) {
+    super(env);
+  }
+
+  public RefreshHFilesTableProcedure(MasterProcedureEnv env, TableName 
tableName) {
+    super(env);
+    this.tableName = tableName;
+  }
+
+  public RefreshHFilesTableProcedure(MasterProcedureEnv env, String 
namespaceName) {
+    super(env);
+    this.namespaceName = namespaceName;
+  }
+
+  @Override
+  public TableOperationType getTableOperationType() {
+    return TableOperationType.REFRESH_HFILES;
+  }
+
+  @Override
+  protected void serializeStateData(ProcedureStateSerializer serializer) 
throws IOException {
+    super.serializeStateData(serializer);
+    MasterProcedureProtos.RefreshHFilesTableProcedureStateData.Builder builder 
=
+      MasterProcedureProtos.RefreshHFilesTableProcedureStateData.newBuilder();
+    if (tableName != null && namespaceName == null) {
+      builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
+    } else if (tableName == null && namespaceName != null) {
+      builder.setNamespaceName(namespaceName);
+    }
+    serializer.serialize(builder.build());
+  }
+
+  @Override
+  protected void deserializeStateData(ProcedureStateSerializer serializer) 
throws IOException {
+    super.deserializeStateData(serializer);
+    MasterProcedureProtos.RefreshHFilesTableProcedureStateData data =
+      
serializer.deserialize(MasterProcedureProtos.RefreshHFilesTableProcedureStateData.class);
+    if (data.hasTableName() && !data.hasNamespaceName()) {
+      this.tableName = ProtobufUtil.toTableName(data.getTableName());
+    } else if (!data.hasTableName() && data.hasNamespaceName()) {
+      this.namespaceName = data.getNamespaceName();
+    }
+  }
+
+  @Override
+  public TableName getTableName() {
+    if (tableName != null && namespaceName == null) {
+      return tableName;
+    }
+    return DUMMY_NAMESPACE_TABLE_NAME;
+  }
+
+  @Override
+  protected RefreshHFilesTableProcedureState getInitialState() {
+    return RefreshHFilesTableProcedureState.REFRESH_HFILES_PREPARE;
+  }
+
+  @Override
+  protected int getStateId(RefreshHFilesTableProcedureState state) {
+    return state.getNumber();
+  }
+
+  @Override
+  protected RefreshHFilesTableProcedureState getState(int stateId) {
+    return RefreshHFilesTableProcedureState.forNumber(stateId);
+  }
+
+  @Override
+  protected void rollbackState(MasterProcedureEnv env, 
RefreshHFilesTableProcedureState state)
+    throws IOException, InterruptedException {
+    // Refresh HFiles is idempotent operation hence rollback is not needed
+    LOG.trace("Rollback not implemented for RefreshHFilesTableProcedure state: 
{}", state);
+  }
+
+  @Override
+  protected Flow executeFromState(MasterProcedureEnv env, 
RefreshHFilesTableProcedureState state) {
+    LOG.info("Executing RefreshHFilesTableProcedureState state: {}", state);
+
+    try {
+      return switch (state) {
+        case REFRESH_HFILES_PREPARE -> prepare(env);
+        case REFRESH_HFILES_REFRESH_REGION -> refreshHFiles(env);
+        case REFRESH_HFILES_FINISH -> finish();
+        default -> throw new UnsupportedOperationException("Unhandled state: " 
+ state);
+      };
+    } catch (Exception ex) {
+      LOG.error("Error in RefreshHFilesTableProcedure state {}", state, ex);
+      setFailure("RefreshHFilesTableProcedure", ex);
+      return Flow.NO_MORE_STATE;
+    }
+  }
+
+  private Flow prepare(final MasterProcedureEnv env) {
+    
setNextState(RefreshHFilesTableProcedureState.REFRESH_HFILES_REFRESH_REGION);
+    return Flow.HAS_MORE_STATE;
+  }
+
+  private void refreshHFilesForTable(final MasterProcedureEnv env, TableName 
tableName) {
+    addChildProcedure(env.getAssignmentManager().getTableRegions(tableName, 
true).stream()
+      
.map(RefreshHFilesRegionProcedure::new).toArray(RefreshHFilesRegionProcedure[]::new));
+  }
+
+  private Flow refreshHFiles(final MasterProcedureEnv env) throws IOException {
+    if (tableName != null && namespaceName == null) {
+      refreshHFilesForTable(env, tableName);
+    } else if (tableName == null && namespaceName != null) {
+      env.getMasterServices().listTableNamesByNamespace(namespaceName)
+        .forEach(table -> refreshHFilesForTable(env, table));
+    } else {
+      env.getMasterServices().getTableDescriptors().getAll().values().stream()
+        .map(TableDescriptor::getTableName).filter(table -> 
!table.isSystemTable())
+        .forEach(table -> refreshHFilesForTable(env, table));
+    }
+
+    setNextState(RefreshHFilesTableProcedureState.REFRESH_HFILES_FINISH);
+    return Flow.HAS_MORE_STATE;
+  }
+
+  private Flow finish() {
+    return Flow.NO_MORE_STATE;
+  }
+}
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
index c5c7ec602ea..7014b7cf01f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
@@ -51,7 +51,8 @@ public interface TableProcedureInterface {
     REGION_GC,
     MERGED_REGIONS_GC/* region operations */,
     REGION_TRUNCATE,
-    RESTORE_BACKUP_SYSTEM_TABLE
+    RESTORE_BACKUP_SYSTEM_TABLE,
+    REFRESH_HFILES,
   }
 
   /** Returns the name of the table the procedure is operating on */
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java
index 7be4c4b1810..a58db4d5ed6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java
@@ -73,6 +73,7 @@ class TableQueue extends Queue<TableName> {
       case MERGED_REGIONS_GC:
       case REGION_SNAPSHOT:
       case REGION_TRUNCATE:
+      case REFRESH_HFILES:
         return false;
       default:
         break;
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 6e6717d4238..00ec12e8992 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1972,6 +1972,10 @@ public class HRegionServer extends 
HBaseServerBase<RSRpcServices>
     final int logRollThreads = 
conf.getInt("hbase.regionserver.executor.log.roll.threads", 1);
     executorService.startExecutorService(executorService.new ExecutorConfig()
       
.setExecutorType(ExecutorType.RS_LOG_ROLL).setCorePoolSize(logRollThreads));
+    final int rsRefreshHFilesThreads =
+      conf.getInt("hbase.regionserver.executor.refresh.hfiles.threads", 3);
+    executorService.startExecutorService(executorService.new ExecutorConfig()
+      
.setExecutorType(ExecutorType.RS_REFRESH_HFILES).setCorePoolSize(rsRefreshHFilesThreads));
 
     Threads.setDaemonThreadRunning(this.walRoller, getName() + ".logRoller",
       uncaughtExceptionHandler);
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RefreshHFilesCallable.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RefreshHFilesCallable.java
new file mode 100644
index 00000000000..f864bf4c2c0
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RefreshHFilesCallable.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+
+/**
+ * This is a RegionServer-side callable used by the HBase Master Procedure 
framework to perform the
+ * actual HFiles refresh operation on a specific region. It is dispatched from 
the
+ * {@link 
org.apache.hadoop.hbase.master.procedure.RefreshHFilesRegionProcedure} to the 
RegionServer
+ * and executes the logic to refresh store files in each store of the region.
+ */
+
[email protected]
+public class RefreshHFilesCallable extends BaseRSProcedureCallable {
+  private static final Logger LOG = 
LoggerFactory.getLogger(RefreshHFilesCallable.class);
+
+  private RegionInfo regionInfo;
+
+  @Override
+  protected void doCall() throws Exception {
+    HRegion region = rs.getRegion(regionInfo.getEncodedName());
+    LOG.debug("Starting refreshHfiles operation on region {}", region);
+
+    try {
+      for (Store store : region.getStores()) {
+        store.refreshStoreFiles();
+      }
+    } catch (IOException ioe) {
+      LOG.warn("Exception while trying to refresh store files: ", ioe);
+    }
+  }
+
+  @Override
+  protected void initParameter(byte[] parameter) throws Exception {
+    MasterProcedureProtos.RefreshHFilesRegionParameter param =
+      MasterProcedureProtos.RefreshHFilesRegionParameter.parseFrom(parameter);
+    this.regionInfo = ProtobufUtil.toRegionInfo(param.getRegion());
+  }
+
+  @Override
+  public EventType getEventType() {
+    return EventType.RS_REFRESH_HFILES;
+  }
+}
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRefreshHFilesBase.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRefreshHFilesBase.java
new file mode 100644
index 00000000000..3d9f188242d
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRefreshHFilesBase.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_RETRIES_NUMBER;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.RefreshHFilesTableProcedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.security.access.ReadOnlyController;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TestRefreshHFilesBase {
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(TestRefreshHFilesBase.class);
+
+  protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+  protected Admin admin;
+  protected HMaster master;
+  List<HRegionServer> regionServers;
+  protected SingleProcessHBaseCluster cluster;
+  protected ProcedureExecutor<MasterProcedureEnv> procExecutor;
+  protected static Configuration conf;
+  protected static final TableName TEST_TABLE = 
TableName.valueOf("testRefreshHFilesTable");
+  protected static final String TEST_NAMESPACE = "testRefreshHFilesNamespace";
+  protected static final byte[] TEST_FAMILY = 
Bytes.toBytes("testRefreshHFilesCF1");
+
+  protected void createTableAndWait(TableName table, byte[] cf)
+    throws IOException, InterruptedException {
+    TEST_UTIL.createTable(table, cf);
+    TEST_UTIL.waitTableAvailable(table);
+  }
+
+  protected void createTableInNamespaceAndWait(String namespace, TableName 
table, byte[] cf)
+    throws IOException, InterruptedException {
+    TableName fqTableName = TableName.valueOf(namespace + 
table.getNameAsString());
+    TEST_UTIL.createTable(fqTableName, cf);
+    TEST_UTIL.waitTableAvailable(fqTableName);
+  }
+
+  protected void deleteTable(TableName table) throws IOException {
+    TEST_UTIL.deleteTableIfAny(table);
+  }
+
+  protected void createNamespace(String namespace) throws RuntimeException {
+    try {
+      final NamespaceDescriptor nsd = 
NamespaceDescriptor.create(namespace).build();
+      // Create the namespace if it doesn’t exist
+      if (
+        Arrays.stream(admin.listNamespaceDescriptors())
+          .noneMatch(ns -> ns.getName().equals(namespace))
+      ) {
+        admin.createNamespace(nsd);
+      }
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  protected void deleteNamespace(String namespace) {
+    try {
+      // List table in namespace
+      TableName[] tables = admin.listTableNamesByNamespace(namespace);
+      for (TableName t : tables) {
+        TEST_UTIL.deleteTableIfAny(t);
+      }
+      // Now delete the namespace
+      admin.deleteNamespace(namespace);
+    } catch (Exception e) {
+      LOG.debug(
+        "Unable to delete namespace " + namespace + " post test execution. 
This isn't a failure");
+    }
+  }
+
+  protected void submitProcedureAndAssertNotFailed(RefreshHFilesTableProcedure 
procedure) {
+    long procId = procExecutor.submitProcedure(procedure);
+    ProcedureTestingUtility.waitProcedure(procExecutor, procId);
+    
ProcedureTestingUtility.assertProcNotFailed(procExecutor.getResult(procId));
+  }
+
+  protected void setReadOnlyMode(boolean isReadOnly) {
+    conf.setBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY, isReadOnly);
+    notifyConfigurationObservers();
+  }
+
+  private void notifyConfigurationObservers() {
+    
master.getConfigurationManager().notifyAllObservers(TEST_UTIL.getConfiguration());
+    for (HRegionServer rs : regionServers) {
+      
rs.getConfigurationManager().notifyAllObservers(TEST_UTIL.getConfiguration());
+    }
+  }
+
+  private void setupReadOnlyConf(boolean addReadOnlyConf) {
+    if (!addReadOnlyConf) return;
+    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, 
ReadOnlyController.class.getName());
+    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, 
ReadOnlyController.class.getName());
+    // Keep ReadOnly property to false at the beginning so that create table 
succeed.
+    conf.setBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY, false);
+  }
+
+  protected void baseSetup(boolean addReadOnlyConf) throws Exception {
+    conf = TEST_UTIL.getConfiguration();
+    // Shorten the run time of failed unit tests by limiting retries and the 
session timeout
+    // threshold
+    conf.setInt(HBASE_CLIENT_RETRIES_NUMBER, 1);
+    conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
+    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 60000);
+    conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 120000);
+
+    setupReadOnlyConf(addReadOnlyConf);
+
+    try {
+      // Start the test cluster
+      cluster = TEST_UTIL.startMiniCluster(1);
+      admin = TEST_UTIL.getAdmin();
+      procExecutor = 
TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+      master = TEST_UTIL.getHBaseCluster().getMaster();
+      regionServers = cluster.getRegionServerThreads().stream()
+        
.map(JVMClusterUtil.RegionServerThread::getRegionServer).collect(Collectors.toList());
+    } catch (Exception e) {
+      TEST_UTIL.shutdownMiniCluster();
+      throw new RuntimeException(e);
+    }
+  }
+
+  protected void baseTearDown() throws Exception {
+    if (admin != null) {
+      admin.close();
+    }
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+}
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRefreshHFilesFromClient.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRefreshHFilesFromClient.java
new file mode 100644
index 00000000000..c134c6c9804
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRefreshHFilesFromClient.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.TestRefreshHFilesBase;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MediumTests.class, ClientTests.class })
+public class TestRefreshHFilesFromClient extends TestRefreshHFilesBase {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestRefreshHFilesFromClient.class);
+
+  private static final TableName TEST_NONEXISTENT_TABLE =
+    TableName.valueOf("testRefreshHFilesNonExistentTable");
+  private static final String TEST_NONEXISTENT_NAMESPACE = 
"testRefreshHFilesNonExistentNamespace";
+
+  @Before
+  public void setup() throws Exception {
+    baseSetup(false);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    baseTearDown();
+  }
+
+  @Test
+  public void testRefreshHFilesForTable() throws Exception {
+    try {
+      // Create table in default namespace
+      createTableAndWait(TEST_TABLE, TEST_FAMILY);
+
+      // RefreshHFiles for table
+      Long procId = admin.refreshHFiles(TEST_TABLE);
+      assertTrue(procId >= 0);
+    } catch (Exception e) {
+      Assert.fail("RefreshHFilesForTable Should Not Throw Exception: " + e);
+      throw new RuntimeException(e);
+    } finally {
+      // Delete table name post test execution
+      deleteTable(TEST_TABLE);
+    }
+  }
+
+  // Not creating table hence refresh should throw exception
+  @Test(expected = TableNotFoundException.class)
+  public void testRefreshHFilesForNonExistentTable() throws Exception {
+    // RefreshHFiles for table
+    admin.refreshHFiles(TEST_NONEXISTENT_TABLE);
+  }
+
+  @Test
+  public void testRefreshHFilesForNamespace() throws Exception {
+    try {
+      createNamespace(TEST_NAMESPACE);
+
+      // Create table under test namespace
+      createTableInNamespaceAndWait(TEST_NAMESPACE, TEST_TABLE, TEST_FAMILY);
+
+      // RefreshHFiles for namespace
+      Long procId = admin.refreshHFiles(TEST_NAMESPACE);
+      assertTrue(procId >= 0);
+
+    } catch (Exception e) {
+      Assert.fail("RefreshHFilesForAllNamespace Should Not Throw Exception: " 
+ e);
+      throw new RuntimeException(e);
+    } finally {
+      // Delete namespace post test execution
+      // This will delete all tables under namespace hence no explicit table
+      // deletion for table under namespace is needed.
+      deleteNamespace(TEST_NAMESPACE);
+    }
+  }
+
+  @Test(expected = NamespaceNotFoundException.class)
+  public void testRefreshHFilesForNonExistentNamespace() throws Exception {
+    // RefreshHFiles for namespace
+    admin.refreshHFiles(TEST_NONEXISTENT_NAMESPACE);
+  }
+
+  @Test
+  public void testRefreshHFilesForAllTables() throws Exception {
+    try {
+      // Create table in default namespace
+      createTableAndWait(TEST_TABLE, TEST_FAMILY);
+
+      // Create test namespace
+      createNamespace(TEST_NAMESPACE);
+
+      // Create table under test namespace
+      createTableInNamespaceAndWait(TEST_NAMESPACE, TEST_TABLE, TEST_FAMILY);
+
+      // RefreshHFiles for all the tables
+      Long procId = admin.refreshHFiles();
+      assertTrue(procId >= 0);
+
+    } catch (Exception e) {
+      Assert.fail("RefreshHFilesForAllTables Should Not Throw Exception: " + 
e);
+      throw new RuntimeException(e);
+    } finally {
+      // Delete table name post test execution
+      deleteTable(TEST_TABLE);
+
+      // Delete namespace post test execution
+      // This will delete all tables under namespace hence no explicit table
+      // deletion for table under namespace is needed.
+      deleteNamespace(TEST_NAMESPACE);
+    }
+  }
+}
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshHFilesProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshHFilesProcedure.java
new file mode 100644
index 00000000000..9bb7bf181c3
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshHFilesProcedure.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TestRefreshHFilesBase;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
/**
 * Submits {@link RefreshHFilesTableProcedure} directly to the master procedure executor for the
 * three variants (single table, namespace, all tables) and asserts the procedure does not fail.
 */
@Category({ MasterTests.class, MediumTests.class })
public class TestRefreshHFilesProcedure extends TestRefreshHFilesBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestRefreshHFilesProcedure.class);

  @Before
  public void setup() throws Exception {
    // No read-only coprocessors for these tests.
    baseSetup(false);
  }

  @After
  public void tearDown() throws Exception {
    baseTearDown();
  }

  @Test
  public void testRefreshHFilesProcedureForTable() throws IOException {
    try {
      // Create table in default namespace
      createTableAndWait(TEST_TABLE, TEST_FAMILY);

      RefreshHFilesTableProcedure procedure =
        new RefreshHFilesTableProcedure(procExecutor.getEnvironment(), TEST_TABLE);
      submitProcedureAndAssertNotFailed(procedure);

    } catch (Exception e) {
      // Wrap: helpers throw checked types not covered by this method's throws clause.
      throw new RuntimeException(e);
    } finally {
      // Delete table name post test execution
      deleteTable(TEST_TABLE);
    }
  }

  @Test
  public void testRefreshHFilesProcedureForNamespace() {
    try {
      createNamespace(TEST_NAMESPACE);

      // Create table under test namespace
      createTableInNamespaceAndWait(TEST_NAMESPACE, TEST_TABLE, TEST_FAMILY);

      RefreshHFilesTableProcedure procedure =
        new RefreshHFilesTableProcedure(procExecutor.getEnvironment(), TEST_NAMESPACE);
      submitProcedureAndAssertNotFailed(procedure);

    } catch (Exception e) {
      // Wrap: helpers throw checked types not covered by this method's throws clause.
      throw new RuntimeException(e);
    } finally {
      // Delete namespace post test execution
      // This will delete all tables under namespace hence no explicit table
      // deletion for table under namespace is needed.
      deleteNamespace(TEST_NAMESPACE);
    }
  }

  @Test
  public void testRefreshHFilesProcedureForAllTables() throws IOException {
    try {
      // Create table in default namespace
      createTableAndWait(TEST_TABLE, TEST_FAMILY);

      // Create test namespace
      createNamespace(TEST_NAMESPACE);

      // Create table under test namespace
      createTableInNamespaceAndWait(TEST_NAMESPACE, TEST_TABLE, TEST_FAMILY);

      RefreshHFilesTableProcedure procedure =
        new RefreshHFilesTableProcedure(procExecutor.getEnvironment());
      submitProcedureAndAssertNotFailed(procedure);

    } catch (Exception e) {
      // Wrap: helpers throw checked types not covered by this method's throws clause.
      throw new RuntimeException(e);
    } finally {
      // Delete table name post test execution
      deleteTable(TEST_TABLE);

      // Delete namespace post test execution
      // This will delete all tables under namespace hence no explicit table
      // deletion for table under namespace is needed.
      deleteNamespace(TEST_NAMESPACE);
    }
  }
}
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshHFilesProcedureWithReadOnlyConf.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshHFilesProcedureWithReadOnlyConf.java
new file mode 100644
index 00000000000..351081d04fc
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshHFilesProcedureWithReadOnlyConf.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TestRefreshHFilesBase;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
/**
 * Same three procedure variants as {@code TestRefreshHFilesProcedure}, but executed while the
 * cluster is in global read-only mode (via {@code ReadOnlyController}) to verify refresh-HFiles
 * is still permitted. Tables are created before the flag is flipped on.
 */
@Category({ MasterTests.class, MediumTests.class })
public class TestRefreshHFilesProcedureWithReadOnlyConf extends TestRefreshHFilesBase {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestRefreshHFilesProcedureWithReadOnlyConf.class);

  @Before
  public void setup() throws Exception {
    // When true is passed only setup for readonly property is done.
    // The initial ReadOnly property will be false for table creation
    baseSetup(true);
  }

  @After
  public void tearDown() throws Exception {
    baseTearDown();
  }

  @Test
  public void testRefreshHFilesProcedureForTable() throws IOException {
    try {
      // Create table in default namespace
      createTableAndWait(TEST_TABLE, TEST_FAMILY);

      // Flip read-only on only after setup; the procedure must still succeed.
      setReadOnlyMode(true);
      RefreshHFilesTableProcedure procedure =
        new RefreshHFilesTableProcedure(procExecutor.getEnvironment(), TEST_TABLE);
      submitProcedureAndAssertNotFailed(procedure);

    } catch (Exception e) {
      // Wrap: helpers throw checked types not covered by this method's throws clause.
      throw new RuntimeException(e);
    } finally {
      // Restore writability so cleanup (table/namespace deletion) is allowed.
      setReadOnlyMode(false);
      // Delete table name post test execution
      deleteTable(TEST_TABLE);
    }
  }

  @Test
  public void testRefreshHFilesProcedureForNamespace() {
    try {
      createNamespace(TEST_NAMESPACE);

      // Create table under test namespace
      createTableInNamespaceAndWait(TEST_NAMESPACE, TEST_TABLE, TEST_FAMILY);

      // Flip read-only on only after setup; the procedure must still succeed.
      setReadOnlyMode(true);
      RefreshHFilesTableProcedure procedure =
        new RefreshHFilesTableProcedure(procExecutor.getEnvironment(), TEST_NAMESPACE);
      submitProcedureAndAssertNotFailed(procedure);

    } catch (Exception e) {
      // Wrap: helpers throw checked types not covered by this method's throws clause.
      throw new RuntimeException(e);
    } finally {
      // Restore writability so cleanup (table/namespace deletion) is allowed.
      setReadOnlyMode(false);
      // Delete namespace post test execution
      // This will delete all tables under namespace hence no explicit table
      // deletion for table under namespace is needed.
      deleteNamespace(TEST_NAMESPACE);
    }
  }

  @Test
  public void testRefreshHFilesProcedureForAllTables() throws IOException {
    try {
      // Create table in default namespace
      createTableAndWait(TEST_TABLE, TEST_FAMILY);

      // Create test namespace
      createNamespace(TEST_NAMESPACE);

      // Create table under test namespace
      createTableInNamespaceAndWait(TEST_NAMESPACE, TEST_TABLE, TEST_FAMILY);

      // Flip read-only on only after setup; the procedure must still succeed.
      setReadOnlyMode(true);
      RefreshHFilesTableProcedure procedure =
        new RefreshHFilesTableProcedure(procExecutor.getEnvironment());
      submitProcedureAndAssertNotFailed(procedure);

    } catch (Exception e) {
      // Wrap: helpers throw checked types not covered by this method's throws clause.
      throw new RuntimeException(e);
    } finally {
      // Restore writability so cleanup (table/namespace deletion) is allowed.
      setReadOnlyMode(false);
      // Delete table name post test execution
      deleteTable(TEST_TABLE);

      // Delete namespace post test execution
      // This will delete all tables under namespace hence no explicit table
      // deletion for table under namespace is needed.
      deleteNamespace(TEST_NAMESPACE);
    }
  }
}
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java
index 1ac785e7276..cf4f1f0e838 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java
@@ -1015,4 +1015,19 @@ public class VerifyingRSGroupAdmin implements Admin, 
Closeable {
   public Long refreshMeta() throws IOException {
     return admin.refreshMeta();
   }
+
+  @Override
+  public Long refreshHFiles(final TableName tableName) throws IOException {
+    return admin.refreshHFiles(tableName);
+  }
+
+  @Override
+  public Long refreshHFiles(final String namespace) throws IOException {
+    return admin.refreshHFiles(namespace);
+  }
+
+  @Override
+  public Long refreshHFiles() throws IOException {
+    return admin.refreshHFiles();
+  }
 }
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb 
b/hbase-shell/src/main/ruby/hbase/admin.rb
index 60f1c72cda2..970cd2fcd4d 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -1933,6 +1933,26 @@ module Hbase
     def refresh_meta()
       @admin.refreshMeta()
     end
+
+    
#----------------------------------------------------------------------------------------------
+    # Refresh HFiles for the table
+    def refresh_hfiles(args = {})
+      table_name = args.fetch(TABLE_NAME, nil)
+      namespace = args.fetch(NAMESPACE, nil)
+      if namespace && table_name
+        raise ArgumentError, "Specify either a TABLE_NAME or a NAMESPACE, not 
both"
+      elsif namespace == "" || table_name == ""
+        raise ArgumentError, "TABLE_NAME or NAMESPACE cannot be empty string"
+      elsif namespace.is_a?(Array) || table_name.is_a?(Array)
+        raise ArgumentError, "TABLE_NAME or NAMESPACE must be a single string, 
not an array"
+      elsif namespace
+        @admin.refreshHFiles(namespace)
+      elsif table_name
+        
@admin.refreshHFiles(org.apache.hadoop.hbase.TableName.valueOf(table_name))
+      else
+        @admin.refreshHFiles()
+      end
+    end
   end
   # rubocop:enable Metrics/ClassLength
 end
diff --git a/hbase-shell/src/main/ruby/shell.rb 
b/hbase-shell/src/main/ruby/shell.rb
index aadf2b255f1..0e278be2cba 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -497,6 +497,7 @@ Shell.load_command_group(
     recommission_regionserver
     truncate_region
     refresh_meta
+    refresh_hfiles
   ],
   # TODO: remove older hlog_roll command
   aliases: {
diff --git a/hbase-shell/src/main/ruby/shell/commands/refresh_hfiles.rb 
b/hbase-shell/src/main/ruby/shell/commands/refresh_hfiles.rb
new file mode 100644
index 00000000000..1ec0c74381a
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/refresh_hfiles.rb
@@ -0,0 +1,64 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
module Shell
  module Commands
    # Shell command backing `refresh_hfiles`: forwards its argument hash to
    # Hbase::Admin#refresh_hfiles, which validates it and submits the
    # master-side procedure.
    class RefreshHfiles < Command
      def help
        <<-EOF
Refresh HFiles/storefiles for table(s) or namespaces.

Allowed Syntax:
hbase> refresh_hfiles
hbase> refresh_hfiles 'TABLE_NAME' => 'test_table'
hbase> refresh_hfiles 'TABLE_NAME' => 'namespace:test_table'
hbase> refresh_hfiles 'NAMESPACE' => 'test_namespace'

Behavior:
- Without any argument, it refreshes HFiles for all user tables in HBase.
- If 'TABLE_NAME' is provided: Refreshes HFiles for that specific table.
   - If provided without a namespace qualifier (e.g., 'TABLE_NAME' => 'test_table'),
    it refreshes HFiles for that table in the default namespace.
   - If provided with a namespace qualifier (e.g., 'TABLE_NAME' => 'namespace:test_table'),
    it refreshes HFiles for the table in the specified namespace.
- With 'NAMESPACE', it refreshes HFiles for all tables in the given namespace.
On successful submission, it returns the procedure ID (procId). Otherwise, it throws an exception.

Important Note:
This command should ideally be run on a read-replica cluster,
 and only after successfully executing refresh_meta.

Not Allowed:
hbase> refresh_hfiles 'TABLE_NAME' => 'test_table', 'NAMESPACE' => 'test_namespace'

Passing both 'TABLE_NAME' and 'NAMESPACE' is not allowed to avoid ambiguity.
Otherwise, it is unclear whether the user intends to:
1. Refresh HFiles for 'test_table' under 'test_namespace',
or
2. Refresh HFiles for 'test_table' under the default namespace and
   all tables under 'test_namespace'.
To prevent such confusion, only one argument should be provided per command.

EOF
      end

      # Argument validation happens in the admin layer, not here.
      def command(args = {})
        admin.refresh_hfiles(args)
      end
    end
  end
end
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
index cfd53156cc4..f29d6b5771d 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
@@ -1391,4 +1391,19 @@ public class ThriftAdmin implements Admin {
     throw new NotImplementedException(
       "isReplicationPeerModificationEnabled not supported in ThriftAdmin");
   }
+
+  @Override
+  public Long refreshHFiles(final TableName tableName) throws IOException {
+    throw new NotImplementedException("refreshHFiles not supported in 
ThriftAdmin");
+  }
+
+  @Override
+  public Long refreshHFiles(final String namespace) throws IOException {
+    throw new NotImplementedException("refreshHFiles not supported in 
ThriftAdmin");
+  }
+
+  @Override
+  public Long refreshHFiles() throws IOException {
+    throw new NotImplementedException("refreshHFiles not supported in 
ThriftAdmin");
+  }
 }

Reply via email to