This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2 by this push:
new 81d24451f37 HBASE-28972 Limit the number of retries in FanOutOneBlockAsyncDFSOutputHelper.completeFile (#6453)
81d24451f37 is described below
commit 81d24451f376f1a7d1e66ae93871a96a882d6489
Author: Duo Zhang <[email protected]>
AuthorDate: Mon Nov 11 22:32:31 2024 +0800
HBASE-28972 Limit the number of retries in FanOutOneBlockAsyncDFSOutputHelper.completeFile (#6453)
Signed-off-by: Istvan Toth <[email protected]>
(cherry picked from commit 05bd810f4e250a2829f57ab7208ab22c066568a9)
---
.../io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java | 17 +++++------------
1 file changed, 5 insertions(+), 12 deletions(-)
diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java
index 669727ea59e..5fb044489ee 100644
--- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java
+++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java
@@ -83,7 +83,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
@@ -715,8 +714,9 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
}
static void completeFile(DFSClient client, ClientProtocol namenode, String src, String clientName,
- ExtendedBlock block, HdfsFileStatus stat) {
- for (int retry = 0;; retry++) {
+ ExtendedBlock block, HdfsFileStatus stat) throws IOException {
+ int maxRetries = client.getConf().getNumBlockWriteLocateFollowingRetry();
+ for (int retry = 0; retry < maxRetries; retry++) {
try {
if (namenode.complete(src, clientName, block, stat.getFileId())) {
endFileLease(client, stat);
@@ -725,18 +725,11 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
LOG.warn("complete file " + src + " not finished, retry = " + retry);
}
} catch (RemoteException e) {
- IOException ioe = e.unwrapRemoteException();
- if (ioe instanceof LeaseExpiredException) {
- LOG.warn("lease for file " + src + " is expired, give up", e);
- return;
- } else {
- LOG.warn("complete file " + src + " failed, retry = " + retry, e);
- }
- } catch (Exception e) {
- LOG.warn("complete file " + src + " failed, retry = " + retry, e);
+ throw e.unwrapRemoteException();
}
sleepIgnoreInterrupt(retry);
}
+ throw new IOException("can not complete file after retrying " + maxRetries + " times");
}
static void sleepIgnoreInterrupt(int retry) {
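For reference, below is a minimal, self-contained sketch of the bounded-retry pattern this patch introduces. It is illustrative only and not part of the patch: MAX_RETRIES and attemptOnce are hypothetical stand-ins, whereas the real code reads the limit from client.getConf().getNumBlockWriteLocateFollowingRetry(), calls namenode.complete, and backs off via sleepIgnoreInterrupt(retry).

import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;

public final class BoundedRetryExample {

  // Hypothetical constant; the patch reads the limit from
  // client.getConf().getNumBlockWriteLocateFollowingRetry().
  private static final int MAX_RETRIES = 5;

  // Hypothetical stand-in for namenode.complete(...); succeeds about 1 in 4 tries.
  private static boolean attemptOnce() {
    return ThreadLocalRandom.current().nextInt(4) == 0;
  }

  static void completeWithBoundedRetries() throws IOException {
    for (int retry = 0; retry < MAX_RETRIES; retry++) {
      if (attemptOnce()) {
        return; // success, stop retrying
      }
      // simple linear backoff; the real code delegates to sleepIgnoreInterrupt(retry)
      try {
        Thread.sleep(100L * (retry + 1));
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
    // the key change in the patch: give up with an IOException instead of looping forever
    throw new IOException("can not complete after retrying " + MAX_RETRIES + " times");
  }

  public static void main(String[] args) throws IOException {
    completeWithBoundedRetries();
    System.out.println("completed");
  }
}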