This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/master by this push:
new 05bd810f4e2 HBASE-28972 Limit the number of retries in FanOutOneBlockAsyncDFSOutputHelper.completeFile (#6453)
05bd810f4e2 is described below
commit 05bd810f4e250a2829f57ab7208ab22c066568a9
Author: Duo Zhang <[email protected]>
AuthorDate: Mon Nov 11 22:32:31 2024 +0800
HBASE-28972 Limit the number of retries in FanOutOneBlockAsyncDFSOutputHelper.completeFile (#6453)
Signed-off-by: Istvan Toth <[email protected]>
---
.../io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java | 17 +++++------------
pom.xml | 2 +-
2 files changed, 6 insertions(+), 13 deletions(-)
diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java
index 7d74adb44b8..e97c5de2181 100644
--- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java
+++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java
@@ -83,7 +83,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
@@ -640,8 +639,9 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
  static void completeFile(FanOutOneBlockAsyncDFSOutput output, DFSClient client,
    ClientProtocol namenode, String src, String clientName, ExtendedBlock block,
- HdfsFileStatus stat) {
- for (int retry = 0;; retry++) {
+ HdfsFileStatus stat) throws IOException {
+ int maxRetries = client.getConf().getNumBlockWriteLocateFollowingRetry();
+ for (int retry = 0; retry < maxRetries; retry++) {
try {
if (namenode.complete(src, clientName, block, stat.getFileId())) {
endFileLease(output);
@@ -650,18 +650,11 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
LOG.warn("complete file " + src + " not finished, retry = " + retry);
}
} catch (RemoteException e) {
- IOException ioe = e.unwrapRemoteException();
- if (ioe instanceof LeaseExpiredException) {
- LOG.warn("lease for file " + src + " is expired, give up", e);
- return;
- } else {
- LOG.warn("complete file " + src + " failed, retry = " + retry, e);
- }
- } catch (Exception e) {
- LOG.warn("complete file " + src + " failed, retry = " + retry, e);
+ throw e.unwrapRemoteException();
}
sleepIgnoreInterrupt(retry);
}
+ throw new IOException("can not complete file after retrying " + maxRetries + " times");
}
static void sleepIgnoreInterrupt(int retry) {
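For readers skimming the patch, the change above replaces an unbounded retry loop with a bounded one: retries are capped, a RemoteException is rethrown unwrapped instead of being swallowed, and an IOException is raised once the budget is exhausted. Below is a minimal, self-contained sketch of that bounded-retry shape. The names CompleteAction, attempt and completeWithBoundedRetries are illustrative only and are not part of HBase or HDFS; in the actual patch the bound comes from client.getConf().getNumBlockWriteLocateFollowingRetry() and the back-off is sleepIgnoreInterrupt(retry).

import java.io.IOException;

public final class BoundedRetrySketch {

  /** Illustrative stand-in for the namenode.complete(...) call: true means the file is closed. */
  interface CompleteAction {
    boolean attempt() throws IOException;
  }

  /** Retry a best-effort completion a bounded number of times, then give up with an IOException. */
  static void completeWithBoundedRetries(CompleteAction action, int maxRetries) throws IOException {
    for (int retry = 0; retry < maxRetries; retry++) {
      if (action.attempt()) {
        return; // completed successfully
      }
      // Not finished yet; back off briefly before the next attempt.
      try {
        Thread.sleep(Math.min(100L * (retry + 1), 1000L));
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
    // The loop is no longer infinite: once the retry budget is spent, surface an error to the caller.
    throw new IOException("can not complete file after retrying " + maxRetries + " times");
  }
}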
diff --git a/pom.xml b/pom.xml
index 42604e8e734..b5349c601dd 100644
--- a/pom.xml
+++ b/pom.xml
@@ -26,7 +26,7 @@
(in particular, if you are doing it for the first time), instead do
'mvn package'. If you are interested in the full story, see
https://issues.apache.org/jira/browse/HBASE-6795.
-
+for trigger test
-->
<modelVersion>4.0.0</modelVersion>
<parent>