Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ed7f847c9 -> 0a41b1bdc


HDFS-7835. make initial sleeptime in locateFollowingBlock configurable for DFSClient. Contributed by Zhihai Xu.

(cherry-picked from commit 15612313f578a5115f8d03885e9b0c8c376ed56e)

Conflicts:
        hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a41b1bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a41b1bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a41b1bd

Branch: refs/heads/branch-2
Commit: 0a41b1bdcadbdfc427348799f89d122a152d41fe
Parents: ed7f847
Author: Yongjun Zhang <yzh...@cloudera.com>
Authored: Fri Mar 20 08:59:44 2015 -0700
Committer: Yongjun Zhang <yzh...@cloudera.com>
Committed: Fri Mar 20 09:25:59 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 11 ++++++++++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  3 +++
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 10 +++++----
 .../src/main/resources/hdfs-default.xml         |  7 +++++++
 .../hadoop/hdfs/TestDFSClientRetries.java       | 22 ++++++++++++++++++++
 6 files changed, 52 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
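
The change makes the initial retry delay used by locateFollowingBlock() and completeFile() (previously a hard-coded 400 ms) configurable via dfs.client.block.write.locateFollowingBlock.initial.delay.ms. A minimal sketch of setting it programmatically before the client is created (the standalone class and the 1000 ms value are illustrative only; the shipped default stays 400 ms):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Illustrative only: raise the initial locateFollowingBlock delay to 1 second.
public class InitialDelayExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(
        DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY,
        1000);
    // The DFSClient behind this FileSystem reads the key once at construction
    // time (assuming fs.defaultFS points at an HDFS cluster).
    FileSystem fs = FileSystem.get(conf);
    fs.close();
  }
}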


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a41b1bd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f9c4258..402504d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -10,6 +10,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)
 
+    HDFS-7835. make initial sleeptime in locateFollowingBlock configurable for
+    DFSClient. (Zhihai Xu via Yongjun Zhang)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a41b1bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 658cccf..74c0d78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -24,6 +24,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAUL
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
@@ -307,6 +309,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     final int nCachedConnRetry;
     final int nBlockWriteRetry;
     final int nBlockWriteLocateFollowingRetry;
+    final int blockWriteLocateFollowingInitialDelayMs;
     final long defaultBlockSize;
     final long prefetchSize;
     final short defaultReplication;
@@ -418,6 +421,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       nBlockWriteLocateFollowingRetry = conf.getInt(
           DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
           DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
+      blockWriteLocateFollowingInitialDelayMs = conf.getInt(
+          DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY,
+          DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT);
       uMask = FsPermission.getUMask(conf);
       connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
           DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
@@ -568,6 +574,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       }
       return dataChecksum;
     }
+
+    @VisibleForTesting
+    public int getBlockWriteLocateFollowingInitialDelayMs() {
+      return blockWriteLocateFollowingInitialDelayMs;
+    }
   }
  
   public Conf getConf() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a41b1bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d1c37df..0894e0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -401,6 +401,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // Much code in hdfs is not yet updated to use these keys.
  public static final String  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries";
  public static final int     DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT = 5;
+  // the initial delay (unit is ms) for locateFollowingBlock, the delay time will increase exponentially(double) for each retry.
+  public static final String  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY = "dfs.client.block.write.locateFollowingBlock.initial.delay.ms";
+  public static final int     DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT = 400;
  public static final String  DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY = "dfs.client.block.write.retries";
  public static final int     DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT = 3;
  public static final String  DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY = "dfs.client.max.block.acquire.failures";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a41b1bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 6a403af..4eca7d1 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -1432,7 +1432,8 @@ public class DFSOutputStream extends FSOutputSummer
     private LocatedBlock locateFollowingBlock(long start,
         DatanodeInfo[] excludedNodes)  throws IOException {
       int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
-      long sleeptime = 400;
+      long sleeptime = dfsClient.getConf().
+          blockWriteLocateFollowingInitialDelayMs;
       while (true) {
         long localstart = Time.now();
         while (true) {
@@ -2257,7 +2258,8 @@ public class DFSOutputStream extends FSOutputSummer
   // be called during unit tests
   private void completeFile(ExtendedBlock last) throws IOException {
     long localstart = Time.now();
-    long localTimeout = 400;
+    long sleeptime = dfsClient.getConf().
+        blockWriteLocateFollowingInitialDelayMs;
     boolean fileComplete = false;
     int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
     while (!fileComplete) {
@@ -2280,8 +2282,8 @@ public class DFSOutputStream extends FSOutputSummer
                 + " does not have enough number of replicas.");
           }
           retries--;
-          Thread.sleep(localTimeout);
-          localTimeout *= 2;
+          Thread.sleep(sleeptime);
+          sleeptime *= 2;
           if (Time.now() - localstart > 5000) {
             DFSClient.LOG.info("Could not complete " + src + " retrying...");
           }
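
Side note: locateFollowingBlock() and completeFile() share the same doubling-backoff loop that this patch parameterizes; with the default settings the sleep times are 400, 800, 1600, ... ms. A stand-alone sketch of that shape (this helper class is illustrative and not part of the change):

import java.io.IOException;
import java.util.concurrent.Callable;

// Illustrative only: start at the configured initial delay and double it after
// every failed attempt until the retry budget is exhausted.
final class DoublingBackoff {
  static <T> T retry(Callable<T> op, long initialDelayMs, int retries)
      throws Exception {
    long sleeptime = initialDelayMs;
    while (true) {
      try {
        return op.call();
      } catch (IOException e) {
        if (retries-- <= 0) {
          throw e;
        }
        Thread.sleep(sleeptime);
        sleeptime *= 2;
      }
    }
  }
}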

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a41b1bd/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index a8c2400..8a22a52 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2305,4 +2305,11 @@
     <description>Whether pin blocks on favored DataNode.</description>
   </property>
 
+<property>
+  <name>dfs.client.block.write.locateFollowingBlock.initial.delay.ms</name>
+  <value>400</value>
+  <description>The initial delay (unit is ms) for locateFollowingBlock,
+    the delay time will increase exponentially(double) for each retry.
+  </description>
+</property>
 </configuration>
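
The same setting can also be overridden in a client-side hdfs-site.xml; for example (the 1000 ms value is arbitrary, the default above remains 400):

<property>
  <name>dfs.client.block.write.locateFollowingBlock.initial.delay.ms</name>
  <value>1000</value>
</property>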

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a41b1bd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 382ad48..45f21df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -1126,4 +1126,26 @@ public class TestDFSClientRetries {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testDFSClientConfigurationLocateFollowingBlockInitialDelay()
+      throws Exception {
+    // test if DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY
+    // is not configured, verify DFSClient uses the default value 400.
+    Configuration dfsConf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(dfsConf).build();
+    cluster.waitActive();
+    NamenodeProtocols nn = cluster.getNameNodeRpc();
+    DFSClient client = new DFSClient(null, nn, dfsConf, null);
+    assertEquals(client.getConf().
+        getBlockWriteLocateFollowingInitialDelayMs(), 400);
+
+    // change DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY,
+    // verify DFSClient uses the configured value 1000.
+    dfsConf.setInt(DFSConfigKeys.
+        DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY, 1000);
+    client = new DFSClient(null, nn, dfsConf, null);
+    assertEquals(client.getConf().
+        getBlockWriteLocateFollowingInitialDelayMs(), 1000);
+  }
 }
