git commit: HDFS-7005. DFS input streams do not timeout. Contributed by Daryn Sharp.

2014-09-08 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/trunk df8c84cba -> 6a84f88c1


HDFS-7005. DFS input streams do not timeout. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a84f88c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a84f88c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a84f88c

Branch: refs/heads/trunk
Commit: 6a84f88c1190a8fecadd81deb6e7b8a69675fa91
Parents: df8c84c
Author: Kihwal Lee kih...@apache.org
Authored: Mon Sep 8 14:41:44 2014 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Mon Sep 8 14:41:44 2014 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  1 +
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 37 
 3 files changed, 40 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a84f88c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1329442..2c45017 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -621,6 +621,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-7025. HDFS Credential Provider related Unit Test Failure.
 (Xiaoyu Yao via cnauroth)
 
+HDFS-7005. DFS input streams do not timeout.
+
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
  HDFS-6387. HDFS CLI admin tool for creating & deleting an

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a84f88c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index e4215f0..27abfb8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3015,6 +3015,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 dfsClientConf.socketTimeout);
   peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
   blockToken, datanodeId);
+  peer.setReadTimeout(dfsClientConf.socketTimeout);
   success = true;
   return peer;
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a84f88c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index b71cc32..da81d2f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -31,6 +31,9 @@ import static org.mockito.Mockito.mock;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -60,6 +63,7 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@@ -69,6 +73,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.InOrder;
 import org.mockito.Mockito;
@@ -961,4 +966,36 @@ public class TestDistributedFileSystem {
   cluster.shutdown();
 }
   }
+  
+  
+  @Test(timeout=10000)
+  public void testDFSClientPeerTimeout() throws IOException {
+final int timeout = 1000;
+final Configuration conf = new HdfsConfiguration();
+conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
+
+// only need cluster to create a dfs client to get a peer
+final MiniDFSCluster 

git commit: HDFS-7005. DFS input streams do not timeout. Contributed by Daryn Sharp. (cherry picked from commit 6a84f88c1190a8fecadd81deb6e7b8a69675fa91)

2014-09-08 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d510cefd1 -> d20047edd


HDFS-7005. DFS input streams do not timeout. Contributed by Daryn Sharp.
(cherry picked from commit 6a84f88c1190a8fecadd81deb6e7b8a69675fa91)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d20047ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d20047ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d20047ed

Branch: refs/heads/branch-2
Commit: d20047eddafd70867d9b7036b6a73f6f8151fb35
Parents: d510cef
Author: Kihwal Lee kih...@apache.org
Authored: Mon Sep 8 14:44:47 2014 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Mon Sep 8 14:44:47 2014 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  1 +
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 37 
 3 files changed, 40 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d20047ed/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bc38c65..029 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -360,6 +360,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-7025. HDFS Credential Provider related Unit Test Failure.
 (Xiaoyu Yao via cnauroth)
 
+HDFS-7005. DFS input streams do not timeout.
+
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
  HDFS-6387. HDFS CLI admin tool for creating & deleting an

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d20047ed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index e4215f0..27abfb8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3015,6 +3015,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 dfsClientConf.socketTimeout);
   peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
   blockToken, datanodeId);
+  peer.setReadTimeout(dfsClientConf.socketTimeout);
   success = true;
   return peer;
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d20047ed/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 7fa9f27..564c759 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -31,6 +31,9 @@ import static org.mockito.Mockito.mock;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -60,6 +63,7 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.web.HftpFileSystem;
@@ -70,6 +74,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.InOrder;
 import org.mockito.Mockito;
@@ -991,4 +996,36 @@ public class TestDistributedFileSystem {
   cluster.shutdown();
 }
   }
+  
+  
+  @Test(timeout=10000)
+  public void testDFSClientPeerTimeout() throws IOException {
+final int timeout = 1000;
+final Configuration conf = new HdfsConfiguration();
+conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
+
+// only need 

[3/7] git commit: HDFS-7005. DFS input streams do not timeout. Contributed by Daryn Sharp.

2014-09-08 Thread arp
HDFS-7005. DFS input streams do not timeout. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a84f88c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a84f88c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a84f88c

Branch: refs/heads/HDFS-6581
Commit: 6a84f88c1190a8fecadd81deb6e7b8a69675fa91
Parents: df8c84c
Author: Kihwal Lee kih...@apache.org
Authored: Mon Sep 8 14:41:44 2014 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Mon Sep 8 14:41:44 2014 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  1 +
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 37 
 3 files changed, 40 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a84f88c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1329442..2c45017 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -621,6 +621,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-7025. HDFS Credential Provider related Unit Test Failure.
 (Xiaoyu Yao via cnauroth)
 
+HDFS-7005. DFS input streams do not timeout.
+
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
  HDFS-6387. HDFS CLI admin tool for creating & deleting an

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a84f88c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index e4215f0..27abfb8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3015,6 +3015,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 dfsClientConf.socketTimeout);
   peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
   blockToken, datanodeId);
+  peer.setReadTimeout(dfsClientConf.socketTimeout);
   success = true;
   return peer;
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a84f88c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index b71cc32..da81d2f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -31,6 +31,9 @@ import static org.mockito.Mockito.mock;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -60,6 +63,7 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@@ -69,6 +73,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.InOrder;
 import org.mockito.Mockito;
@@ -961,4 +966,36 @@ public class TestDistributedFileSystem {
   cluster.shutdown();
 }
   }
+  
+  
+  @Test(timeout=10000)
+  public void testDFSClientPeerTimeout() throws IOException {
+final int timeout = 1000;
+final Configuration conf = new HdfsConfiguration();
+conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
+
+// only need cluster to create a dfs client to get a peer
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+try {
+  

[4/8] git commit: HDFS-7005. DFS input streams do not timeout. Contributed by Daryn Sharp.

2014-09-08 Thread jing9
HDFS-7005. DFS input streams do not timeout. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a84f88c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a84f88c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a84f88c

Branch: refs/heads/HDFS-6584
Commit: 6a84f88c1190a8fecadd81deb6e7b8a69675fa91
Parents: df8c84c
Author: Kihwal Lee kih...@apache.org
Authored: Mon Sep 8 14:41:44 2014 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Mon Sep 8 14:41:44 2014 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  1 +
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 37 
 3 files changed, 40 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a84f88c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1329442..2c45017 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -621,6 +621,8 @@ Release 2.6.0 - UNRELEASED
 HDFS-7025. HDFS Credential Provider related Unit Test Failure.
 (Xiaoyu Yao via cnauroth)
 
+HDFS-7005. DFS input streams do not timeout.
+
 BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
  HDFS-6387. HDFS CLI admin tool for creating & deleting an

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a84f88c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index e4215f0..27abfb8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3015,6 +3015,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 dfsClientConf.socketTimeout);
   peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
   blockToken, datanodeId);
+  peer.setReadTimeout(dfsClientConf.socketTimeout);
   success = true;
   return peer;
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a84f88c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index b71cc32..da81d2f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -31,6 +31,9 @@ import static org.mockito.Mockito.mock;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -60,6 +63,7 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
@@ -69,6 +73,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.InOrder;
 import org.mockito.Mockito;
@@ -961,4 +966,36 @@ public class TestDistributedFileSystem {
   cluster.shutdown();
 }
   }
+  
+  
+  @Test(timeout=10000)
+  public void testDFSClientPeerTimeout() throws IOException {
+final int timeout = 1000;
+final Configuration conf = new HdfsConfiguration();
+conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
+
+// only need cluster to create a dfs client to get a peer
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+try {
+