Repository: hadoop
Updated Branches:
refs/heads/branch-2 f1f441b80 -> be5894c12
HDFS-10223. peerFromSocketAndKey performs SASL exchange before setting
connection timeouts (cmccabe)
(cherry picked from commit 37e23ce45c592f3c9c48a08a52a5f46787f6c0e9)
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
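In short: DFSUtilClient#peerFromSocketAndKey now takes the socket timeout as a
parameter and applies the read/write timeouts to the Peer before running the
SASL handshake, instead of the caller setting them only afterwards. A rough
sketch of the updated call pattern follows (names such as sock, addr,
saslClient, keyFactory, blockToken, datanodeId and socketTimeout are
illustrative stand-ins for whatever the caller already has in scope; see the
DFSClient.java hunk below for the real call site):

    // Connect first, then hand the timeout to peerFromSocketAndKey so the
    // Peer's read/write timeouts are already in force during the SASL exchange.
    NetUtils.connect(sock, addr, getRandomLocalInterfaceAddr(), socketTimeout);
    Peer peer = DFSUtilClient.peerFromSocketAndKey(saslClient, sock, keyFactory,
        blockToken, datanodeId, socketTimeout);
    // Previously the caller invoked peer.setReadTimeout()/setWriteTimeout()
    // only after peerFromSocketAndKey returned, so a stalled handshake could
    // hang indefinitely.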
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be5894c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be5894c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be5894c1
Branch: refs/heads/branch-2
Commit: be5894c12dd3af20474d74c444ad8ffd798ff417
Parents: f1f441b
Author: Colin Patrick Mccabe <[email protected]>
Authored: Wed Mar 30 13:37:37 2016 -0700
Committer: Colin Patrick Mccabe <[email protected]>
Committed: Wed Mar 30 13:42:24 2016 -0700
----------------------------------------------------------------------
.../java/org/apache/hadoop/hdfs/DFSClient.java | 4 +-
.../org/apache/hadoop/hdfs/DFSUtilClient.java | 6 ++-
.../hdfs/server/namenode/NamenodeFsck.java | 2 +-
.../datatransfer/sasl/TestSaslDataTransfer.java | 48 ++++++++++++++++++++
4 files changed, 54 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/be5894c1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 42d153d..5365847 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2926,9 +2926,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
NetUtils.connect(sock, addr, getRandomLocalInterfaceAddr(),
socketTimeout);
peer = DFSUtilClient.peerFromSocketAndKey(saslClient, sock, this,
- blockToken, datanodeId);
- peer.setReadTimeout(socketTimeout);
- peer.setWriteTimeout(socketTimeout);
+ blockToken, datanodeId, socketTimeout);
success = true;
return peer;
} finally {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/be5894c1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 897a40c..af700c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -579,12 +579,14 @@ public class DFSUtilClient {
public static Peer peerFromSocketAndKey(
SaslDataTransferClient saslClient, Socket s,
DataEncryptionKeyFactory keyFactory,
- Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
- throws IOException {
+ Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId,
+ int socketTimeoutMs) throws IOException {
Peer peer = null;
boolean success = false;
try {
peer = peerFromSocket(s);
+ peer.setReadTimeout(socketTimeoutMs);
+ peer.setWriteTimeout(socketTimeoutMs);
peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
success = true;
return peer;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/be5894c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 66b7dac..c2b616e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -869,7 +869,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
peer = DFSUtilClient.peerFromSocketAndKey(
dfs.getSaslDataTransferClient(), s, NamenodeFsck.this,
- blockToken, datanodeId);
+ blockToken, datanodeId, HdfsConstants.READ_TIMEOUT);
} finally {
if (peer == null) {
IOUtils.closeQuietly(s);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/be5894c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
index 2d4eb0d..8555e5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
@@ -25,6 +25,10 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import java.io.IOException;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.SocketTimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.BlockLocation;
@@ -32,12 +36,18 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.junit.After;
@@ -197,4 +207,42 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
}
+
+ /**
+ * Verifies that peerFromSocketAndKey honors socket read timeouts.
+ */
+ @Test(timeout=60000)
+ public void testPeerFromSocketAndKeyReadTimeout() throws Exception {
+ HdfsConfiguration conf = createSecureConfig(
+ "authentication,integrity,privacy");
+ AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false);
+ SaslDataTransferClient saslClient = new SaslDataTransferClient(
+ conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
+ TrustedChannelResolver.getInstance(conf), fallbackToSimpleAuth);
+ DatanodeID fakeDatanodeId = new DatanodeID("127.0.0.1", "localhost",
+ "beefbeef-beef-beef-beef-beefbeefbeef", 1, 2, 3, 4);
+ DataEncryptionKeyFactory dataEncKeyFactory =
+ new DataEncryptionKeyFactory() {
+ @Override
+ public DataEncryptionKey newDataEncryptionKey() {
+ return new DataEncryptionKey(123, "456", new byte[8],
+ new byte[8], 1234567, "fakeAlgorithm");
+ }
+ };
+ ServerSocket serverSocket = null;
+ Socket socket = null;
+ try {
+ serverSocket = new ServerSocket(0, -1);
+ socket = new Socket(serverSocket.getInetAddress(),
+ serverSocket.getLocalPort());
+ Peer peer = DFSUtilClient.peerFromSocketAndKey(saslClient, socket,
+ dataEncKeyFactory, new Token(), fakeDatanodeId, 1);
+ peer.close();
+ Assert.fail("Expected DFSUtilClient#peerFromSocketAndKey to time out.");
+ } catch (SocketTimeoutException e) {
+ GenericTestUtils.assertExceptionContains("Read timed out", e);
+ } finally {
+ IOUtils.cleanup(null, socket, serverSocket);
+ }
+ }
}