Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2551cc6e6 -> c081fa6ae


 HDFS-7224. Allow reuse of NN connections via webhdfs. Contributed by Eric Payne
(cherry picked from commit 2b0fa20f69417326a92beac10ffa072db2616e73)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c081fa6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c081fa6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c081fa6a

Branch: refs/heads/branch-2
Commit: c081fa6ae8fbd38eb3b6fb2ba0aa3ba1ac779240
Parents: 2551cc6
Author: Kihwal Lee <kih...@apache.org>
Authored: Mon Jan 26 08:15:37 2015 -0600
Committer: Kihwal Lee <kih...@apache.org>
Committed: Mon Jan 26 08:15:37 2015 -0600

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      | 20 ++++----
 .../hdfs/web/TestFSMainOperationsWebHdfs.java   | 49 ++++++++++++++++++++
 3 files changed, 64 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
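
For context: the change below wraps the existing Content-Type check and the JSON
parse in a try/finally so that the connection's input stream is always closed.
A minimal sketch of the pattern follows (illustrative only, not the actual
WebHdfsFileSystem code; the class and method names here are made up):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.nio.charset.StandardCharsets;

// Sketch: close the response stream in a finally block so the underlying HTTP
// connection can go back to the JVM's keep-alive pool and be reused for later
// NameNode requests, even when parsing throws.
final class JsonResponseSketch {
  static String readBody(HttpURLConnection conn) throws IOException {
    final InputStream in = conn.getInputStream();
    try {
      final ByteArrayOutputStream out = new ByteArrayOutputStream();
      final byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) != -1) {
        out.write(buf, 0, n);
      }
      // WebHDFS hands the response body to a JSON parser at this point.
      return out.toString(StandardCharsets.UTF_8.name());
    } finally {
      in.close(); // always close, even on error
    }
  }
}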


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c081fa6a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 82d933c..1220678 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -265,6 +265,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7623. Add htrace configuration properties to core-default.xml and
     update user doc about how to enable htrace. (yliu)
 
+    HDFS-7224. Allow reuse of NN connections via webhdfs (Eric Payne via
+    kihwal)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c081fa6a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 7284b55..2a3faf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -313,16 +313,20 @@ public class WebHdfsFileSystem extends FileSystem
     if (in == null) {
       throw new IOException("The " + (useErrorStream? "error": "input") + " stream is null.");
     }
-    final String contentType = c.getContentType();
-    if (contentType != null) {
-      final MediaType parsed = MediaType.valueOf(contentType);
-      if (!MediaType.APPLICATION_JSON_TYPE.isCompatible(parsed)) {
-        throw new IOException("Content-Type \"" + contentType
-            + "\" is incompatible with \"" + MediaType.APPLICATION_JSON
-            + "\" (parsed=\"" + parsed + "\")");
+    try {
+      final String contentType = c.getContentType();
+      if (contentType != null) {
+        final MediaType parsed = MediaType.valueOf(contentType);
+        if (!MediaType.APPLICATION_JSON_TYPE.isCompatible(parsed)) {
+          throw new IOException("Content-Type \"" + contentType
+              + "\" is incompatible with \"" + MediaType.APPLICATION_JSON
+              + "\" (parsed=\"" + parsed + "\")");
+        }
       }
+      return (Map<?, ?>)JSON.parse(new InputStreamReader(in, Charsets.UTF_8));
+    } finally {
+      in.close();
     }
-    return (Map<?, ?>)JSON.parse(new InputStreamReader(in, Charsets.UTF_8));
   }
 
   private static Map<?, ?> validateResponse(final HttpOpParam.Op op,

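
Why closing matters here: java.net.HttpURLConnection keeps persistent
(keep-alive) connections in an internal cache, but a connection is normally only
eligible for reuse after its response stream has been consumed and closed.
Leaving the stream open forces a fresh TCP connection to the NameNode for each
WebHDFS call. A hedged sketch of the calling pattern that benefits (the host,
port and path below are placeholders, not a real cluster):

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

// Sketch: repeated GETs against the same host can share one TCP connection via
// HTTP keep-alive, provided each response stream is drained and closed.
public final class KeepAliveSketch {
  public static void main(String[] args) throws IOException {
    for (int i = 0; i < 3; i++) {
      final URL url = new URL(
          "http://namenode.example.com:50070/webhdfs/v1/?op=GETHOMEDIRECTORY");
      final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      final InputStream in = conn.getInputStream();
      try {
        while (in.read() != -1) {
          // drain the body so the socket can return to the keep-alive pool
        }
      } finally {
        in.close();
      }
    }
  }
}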
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c081fa6a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
index a5bb41d..295aff0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
@@ -17,8 +17,14 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.doReturn;
+
 import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
 import java.net.URI;
+import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -32,6 +38,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
+import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
@@ -128,6 +136,47 @@ public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
     Assert.assertEquals(1024*4, fileStatus.getLen());
   }
 
+  // Test that WebHdfsFileSystem.jsonParse() closes the connection's input
+  // stream.
+  // Closing the inputstream in jsonParse will allow WebHDFS to reuse
+  // connections to the namenode rather than needing to always open new ones.
+  boolean closedInputStream = false;
+  @Test
+  public void testJsonParseClosesInputStream() throws Exception {
+    final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fileSystem;
+    Path file = getTestRootPath(fSys, "test/hadoop/file");
+    createFile(file);
+    final HttpOpParam.Op op = GetOpParam.Op.GETHOMEDIRECTORY;
+    final URL url = webhdfs.toUrl(op, file);
+    final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod(op.getType().toString());
+    conn.connect();
+
+    InputStream myIn = new InputStream(){
+      private HttpURLConnection localConn = conn;
+      @Override
+      public void close() throws IOException {
+        closedInputStream = true;
+        localConn.getInputStream().close();
+      }
+      @Override
+      public int read() throws IOException {
+        return localConn.getInputStream().read();
+      }
+    };
+    final HttpURLConnection spyConn = spy(conn);
+    doReturn(myIn).when(spyConn).getInputStream();
+
+    try {
+      Assert.assertFalse(closedInputStream);
+      WebHdfsFileSystem.jsonParse(spyConn, false);
+      Assert.assertTrue(closedInputStream);
+    } catch(IOException ioe) {
+      junit.framework.TestCase.fail();
+    }
+    conn.disconnect();
+  }
+
   @Override
   @Test
   public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
