Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4e847d63a -> f671c22e3
  refs/heads/trunk 040a38dc4 -> 4908a8970


HDFS-12705. WebHdfsFileSystem exceptions should retain the caused-by exception.
Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4908a897
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4908a897
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4908a897

Branch: refs/heads/trunk
Commit: 4908a8970eaf500642a9d8427e322032c1ec047a
Parents: 040a38d
Author: Arpit Agarwal <a...@apache.org>
Authored: Mon Nov 13 11:30:39 2017 -0800
Committer: Arpit Agarwal <a...@apache.org>
Committed: Mon Nov 13 11:30:39 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      |  1 +
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 59 ++++++++++++++++++++
 2 files changed, 60 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4908a897/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 34f5d6e..c1aef49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -780,6 +780,7 @@ public class WebHdfsFileSystem extends FileSystem
           try {
             IOException newIoe = ioe.getClass().getConstructor(String.class)
                 .newInstance(node + ": " + ioe.getMessage());
+            newIoe.initCause(ioe.getCause());
             newIoe.setStackTrace(ioe.getStackTrace());
             ioe = newIoe;
           } catch (NoSuchMethodException | SecurityException 
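
As an aside, here is a minimal, self-contained sketch of the re-wrapping
pattern this hunk fixes. The rewrap() helper, the node string, and the class
name below are hypothetical stand-ins, not WebHDFS internals; only the
initCause()/setStackTrace() sequence mirrors the production change.

import java.io.IOException;

public class RetainCauseSketch {

  // Re-wraps an IOException so its message is prefixed with the failing
  // node, copying the original stack trace and (the HDFS-12705 fix) the
  // original cause.
  static IOException rewrap(String node, IOException ioe) {
    try {
      IOException newIoe = ioe.getClass().getConstructor(String.class)
          .newInstance(node + ": " + ioe.getMessage());
      newIoe.initCause(ioe.getCause());      // previously this was dropped
      newIoe.setStackTrace(ioe.getStackTrace());
      return newIoe;
    } catch (ReflectiveOperationException | SecurityException e) {
      // No accessible (String) constructor: keep the original exception,
      // matching the fallback in the surrounding production code.
      return ioe;
    }
  }

  public static void main(String[] args) {
    IOException ioe = new IOException("read failed",
        new RuntimeException("root cause"));
    IOException rewrapped = rewrap("datanode-1:50075", ioe);
    System.out.println(rewrapped.getMessage()); // datanode-1:50075: read failed
    System.out.println(rewrapped.getCause());   // java.lang.RuntimeException: root cause
  }
}

Without the initCause() call, rewrapped.getCause() would be null and the
root cause would be invisible to callers, retry logic, and logs.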

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4908a897/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 3ee8ad0..500ec0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -1452,4 +1452,63 @@ public class TestWebHDFS {
       }
     }
   }
+
+  /**
+   * Tests that {@link WebHdfsFileSystem.AbstractRunner} propagates the
+   * original exception's stack trace and cause across runWithRetry attempts.
+   * @throws Exception
+   */
+  @Test
+  public void testExceptionPropagationInAbstractRunner() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final Path dir = new Path("/testExceptionPropagationInAbstractRunner");
+
+    conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true);
+
+    final short numDatanodes = 1;
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numDatanodes)
+        .build();
+    try {
+      cluster.waitActive();
+      final FileSystem fs = WebHdfsTestUtil
+          .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
+
+      // Create a file.
+      final long length = 1L << 20;
+      final Path file1 = new Path(dir, "testFile");
+
+      DFSTestUtil.createFile(fs, file1, length, numDatanodes, 20120406L);
+
+      // Get the file status and check that the file was written properly.
+      final FileStatus s1 = fs.getFileStatus(file1);
+      assertEquals("Write failed for file " + file1, length, s1.getLen());
+
+      FSDataInputStream in = fs.open(file1);
+      in.read(); // Connection is made only when the first read() occurs.
+      final WebHdfsInputStream webIn =
+          (WebHdfsInputStream)(in.getWrappedStream());
+
+      final String msg = "Throwing dummy exception";
+      IOException ioe = new IOException(msg, new DummyThrowable());
+
+      WebHdfsFileSystem.ReadRunner readRunner = spy(webIn.getReadRunner());
+      doThrow(ioe).when(readRunner).getResponse(any(HttpURLConnection.class));
+
+      webIn.setReadRunner(readRunner);
+
+      try {
+        webIn.read();
+        fail("Read should have thrown IOException.");
+      } catch (IOException e) {
+        assertTrue(e.getMessage().contains(msg));
+        assertTrue(e.getCause() instanceof DummyThrowable);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  static final class DummyThrowable extends Throwable {
+  }
 }
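
The test above injects the failure with Mockito's spy()/doThrow() idiom. For
readers unfamiliar with it, here is the same idiom in isolation against a
made-up Fetcher class; every name in this sketch is hypothetical, not a
WebHDFS or test-harness type.

import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;

import java.io.IOException;

public class SpyInjectionSketch {

  // Trivial collaborator standing in for WebHdfsFileSystem.ReadRunner.
  static class Fetcher {
    String fetch(String url) throws IOException {
      return "payload from " + url;
    }
  }

  public static void main(String[] args) {
    // A spy wraps a real object: unstubbed methods still run the real code,
    // which is why the test spies the live ReadRunner instead of mocking it.
    Fetcher spied = spy(new Fetcher());
    try {
      doThrow(new IOException("injected", new RuntimeException("root")))
          .when(spied).fetch(anyString());
      spied.fetch("webhdfs://example");   // now throws the injected exception
    } catch (IOException e) {
      System.out.println(e.getMessage() + ", cause = " + e.getCause());
    }
  }
}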

