Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7a92345f1 -> 1ab279c31
HDFS-11195. Return error when appending files by webhdfs rest api fails. Contributed by Yuanbo Liu.

(cherry picked from commit 5b7acdd206f5a7d1b7af29b68adaa7587d7d8c43)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ab279c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ab279c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ab279c3

Branch: refs/heads/branch-2
Commit: 1ab279c311dfc36f5b84cc6db3b3e0296562f691
Parents: 7a92345
Author: Xiao Chen <[email protected]>
Authored: Tue Dec 20 12:24:00 2016 -0800
Committer: Xiao Chen <[email protected]>
Committed: Tue Dec 20 12:27:40 2016 -0800

----------------------------------------------------------------------
 .../server/datanode/web/webhdfs/HdfsWriter.java | 19 ++++++++--
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 15 ++++++--
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 39 ++++++++++++++++++++
 3 files changed, 65 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ab279c3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java
index 99924e5..8de4bb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java
@@ -55,9 +55,13 @@ class HdfsWriter extends SimpleChannelInboundHandler<HttpContent> {
       throws IOException {
     chunk.content().readBytes(out, chunk.content().readableBytes());
     if (chunk instanceof LastHttpContent) {
-      response.headers().set(CONNECTION, CLOSE);
-      ctx.write(response).addListener(ChannelFutureListener.CLOSE);
-      releaseDfsResources();
+      try {
+        releaseDfsResourcesAndThrow();
+        response.headers().set(CONNECTION, CLOSE);
+        ctx.write(response).addListener(ChannelFutureListener.CLOSE);
+      } catch (Exception cause) {
+        exceptionCaught(ctx, cause);
+      }
     }
   }
 
@@ -71,7 +75,10 @@ class HdfsWriter extends SimpleChannelInboundHandler<HttpContent> {
     releaseDfsResources();
     DefaultHttpResponse resp = ExceptionHandler.exceptionCaught(cause);
     resp.headers().set(CONNECTION, CLOSE);
-    ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE);
+    ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
+    if (LOG != null && LOG.isDebugEnabled()) {
+      LOG.debug("Exception in channel handler ", cause);
+    }
   }
 
   private void releaseDfsResources() {
@@ -79,4 +86,8 @@ class HdfsWriter extends SimpleChannelInboundHandler<HttpContent> {
     IOUtils.cleanup(LOG, client);
   }
 
+  private void releaseDfsResourcesAndThrow() throws Exception {
+    out.close();
+    client.close();
+  }
 }
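For context on the HdfsWriter change above: releaseDfsResources() goes through IOUtils.cleanup(), which logs and swallows any exception thrown by close(), so a failed close of the DFS output stream never reached the WebHDFS client; the new releaseDfsResourcesAndThrow() closes the stream and client directly, letting the failure propagate to exceptionCaught() and become an error response. A minimal standalone sketch of that difference follows (not part of the patch; the failing Closeable and the null log argument are illustrative only):

import java.io.Closeable;
import java.io.IOException;

import org.apache.hadoop.io.IOUtils;

// Sketch only: contrasts the quiet cleanup path with a direct close() that
// propagates the failure, which is what the patch relies on.
public class CleanupVsClose {
  // Illustrative Closeable whose close() always fails, e.g. because the
  // write pipeline cannot satisfy the replication factor.
  static Closeable failingStream() {
    return new Closeable() {
      @Override
      public void close() throws IOException {
        throw new IOException("close failed: cannot meet replication");
      }
    };
  }

  public static void main(String[] args) {
    // Quiet path: IOUtils.cleanup() catches and discards the exception,
    // so the caller sees nothing and would report success.
    IOUtils.cleanup(null, failingStream());

    // Throwing path: the exception reaches the caller, mirroring what
    // releaseDfsResourcesAndThrow() now does for out.close() and client.close().
    try {
      failingStream().close();
    } catch (IOException e) {
      System.out.println("caller observed: " + e.getMessage());
    }
  }
}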
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ab279c3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 6a02ee3..8a9b213 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1835,14 +1835,21 @@ public class MiniDFSCluster implements AutoCloseable {
    */
   public void shutdownDataNodes() {
     for (int i = dataNodes.size()-1; i >= 0; i--) {
-      LOG.info("Shutting down DataNode " + i);
-      DataNode dn = dataNodes.remove(i).datanode;
-      dn.shutdown();
-      numDataNodes--;
+      shutdownDataNode(i);
     }
   }
 
   /**
+   * Shutdown the datanode at a given index.
+   */
+  public void shutdownDataNode(int dnIndex) {
+    LOG.info("Shutting down DataNode " + dnIndex);
+    DataNode dn = dataNodes.remove(dnIndex).datanode;
+    dn.shutdown();
+    numDataNodes--;
+  }
+
+  /**
    * Shutdown all the namenodes.
    */
   public synchronized void shutdownNameNodes() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ab279c3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index fe127a0..638decc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -1173,4 +1173,43 @@ public class TestWebHDFS {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testWebHdfsAppend() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final int dnNumber = 3;
+    try {
+
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNumber).build();
+
+      final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsConstants.WEBHDFS_SCHEME);
+
+      final DistributedFileSystem fs = cluster.getFileSystem();
+
+      final Path appendFile = new Path("/testAppend.txt");
+      final String content = "hello world";
+      DFSTestUtil.writeFile(fs, appendFile, content);
+
+      for (int index = 0; index < dnNumber - 1; index++){
+        cluster.shutdownDataNode(index);
+      }
+      cluster.restartNameNodes();
+      cluster.waitActive();
+
+      try {
+        DFSTestUtil.appendFile(webFS, appendFile, content);
+        fail("Should fail to append file since "
+            + "datanode number is 1 and replication is 3");
+      } catch (IOException ignored) {
+        String resultContent = DFSTestUtil.readFile(fs, appendFile);
+        assertTrue(resultContent.equals(content));
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown(true);
+      }
+    }
+  }
 }
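As a hedged illustration of the user-visible effect the new test exercises (the webhdfs:// endpoint, path, and payload below are placeholders, not taken from the patch): before this change a datanode-side failure while finishing the append could be swallowed and the request appear to succeed; with the patch the REST call returns an error and the client sees an IOException.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsAppendCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder endpoint; point this at a real namenode HTTP address.
    FileSystem webFS =
        FileSystem.get(URI.create("webhdfs://namenode:50070"), conf);
    Path file = new Path("/testAppend.txt");
    try (FSDataOutputStream out = webFS.append(file)) {
      out.writeBytes("more data");
    } catch (IOException e) {
      // With HDFS-11195 the datanode reports the failed append instead of
      // returning success, so the client can react (retry, alert, etc.).
      System.err.println("append failed: " + e.getMessage());
    }
  }
}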
