HDFS-3384. DataStreamer thread should be closed immediately when failed to setup 
a PipelineForAppendOrRecovery (Contributed by Uma Maheswara Rao G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cf8db41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cf8db41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cf8db41

Branch: refs/heads/YARN-2928
Commit: 2cf8db41489c177068d57e0359ab10735f1666d7
Parents: e8f979b
Author: Vinayakumar B <vinayakum...@apache.org>
Authored: Fri May 8 17:18:14 2015 +0530
Committer: Zhijie Shen <zjs...@apache.org>
Committed: Fri May 8 17:32:46 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../org/apache/hadoop/hdfs/DataStreamer.java    |  4 ++++
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 22 ++++++++++++++++++++
 3 files changed, 29 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cf8db41/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c9e5b87..4decfd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -690,6 +690,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-6576. Datanode log is generating at root directory in security mode
     (surendra singh lilhore via vinayakumarb)
 
+    HDFS-3384. DataStreamer thread should be closed immediatly when failed to
+    setup a PipelineForAppendOrRecovery (Uma Maheswara Rao G via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cf8db41/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 96bf212..697ee11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -460,6 +460,9 @@ class DataStreamer extends Daemon {
             LOG.debug("Append to block " + block);
           }
           setupPipelineForAppendOrRecovery();
+          if (true == streamerClosed) {
+            continue;
+          }
           initDataStreaming();
         }
 
@@ -571,6 +574,7 @@ class DataStreamer extends Daemon {
           }
         }
         lastException.set(e);
+        assert !(e instanceof NullPointerException);
         hasError = true;
         if (errorIndex == -1 && restartingNodeIndex.get() == -1) {
           // Not a datanode issue

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cf8db41/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 6a7c3ea..402c944 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -602,4 +602,26 @@ public class TestFileAppend{
       cluster.shutdown();
     }
   }
+  
+  @Test(timeout = 10000)
+  public void testAppendCorruptedBlock() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
+    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+    conf.setInt("dfs.min.replication", 1);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .build();
+    try {
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path fileName = new Path("/appendCorruptBlock");
+      DFSTestUtil.createFile(fs, fileName, 512, (short) 1, 0);
+      DFSTestUtil.waitReplication(fs, fileName, (short) 1);
+      Assert.assertTrue("File not created", fs.exists(fileName));
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+      cluster.corruptBlockOnDataNodes(block);
+      DFSTestUtil.appendFile(fs, fileName, "appendCorruptBlock");
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }

Reply via email to