hadoop git commit: HDFS-3384. DataStreamer thread should be closed immediatly when failed to setup a PipelineForAppendOrRecovery (Contributed by Uma Maheswara Rao G)

2015-05-08 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1ffb7fa42 -> c648317a6


HDFS-3384. DataStreamer thread should be closed immediatly when failed to setup 
a PipelineForAppendOrRecovery (Contributed by Uma Maheswara Rao G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c648317a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c648317a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c648317a

Branch: refs/heads/trunk
Commit: c648317a68891e1c900f04b7a9c98ba40c5faddb
Parents: 1ffb7fa
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri May 8 17:18:14 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri May 8 17:18:14 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/DataStreamer.java    |  4 ++++
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 22 ++++++++++++++++++++++
 3 files changed, 29 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c648317a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c9e5b87..4decfd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -690,6 +690,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6576. Datanode log is generating at root directory in security mode
 (surendra singh lilhore via vinayakumarb)
 
+HDFS-3384. DataStreamer thread should be closed immediatly when failed to
+setup a PipelineForAppendOrRecovery (Uma Maheswara Rao G via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c648317a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 96bf212..697ee11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -460,6 +460,9 @@ class DataStreamer extends Daemon {
  LOG.debug("Append to block " + block);
   }
   setupPipelineForAppendOrRecovery();
+  if (true == streamerClosed) {
+continue;
+  }
   initDataStreaming();
 }
 
@@ -571,6 +574,7 @@ class DataStreamer extends Daemon {
   }
 }
 lastException.set(e);
+assert !(e instanceof NullPointerException);
 hasError = true;
  if (errorIndex == -1 && restartingNodeIndex.get() == -1) {
   // Not a datanode issue

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c648317a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 6a7c3ea..402c944 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -602,4 +602,26 @@ public class TestFileAppend{
   cluster.shutdown();
 }
   }
+  
+  @Test(timeout = 10000)
+  public void testAppendCorruptedBlock() throws Exception {
+Configuration conf = new HdfsConfiguration();
+conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
+conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+conf.setInt("dfs.min.replication", 1);
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+.build();
+try {
+  DistributedFileSystem fs = cluster.getFileSystem();
+  Path fileName = new Path("/appendCorruptBlock");
+  DFSTestUtil.createFile(fs, fileName, 512, (short) 1, 0);
+  DFSTestUtil.waitReplication(fs, fileName, (short) 1);
+  Assert.assertTrue("File not created", fs.exists(fileName));
+  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+  cluster.corruptBlockOnDataNodes(block);
+  DFSTestUtil.appendFile(fs, fileName, "appendCorruptBlock");
+} finally {
+  cluster.shutdown();
+}
+  }
 }
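
The effect of the patch above is that a pipeline-setup failure during append now closes the DataStreamer and stops the run() loop before initDataStreaming() is called, so the error surfaces to the appending client instead of leaving a dead streamer thread behind. A minimal client-side sketch of that scenario follows; it is not part of the commit, the file path and message text are illustrative assumptions, and the exact exception depends on cluster state, but it shows where the failure is expected to be reported.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendFailureSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical file whose only replica has become unreadable.
    Path file = new Path("/tmp/append-sketch");
    try (FSDataOutputStream out = fs.append(file)) {
      out.writeBytes("more data");
      out.hflush();
    } catch (IOException e) {
      // With the streamerClosed check in DataStreamer.run(), a failed
      // setupPipelineForAppendOrRecovery() closes the streamer immediately
      // and the append fails here, rather than the writer thread continuing
      // against a pipeline that was never established.
      System.err.println("Append failed: " + e.getMessage());
    }
  }
}

The added TestFileAppend#testAppendCorruptedBlock case in the diff exercises the same path inside a MiniDFSCluster by corrupting the single replica before appending.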



hadoop git commit: HDFS-3384. DataStreamer thread should be closed immediatly when failed to setup a PipelineForAppendOrRecovery (Contributed by Uma Maheswara Rao G)

2015-05-08 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2c3d79023 -> d1293


HDFS-3384. DataStreamer thread should be closed immediatly when failed to setup 
a PipelineForAppendOrRecovery (Contributed by Uma Maheswara Rao G)

(cherry picked from commit c648317a68891e1c900f04b7a9c98ba40c5faddb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d129
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d129
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d129

Branch: refs/heads/branch-2
Commit: d12937eadc0d096c0ca5f2c86cb57e148b75
Parents: 2c3d790
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri May 8 17:18:14 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri May 8 17:18:59 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/DataStreamer.java    |  4 ++++
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 22 ++++++++++++++++++++++
 3 files changed, 29 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d129/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 56f419c..0cc149c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -363,6 +363,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6576. Datanode log is generating at root directory in security mode
 (surendra singh lilhore via vinayakumarb)
 
+HDFS-3384. DataStreamer thread should be closed immediatly when failed to
+setup a PipelineForAppendOrRecovery (Uma Maheswara Rao G via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d129/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 96bf212..697ee11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -460,6 +460,9 @@ class DataStreamer extends Daemon {
  LOG.debug("Append to block " + block);
   }
   setupPipelineForAppendOrRecovery();
+  if (true == streamerClosed) {
+continue;
+  }
   initDataStreaming();
 }
 
@@ -571,6 +574,7 @@ class DataStreamer extends Daemon {
   }
 }
 lastException.set(e);
+assert !(e instanceof NullPointerException);
 hasError = true;
  if (errorIndex == -1 && restartingNodeIndex.get() == -1) {
   // Not a datanode issue

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d129/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 04f523e..622b349 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -603,4 +603,26 @@ public class TestFileAppend{
   cluster.shutdown();
 }
   }
+  
+  @Test(timeout = 10000)
+  public void testAppendCorruptedBlock() throws Exception {
+Configuration conf = new HdfsConfiguration();
+conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
+conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+conf.setInt("dfs.min.replication", 1);
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+.build();
+try {
+  DistributedFileSystem fs = cluster.getFileSystem();
+  Path fileName = new Path("/appendCorruptBlock");
+  DFSTestUtil.createFile(fs, fileName, 512, (short) 1, 0);
+  DFSTestUtil.waitReplication(fs, fileName, (short) 1);
+  Assert.assertTrue("File not created", fs.exists(fileName));
+  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+  cluster.corruptBlockOnDataNodes(block);
+  DFSTestUtil.appendFile(fs, fileName, "appendCorruptBlock");
+} finally {
+  cluster.shutdown();
+}
+  }
 }



[07/50] hadoop git commit: HDFS-3384. DataStreamer thread should be closed immediatly when failed to setup a PipelineForAppendOrRecovery (Contributed by Uma Maheswara Rao G)

2015-05-08 Thread zjshen
HDFS-3384. DataStreamer thread should be closed immediatly when failed to setup 
a PipelineForAppendOrRecovery (Contributed by Uma Maheswara Rao G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cf8db41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cf8db41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cf8db41

Branch: refs/heads/YARN-2928
Commit: 2cf8db41489c177068d57e0359ab10735f1666d7
Parents: e8f979b
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri May 8 17:18:14 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Fri May 8 17:32:46 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/DataStreamer.java    |  4 ++++
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 22 ++++++++++++++++++++++
 3 files changed, 29 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cf8db41/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c9e5b87..4decfd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -690,6 +690,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6576. Datanode log is generating at root directory in security mode
 (surendra singh lilhore via vinayakumarb)
 
+HDFS-3384. DataStreamer thread should be closed immediatly when failed to
+setup a PipelineForAppendOrRecovery (Uma Maheswara Rao G via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cf8db41/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 96bf212..697ee11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -460,6 +460,9 @@ class DataStreamer extends Daemon {
  LOG.debug("Append to block " + block);
   }
   setupPipelineForAppendOrRecovery();
+  if (true == streamerClosed) {
+continue;
+  }
   initDataStreaming();
 }
 
@@ -571,6 +574,7 @@ class DataStreamer extends Daemon {
   }
 }
 lastException.set(e);
+assert !(e instanceof NullPointerException);
 hasError = true;
  if (errorIndex == -1 && restartingNodeIndex.get() == -1) {
   // Not a datanode issue

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cf8db41/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 6a7c3ea..402c944 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -602,4 +602,26 @@ public class TestFileAppend{
   cluster.shutdown();
 }
   }
+  
+  @Test(timeout = 10000)
+  public void testAppendCorruptedBlock() throws Exception {
+Configuration conf = new HdfsConfiguration();
+conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
+conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+conf.setInt("dfs.min.replication", 1);
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+.build();
+try {
+  DistributedFileSystem fs = cluster.getFileSystem();
+  Path fileName = new Path("/appendCorruptBlock");
+  DFSTestUtil.createFile(fs, fileName, 512, (short) 1, 0);
+  DFSTestUtil.waitReplication(fs, fileName, (short) 1);
+  Assert.assertTrue("File not created", fs.exists(fileName));
+  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+  cluster.corruptBlockOnDataNodes(block);
+  DFSTestUtil.appendFile(fs, fileName, "appendCorruptBlock");
+} finally {
+  cluster.shutdown();
+}
+  }
 }