Author: szetszwo
Date: Wed May 23 18:01:11 2012
New Revision: 1341962
URL: http://svn.apache.org/viewvc?rev=1341962&view=rev
Log:
svn merge -c 1341961 from trunk for HDFS-3436. In
DataNode.transferReplicaForPipelineRecovery(..), it should use the stored
generation stamp to check if the block is valid.
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/ (props
changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
(props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1341961
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1341962&r1=1341961&r2=1341962&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
(original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
Wed May 23 18:01:11 2012
@@ -97,6 +97,13 @@ Release 2.0.1-alpha - UNRELEASED
HDFS-2717. BookKeeper Journal output stream doesn't check addComplete rc.
(Ivan Kelly via umamahesh)
+ HDFS-3415. Make sure all layout versions are the same for all storage
+ directories in the Namenode. (Brandon Li via szetszwo)
+
+ HDFS-3436. In DataNode.transferReplicaForPipelineRecovery(..), it should
+ use the stored generation stamp to check if the block is valid. (Vinay
+ via szetszwo)
+
Release 2.0.0-alpha - UNRELEASED
INCOMPATIBLE CHANGES
@@ -619,9 +626,6 @@ Release 2.0.0-alpha - UNRELEASED
HDFS-860. fuse-dfs truncate behavior causes issues with scp.
(Brian Bockelman via eli)
- HDFS-3415. Make sure all layout versions are the same for all storage
- directories in the Namenode. (Brandon Li via szetszwo)
-
BREAKDOWN OF HDFS-1623 SUBTASKS
HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
Propchange:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1341961
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1341962&r1=1341961&r2=1341962&view=diff
==============================================================================
---
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
(original)
+++
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
Wed May 23 18:01:11 2012
@@ -2032,6 +2032,18 @@ public class DataNode extends Configured
//get replica information
synchronized(data) {
+ Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
+ b.getBlockId());
+ if (null == storedBlock) {
+ throw new IOException(b + " not found in datanode.");
+ }
+ storedGS = storedBlock.getGenerationStamp();
+ if (storedGS < b.getGenerationStamp()) {
+ throw new IOException(storedGS
+ + " = storedGS < b.getGenerationStamp(), b=" + b);
+ }
+ // Update the genstamp with storedGS
+ b.setGenerationStamp(storedGS);
if (data.isValidRbw(b)) {
stage = BlockConstructionStage.TRANSFER_RBW;
} else if (data.isValidBlock(b)) {
@@ -2040,18 +2052,9 @@ public class DataNode extends Configured
final String r = data.getReplicaString(b.getBlockPoolId(),
b.getBlockId());
throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
}
-
- storedGS = data.getStoredBlock(b.getBlockPoolId(),
- b.getBlockId()).getGenerationStamp();
- if (storedGS < b.getGenerationStamp()) {
- throw new IOException(
- storedGS + " = storedGS < b.getGenerationStamp(), b=" + b);
- }
visible = data.getReplicaVisibleLength(b);
}
-
- //set storedGS and visible length
- b.setGenerationStamp(storedGS);
+ //set visible length
b.setNumBytes(visible);
if (targets.length > 0) {
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java?rev=1341962&r1=1341961&r2=1341962&view=diff
==============================================================================
---
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
(original)
+++
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
Wed May 23 18:01:11 2012
@@ -176,4 +176,32 @@ public class TestFileAppendRestart {
cluster.shutdown();
}
}
+
+ /**
+  * Test appending to the file when one of the datanodes in the existing pipeline is down.
+ * @throws Exception
+ */
+ @Test
+ public void testAppendWithPipelineRecovery() throws Exception {
+ Configuration conf = new Configuration();
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster.Builder(conf).manageDataDfsDirs(true)
+ .manageNameDfsDirs(true).numDataNodes(4)
+ .racks(new String[] { "/rack1", "/rack1", "/rack1", "/rack2" })
+ .build();
+ cluster.waitActive();
+
+ DistributedFileSystem fs = cluster.getFileSystem();
+ Path path = new Path("/test1");
+ DFSTestUtil.createFile(fs, path, 1024, (short) 3, 1l);
+
+ cluster.stopDataNode(3);
+ DFSTestUtil.appendFile(fs, path, "hello");
+ } finally {
+ if (null != cluster) {
+ cluster.shutdown();
+ }
+ }
+ }
}