Author: atm
Date: Mon Jan 7 21:50:28 2013
New Revision: 1430038
URL: http://svn.apache.org/viewvc?rev=1430038&view=rev
Log:
HDFS-3970. Fix bug causing rollback of HDFS upgrade to result in bad VERSION
file. Contributed by Vinay and Andrew Wang.
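In short: during a DataNode block pool rollback, BlockPoolSliceStorage#doRollback read the previous directory's VERSION file through a plain DataStorage instance, so the properties were handled with DataNode-level rather than block-pool-level semantics and the rollback could leave a bad VERSION file behind. The patch below adds a private no-arg BlockPoolSliceStorage constructor and uses it for that read instead. A minimal sketch of the corrected read path (the doRollback signature and trailing comments here are illustrative, not verbatim from the patch):

    // Sketch only, inside BlockPoolSliceStorage: how the fixed rollback path
    // reads the block pool's previous VERSION file.
    void doRollback(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException {
      File prevDir = bpSd.getPreviousDir();
      if (!prevDir.exists())
        return;
      // Use a BlockPoolSliceStorage, not a DataStorage, so the block pool's
      // VERSION properties are read and validated with the right semantics.
      BlockPoolSliceStorage prevInfo = new BlockPoolSliceStorage();
      prevInfo.readPreviousVersionProperties(bpSd);
      // ... verify that rolling back to prevInfo's state is allowed, then
      // restore the previous directory over current ...
    }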
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1430038&r1=1430037&r2=1430038&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Jan 7 21:50:28 2013
@@ -357,6 +357,9 @@ Release 2.0.3-alpha - Unreleased
HDFS-4302. Fix fatal exception when starting NameNode with DEBUG logs
(Eugene Koontz via todd)
+ HDFS-3970. Fix bug causing rollback of HDFS upgrade to result in bad
+ VERSION file. (Vinay and Andrew Wang via atm)
+
BREAKDOWN OF HDFS-3077 SUBTASKS
HDFS-3077. Quorum-based protocol for reading and writing edit logs.
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java?rev=1430038&r1=1430037&r2=1430038&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java Mon Jan 7 21:50:28 2013
@@ -78,6 +78,10 @@ public class BlockPoolSliceStorage exten
this.clusterID = clusterId;
}
+ private BlockPoolSliceStorage() {
+ super(NodeType.DATA_NODE);
+ }
+
/**
* Analyze storage directories. Recover from previous transitions if
* required.
*
@@ -378,7 +382,7 @@ public class BlockPoolSliceStorage exten
if (!prevDir.exists())
return;
// read attributes out of the VERSION file of previous directory
- DataStorage prevInfo = new DataStorage();
+ BlockPoolSliceStorage prevInfo = new BlockPoolSliceStorage();
prevInfo.readPreviousVersionProperties(bpSd);
// We allow rollback to a state, which is either consistent with
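Note on the hunk above: the new private no-arg constructor only calls super(NodeType.DATA_NODE) and exists so doRollback can build an empty BlockPoolSliceStorage purely to read properties back out of the previous directory's VERSION file; previously that read went through new DataStorage(), which handles the DataNode-level VERSION file rather than the per-block-pool one.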
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1430038&r1=1430037&r2=1430038&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java Mon Jan 7 21:50:28 2013
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
+import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
@@ -31,6 +32,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
@@ -176,6 +178,44 @@ public class TestDFSRollback {
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
+ log("Normal BlockPool rollback", numDirs);
+ UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
+ UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+ .format(false)
+ .manageDataDfsDirs(false)
+ .manageNameDfsDirs(false)
+ .startupOption(StartupOption.ROLLBACK)
+ .build();
+ UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
+ UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "current",
+ UpgradeUtilities.getCurrentBlockPoolID(cluster));
+ // Create a previous snapshot for the blockpool
+ UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous",
+ UpgradeUtilities.getCurrentBlockPoolID(cluster));
+ // Older LayoutVersion to make it rollback
+ storageInfo = new StorageInfo(
+ UpgradeUtilities.getCurrentLayoutVersion()+1,
+ UpgradeUtilities.getCurrentNamespaceID(cluster),
+ UpgradeUtilities.getCurrentClusterID(cluster),
+ UpgradeUtilities.getCurrentFsscTime(cluster));
+ // Create old VERSION file for each data dir
+ for (int i=0; i<dataNodeDirs.length; i++) {
+ Path bpPrevPath = new Path(dataNodeDirs[i] + "/current/"
+ + UpgradeUtilities.getCurrentBlockPoolID(cluster));
+ UpgradeUtilities.createBlockPoolVersionFile(
+ new File(bpPrevPath.toString()),
+ storageInfo,
+ UpgradeUtilities.getCurrentBlockPoolID(cluster));
+ }
+
+ cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
+ assertTrue(cluster.isDataNodeUp());
+
+ cluster.shutdown();
+ UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+ UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("NameNode rollback without existing previous dir", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");