Author: umamahesh
Date: Tue May 1 05:47:27 2012
New Revision: 1332527
URL: http://svn.apache.org/viewvc?rev=1332527&view=rev
Log:
HDFS-3275. Skip format for non-file based directories. Contributed by Amith D K.
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL:
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1332527&r1=1332526&r2=1332527&view=diff
==============================================================================
---
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
(original)
+++
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
Tue May 1 05:47:27 2012
@@ -25,6 +25,7 @@ import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.URI;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
@@ -674,10 +675,14 @@ public class NameNode {
initializeGenericKeys(conf, nsId, namenodeId);
checkAllowFormat(conf);
- Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+ Collection<URI> nameDirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+ List<URI> sharedDirs = FSNamesystem.getSharedEditsDirs(conf);
+ List<URI> dirsToPrompt = new ArrayList<URI>();
+ dirsToPrompt.addAll(nameDirsToFormat);
+ dirsToPrompt.addAll(sharedDirs);
List<URI> editDirsToFormat =
FSNamesystem.getNamespaceEditsDirs(conf);
- if (!confirmFormat(dirsToFormat, force, isInteractive)) {
+ if (!confirmFormat(dirsToPrompt, force, isInteractive)) {
return true; // aborted
}
@@ -689,7 +694,7 @@ public class NameNode {
}
System.out.println("Formatting using clusterid: " + clusterId);
- FSImage fsImage = new FSImage(conf, dirsToFormat, editDirsToFormat);
+ FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat);
FSNamesystem fsn = new FSNamesystem(conf, fsImage);
fsImage.format(fsn, clusterId);
return false;
@@ -711,7 +716,18 @@ public class NameNode {
boolean force, boolean interactive)
throws IOException {
for(Iterator<URI> it = dirsToFormat.iterator(); it.hasNext();) {
- File curDir = new File(it.next().getPath());
+ URI dirUri = it.next();
+ if (!dirUri.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
+ System.err.println("Skipping format for directory \"" + dirUri
+ + "\". Can only format local directories with scheme \""
+ + NNStorage.LOCAL_URI_SCHEME + "\".");
+ continue;
+ }
+ // Validate that only file-based schemes are formatted
+ assert dirUri.getScheme().equals(NNStorage.LOCAL_URI_SCHEME) :
+ "formatting is not supported for " + dirUri;
+
+ File curDir = new File(dirUri.getPath());
// Its alright for a dir not to exist, or to exist (properly accessible)
// and be completely empty.
if (!curDir.exists() ||
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
URL:
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java?rev=1332527&r1=1332526&r2=1332527&view=diff
==============================================================================
---
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
(original)
+++
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
Tue May 1 05:47:27 2012
@@ -27,13 +27,19 @@ import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
+import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import
org.apache.hadoop.hdfs.server.namenode.TestGenericJournalConf.DummyJournalManager;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.util.StringUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -144,4 +150,34 @@ public class TestAllowFormat {
NameNode.format(config);
LOG.info("Done verifying format will succeed with allowformat true");
}
+
+ /**
+ * Test that format is skipped for a configured directory with a non-file scheme.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ String logicalName = "mycluster";
+
+ // DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify that the NameNode
+ // is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
+ // considered.
+ String localhost = "127.0.0.1";
+ InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
+ InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
+ HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
+
+ conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
+ conf.set(DFSUtil.addKeySuffixes(
+ DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
+ DummyJournalManager.class.getName());
+ conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
+ + localhost + ":2181/ledgers");
+ conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
+
+ // An internal assert verifies that the non-file-based directory is skipped
+ NameNode.format(conf);
+ }
}
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
URL:
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java?rev=1332527&r1=1332526&r2=1332527&view=diff
==============================================================================
---
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
(original)
+++
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
Tue May 1 05:47:27 2012
@@ -167,6 +167,15 @@ public abstract class HATestUtil {
Configuration conf, String logicalName, int nsIndex) {
InetSocketAddress nnAddr1 = cluster.getNameNode(2 *
nsIndex).getNameNodeAddress();
InetSocketAddress nnAddr2 = cluster.getNameNode(2 * nsIndex +
1).getNameNodeAddress();
+ setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
+ }
+
+ /**
+ * Sets the required configurations for performing failover
+ */
+ public static void setFailoverConfigurations(Configuration conf,
+ String logicalName, InetSocketAddress nnAddr1,
+ InetSocketAddress nnAddr2) {
String nameNodeId1 = "nn1";
String nameNodeId2 = "nn2";
String address1 = "hdfs://" + nnAddr1.getHostName() + ":" +
nnAddr1.getPort();