HDFS-11085. Add unit test for NameNode failing to start when name dir is unwritable. Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c0ab102
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c0ab102
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c0ab102

Branch: refs/heads/YARN-5355
Commit: 0c0ab102ab392ba07ed2aa8d8a67eef4c20cad9b
Parents: de01327
Author: Mingliang Liu <lium...@apache.org>
Authored: Fri Nov 4 14:34:40 2016 -0700
Committer: Mingliang Liu <lium...@apache.org>
Committed: Fri Nov 4 14:34:49 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/namenode/TestStartup.java       | 55 +++++++++++++++++++-
 1 file changed, 54 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c0ab102/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 4b6c0bd..5da19a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -19,8 +19,12 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption.IMPORT;
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
+import static org.hamcrest.CoreMatchers.allOf;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -29,6 +33,8 @@ import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.net.InetAddress;
 import java.net.URI;
+import java.nio.file.Paths;
+import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 
@@ -49,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -108,7 +115,7 @@ public class TestStartup {
         fileAsURI(new File(hdfsDir, "secondary")).toString());
     config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
               WILDCARD_HTTP_HOST + "0");
-    
+
     FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
   }
 
@@ -645,6 +652,52 @@ public class TestStartup {
     }
   }
 
+  @Test(timeout = 30000)
+  public void testNNFailToStartOnReadOnlyNNDir() throws Exception {
+    /* set NN dir */
+    final String nnDirStr = Paths.get(
+        hdfsDir.toString(),
+        GenericTestUtils.getMethodName(), "name").toString();
+    config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nnDirStr);
+
+    try(MiniDFSCluster cluster = new MiniDFSCluster.Builder(config)
+        .numDataNodes(1)
+        .manageNameDfsDirs(false)
+        .build()) {
+      cluster.waitActive();
+
+      /* get and verify NN dir */
+      final Collection<URI> nnDirs = FSNamesystem.getNamespaceDirs(config);
+      assertNotNull(nnDirs);
+      assertTrue(nnDirs.iterator().hasNext());
+      assertEquals(
+          "NN dir should be created after NN startup.",
+          nnDirStr,
+          nnDirs.iterator().next().getPath());
+      final File nnDir = new File(nnDirStr);
+      assertTrue(nnDir.exists());
+      assertTrue(nnDir.isDirectory());
+
+      try {
+        /* set read only */
+        assertTrue(
+            "Setting NN dir read only should succeed.",
+            nnDir.setReadOnly());
+        cluster.restartNameNodes();
+        fail("Restarting NN should fail on read only NN dir.");
+      } catch (InconsistentFSStateException e) {
+        assertThat(e.toString(), is(allOf(
+            containsString("InconsistentFSStateException"),
+            containsString(nnDirStr),
+            containsString("in an inconsistent state"),
+            containsString(
+                "storage directory does not exist or is not accessible."))));
+      } finally {
+        /* set back to writable in order to clean it */
+        assertTrue("Setting NN dir should succeed.", nnDir.setWritable(true));
+      }
+    }
+  }
 
   /**
    * Verify the following scenario.

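To exercise the new test locally, one way (assuming a standard Maven-based Hadoop checkout and a Surefire version that supports method-level filtering) is to run just the added test method from the hadoop-hdfs module; the exact invocation below is illustrative and not part of the commit:

    cd hadoop-hdfs-project/hadoop-hdfs
    mvn test -Dtest=TestStartup#testNNFailToStartOnReadOnlyNNDir

Note that the test relies on File.setReadOnly() actually revoking write access to the name directory, so it will likely not behave as expected when run as root.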
