Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b7f7fb003 -> 9873eb63a
  refs/heads/branch-2.9 52281fd89 -> 3487f2b72


HDFS-12935. Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up. Contributed by Jianfei Jiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9873eb63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9873eb63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9873eb63

Branch: refs/heads/branch-2
Commit: 9873eb63a7525301ab601a0ae65f7e615d1a6bce
Parents: b7f7fb0
Author: Brahma Reddy Battula <bra...@apache.org>
Authored: Thu Feb 8 18:33:11 2018 +0530
Committer: Brahma Reddy Battula <bra...@apache.org>
Committed: Thu Feb 8 18:33:11 2018 +0530

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/HAUtil.java     |   9 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   2 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 192 +++++---
 .../hadoop/hdfs/tools/TestDFSAdminWithHA.java   | 464 ++++++++++++++++++-
 4 files changed, 601 insertions(+), 66 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9873eb63/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 043e087..67fdc04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
@@ -320,6 +321,7 @@ public class HAUtil {
    */
   public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes)
       throws IOException {
+    List<IOException> exceptions = new ArrayList<>();
     for (ClientProtocol namenode : namenodes) {
       try {
         namenode.getFileInfo("/");
@@ -329,10 +331,15 @@ public class HAUtil {
         if (cause instanceof StandbyException) {
           // This is expected to happen for a standby NN.
         } else {
-          throw re;
+          exceptions.add(re);
         }
+      } catch (IOException ioe) {
+        exceptions.add(ioe);
       }
     }
+    if(!exceptions.isEmpty()){
+      throw MultipleIOException.createIOException(exceptions);
+    }
     return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9873eb63/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 50e9420..e724db7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4265,7 +4265,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   void setBalancerBandwidth(long bandwidth) throws IOException {
-    checkOperation(OperationCategory.UNCHECKED);
+    checkOperation(OperationCategory.WRITE);
     checkSuperuserPrivilege();
     getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9873eb63/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 1264fa0..1e877ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
@@ -49,7 +48,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.FsStatus;
-import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.shell.Command;
@@ -76,12 +74,12 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
-import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
@@ -768,15 +766,25 @@ public class DFSAdmin extends FsShell {
     Configuration dfsConf = dfs.getConf();
     URI dfsUri = dfs.getUri();
     boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
+    List<IOException> exceptions = new ArrayList<>();
     if (isHaEnabled) {
       String nsId = dfsUri.getHost();
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        proxy.getProxy().saveNamespace();
-        System.out.println("Save namespace successful for " +
-            proxy.getAddress());
+        try{
+          proxy.getProxy().saveNamespace();
+          System.out.println("Save namespace successful for " +
+              proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Save namespace failed for " +
+              proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.saveNamespace();
@@ -818,10 +826,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        Boolean res = proxy.getProxy().restoreFailedStorage(arg);
-        System.out.println("restoreFailedStorage is set to " + res + " for "
-            + proxy.getAddress());
+        try{
+          Boolean res = proxy.getProxy().restoreFailedStorage(arg);
+          System.out.println("restoreFailedStorage is set to " + res + " for "
+              + proxy.getAddress());
+        } catch (IOException ioe){
+          System.out.println("restoreFailedStorage failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       Boolean res = dfs.restoreFailedStorage(arg);
@@ -851,10 +869,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
-        proxy.getProxy().refreshNodes();
-        System.out.println("Refresh nodes successful for " +
-            proxy.getAddress());
+        try{
+          proxy.getProxy().refreshNodes();
+          System.out.println("Refresh nodes successful for " +
+              proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh nodes failed for " +
+              proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.refreshNodes();
@@ -873,21 +901,15 @@ public class DFSAdmin extends FsShell {
    */
   public int listOpenFiles() throws IOException {
     DistributedFileSystem dfs = getDFS();
-    Configuration dfsConf = dfs.getConf();
-    URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
-
     RemoteIterator<OpenFileEntry> openFilesRemoteIterator;
-    if (isHaEnabled) {
-      ProxyAndInfo<ClientProtocol> proxy = NameNodeProxies.createNonHAProxy(
-          dfsConf, HAUtil.getAddressOfActive(getDFS()), ClientProtocol.class,
-          UserGroupInformation.getCurrentUser(), false);
-      openFilesRemoteIterator = new OpenFilesIterator(proxy.getProxy(),
-          FsTracer.get(dfsConf));
-    } else {
+
+    try{
       openFilesRemoteIterator = dfs.listOpenFiles();
+      printOpenFiles(openFilesRemoteIterator);
+    } catch (IOException ioe){
+      System.out.println("List open files failed.");
+      throw ioe;
     }
-    printOpenFiles(openFilesRemoteIterator);
     return 0;
   }
 
@@ -905,8 +927,7 @@ public class DFSAdmin extends FsShell {
   }
 
   /**
-   * Command to ask the namenode to set the balancer bandwidth for all of the
-   * datanodes.
+   * Command to ask the active namenode to set the balancer bandwidth.
    * Usage: hdfs dfsadmin -setBalancerBandwidth bandwidth
    * @param argv List of of command line parameters.
    * @param idx The index of the command that is being processed.
@@ -937,27 +958,15 @@ public class DFSAdmin extends FsShell {
     }
 
     DistributedFileSystem dfs = (DistributedFileSystem) fs;
-    Configuration dfsConf = dfs.getConf();
-    URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
-
-    if (isHaEnabled) {
-      String nsId = dfsUri.getHost();
-      List<ProxyAndInfo<ClientProtocol>> proxies =
-          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
-          nsId, ClientProtocol.class);
-      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        proxy.getProxy().setBalancerBandwidth(bandwidth);
-        System.out.println("Balancer bandwidth is set to " + bandwidth +
-            " for " + proxy.getAddress());
-      }
-    } else {
+    try{
       dfs.setBalancerBandwidth(bandwidth);
       System.out.println("Balancer bandwidth is set to " + bandwidth);
+    } catch (IOException ioe){
+      System.err.println("Balancer bandwidth is set failed.");
+      throw ioe;
     }
-    exitCode = 0;
 
-    return exitCode;
+    return 0;
   }
 
   /**
@@ -1304,10 +1313,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        proxy.getProxy().finalizeUpgrade();
-        System.out.println("Finalize upgrade successful for " +
-            proxy.getAddress());
+        try{
+          proxy.getProxy().finalizeUpgrade();
+          System.out.println("Finalize upgrade successful for " +
+              proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Finalize upgrade failed for " +
+              proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.finalizeUpgrade();
@@ -1337,10 +1356,21 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        proxy.getProxy().metaSave(pathname);
-        System.out.println("Created metasave file " + pathname + " in the log "
-            + "directory of namenode " + proxy.getAddress());
+        try{
+          proxy.getProxy().metaSave(pathname);
+          System.out.println("Created metasave file " + pathname
+              + " in the log directory of namenode " + proxy.getAddress());
+        } catch (IOException ioe){
+          System.out.println("Created metasave file " + pathname
+              + " in the log directory of namenode " + proxy.getAddress()
+              + " failed");
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.metaSave(pathname);
@@ -1425,10 +1455,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshAuthorizationPolicyProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
-        proxy.getProxy().refreshServiceAcl();
-        System.out.println("Refresh service acl successful for "
-            + proxy.getAddress());
+        try{
+          proxy.getProxy().refreshServiceAcl();
+          System.out.println("Refresh service acl successful for "
+              + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh service acl failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()) {
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client
@@ -1468,10 +1508,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshUserMappingsProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
-        proxy.getProxy().refreshUserToGroupsMappings();
-        System.out.println("Refresh user to groups mapping successful for "
-            + proxy.getAddress());
+        try{
+          proxy.getProxy().refreshUserToGroupsMappings();
+          System.out.println("Refresh user to groups mapping successful for "
+              + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh user to groups mapping failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client
@@ -1513,10 +1563,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshUserMappingsProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
-        proxy.getProxy().refreshSuperUserGroupsConfiguration();
-        System.out.println("Refresh super user groups configuration " +
-            "successful for " + proxy.getAddress());
+        try{
+          proxy.getProxy().refreshSuperUserGroupsConfiguration();
+          System.out.println("Refresh super user groups configuration " +
+              "successful for " + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh super user groups configuration " +
+              "failed for " + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client
@@ -1552,10 +1612,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshCallQueueProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
-        proxy.getProxy().refreshCallQueue();
-        System.out.println("Refresh call queue successful for "
-            + proxy.getAddress());
+        try{
+          proxy.getProxy().refreshCallQueue();
+          System.out.println("Refresh call queue successful for "
+              + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh call queue failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9873eb63/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index 74f5e7a..97daf09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -33,6 +33,7 @@ import org.junit.After;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -50,7 +51,7 @@ public class TestDFSAdminWithHA {
   private static String newLine = System.getProperty("line.separator");
 
   private void assertOutputMatches(String string) {
-    String errOutput = new String(out.toByteArray(), Charsets.UTF_8);
+    String errOutput = new String(err.toByteArray(), Charsets.UTF_8);
     String output = new String(out.toByteArray(), Charsets.UTF_8);
 
     if (!errOutput.matches(string) && !output.matches(string)) {
@@ -156,6 +157,60 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testSaveNamespaceNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    // Safe mode should be turned ON in order to create namespace image.
+    int exitCode = admin.run(new String[] {"-safemode", "enter"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Safe mode is ON in.*";
+    assertOutputMatches(message + newLine + message + newLine);
+
+    cluster.getDfsCluster().shutdownNameNode(1);
+//
+    exitCode = admin.run(new String[] {"-saveNamespace"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "Save namespace successful for.*" + newLine
+        + "Save namespace failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testSaveNamespaceNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    // Safe mode should be turned ON in order to create namespace image.
+    int exitCode = admin.run(new String[] {"-safemode", "enter"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Safe mode is ON in.*";
+    assertOutputMatches(message + newLine + message + newLine);
+
+    cluster.getDfsCluster().shutdownNameNode(0);
+
+    exitCode = admin.run(new String[] {"-saveNamespace"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "Save namespace failed for.*" + newLine
+        + "Save namespace successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testSaveNamespaceNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    // Safe mode should be turned ON in order to create namespace image.
+    int exitCode = admin.run(new String[] {"-safemode", "enter"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Safe mode is ON in.*";
+    assertOutputMatches(message + newLine + message + newLine);
+
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+
+    exitCode = admin.run(new String[] {"-saveNamespace"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "Save namespace failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRestoreFailedStorage() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
@@ -176,6 +231,76 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRestoreFailedStorageNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "restoreFailedStorage is set to false for.*" + newLine
+        + "restoreFailedStorage failed for.*" + newLine;
+    // Default is false
+    assertOutputMatches(message);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage is set to true for.*" + newLine
+        + "restoreFailedStorage failed for.*" + newLine;
+    assertOutputMatches(message);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage is set to false for.*" + newLine
+        + "restoreFailedStorage failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRestoreFailedStorageNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "restoreFailedStorage failed for.*" + newLine
+        + "restoreFailedStorage is set to false for.*" + newLine;
+    // Default is false
+    assertOutputMatches(message);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*" + newLine
+        + "restoreFailedStorage is set to true for.*" + newLine;
+    assertOutputMatches(message);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*" + newLine
+        + "restoreFailedStorage is set to false for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRestoreFailedStorageNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "restoreFailedStorage failed for.*";
+    // Default is false
+    assertOutputMatches(message + newLine + message + newLine);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRefreshNodes() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshNodes"});
@@ -185,12 +310,81 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRefreshNodesNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshNodes"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh nodes successful for.*" + newLine
+        + "Refresh nodes failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshNodesNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshNodes"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh nodes failed for.*" + newLine
+        + "Refresh nodes successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshNodesNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshNodes"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh nodes failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testSetBalancerBandwidth() throws Exception {
     setUpHaCluster(false);
+    cluster.getDfsCluster().transitionToActive(0);
+
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
     assertEquals(err.toString().trim(), 0, exitCode);
-    String message = "Balancer bandwidth is set to 10 for.*";
-    assertOutputMatches(message + newLine + message + newLine);
+    String message = "Balancer bandwidth is set to 10";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testSetBalancerBandwidthNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    cluster.getDfsCluster().transitionToActive(0);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Balancer bandwidth is set to 10";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testSetBalancerBandwidthNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().transitionToActive(1);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Balancer bandwidth is set to 10";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test
+  public void testSetBalancerBandwidthNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Balancer bandwidth is set failed." + newLine
+        + ".*" + newLine;
+    assertOutputMatches(message);
   }
 
   @Test (timeout = 30000)
@@ -211,6 +405,44 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testMetaSaveNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*" + newLine
+        + "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*failed" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testMetaSaveNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*failed" + newLine
+        + "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testMetaSaveNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*failed";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRefreshServiceAcl() throws Exception {
     setUpHaCluster(true);
     int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
@@ -220,6 +452,40 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRefreshServiceAclNN1UpNN2Down() throws Exception {
+    setUpHaCluster(true);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh service acl successful for.*" + newLine
+        + "Refresh service acl failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshServiceAclNN1DownNN2Up() throws Exception {
+    setUpHaCluster(true);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh service acl failed for.*" + newLine
+        + "Refresh service acl successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshServiceAclNN1DownNN2Down() throws Exception {
+    setUpHaCluster(true);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh service acl failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+
+  @Test (timeout = 30000)
   public void testRefreshUserToGroupsMappings() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
@@ -229,6 +495,43 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRefreshUserToGroupsMappingsNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh user to groups mapping successful for.*"
+        + newLine
+        + "Refresh user to groups mapping failed for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshUserToGroupsMappingsNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh user to groups mapping failed for.*"
+        + newLine
+        + "Refresh user to groups mapping successful for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshUserToGroupsMappingsNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh user to groups mapping failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRefreshSuperUserGroupsConfiguration() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(
@@ -239,6 +542,49 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testRefreshSuperUserGroupsConfigurationNN1UpNN2Down()
+      throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(
+        new String[] {"-refreshSuperUserGroupsConfiguration"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh super user groups configuration successful for.*"
+        + newLine
+        + "Refresh super user groups configuration failed for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshSuperUserGroupsConfigurationNN1DownNN2Up()
+      throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(
+        new String[] {"-refreshSuperUserGroupsConfiguration"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh super user groups configuration failed for.*"
+        + newLine
+        + "Refresh super user groups configuration successful for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshSuperUserGroupsConfigurationNN1DownNN2Down()
+      throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(
+        new String[] {"-refreshSuperUserGroupsConfiguration"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh super user groups configuration failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
   public void testRefreshCallQueue() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshCallQueue"});
@@ -246,4 +592,116 @@ public class TestDFSAdminWithHA {
     String message = "Refresh call queue successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
+
+  @Test (timeout = 30000)
+  public void testRefreshCallQueueNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshCallQueue"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh call queue successful for.*" + newLine
+        + "Refresh call queue failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshCallQueueNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshCallQueue"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh call queue failed for.*" + newLine
+        + "Refresh call queue successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshCallQueueNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshCallQueue"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh call queue failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgrade() throws Exception {
+    setUpHaCluster(false);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = ".*Cannot finalize with no NameNode active";
+    assertOutputMatches(message + newLine);
+
+    cluster.getDfsCluster().transitionToActive(0);
+    exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    message = "Finalize upgrade successful for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgradeNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    cluster.getDfsCluster().transitionToActive(0);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Finalize upgrade successful for .*" + newLine
+        + "Finalize upgrade failed for .*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgradeNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().transitionToActive(1);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Finalize upgrade failed for .*" + newLine
+        + "Finalize upgrade successful for .*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgradeNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = ".*2 exceptions.*";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testListOpenFilesNN1UpNN2Down() throws Exception{
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    cluster.getDfsCluster().transitionToActive(0);
+    int exitCode = admin.run(new String[] {"-listOpenFiles"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+  }
+
+  @Test (timeout = 30000)
+  public void testListOpenFilesNN1DownNN2Up() throws Exception{
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().transitionToActive(1);
+    int exitCode = admin.run(new String[] {"-listOpenFiles"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+  }
+
+  @Test
+  public void testListOpenFilesNN1DownNN2Down() throws Exception{
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-listOpenFiles"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = ".*" + newLine + "List open files failed." + newLine;
+    assertOutputMatches(message);
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to