This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 853eafa81afcb17e27a14807af27d7f0af2d5dcd
Author: Ayush Saxena <ayushsax...@apache.org>
AuthorDate: Thu Mar 19 21:23:13 2020 +0530

    HDFS-15223. FSCK fails if one namenode is not available. Contributed by Ayush Saxena.
    
    (cherry picked from commit bb41ddaf1e0c9bf44830b2cf0ac653b7354abf46)
---
 .../src/main/java/org/apache/hadoop/hdfs/HAUtil.java      | 15 ++++++++++++---
 .../apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java |  7 ++++++-
 2 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 79275b0..aebc28a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -57,10 +57,14 @@ import org.apache.hadoop.security.UserGroupInformation;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 public class HAUtil {
-  
+
+  public static final org.slf4j.Logger LOG =
+      LoggerFactory.getLogger(HAUtil.class.getName());
+
   private static final String[] HA_SPECIAL_INDEPENDENT_KEYS = new String[]{
     DFS_NAMENODE_RPC_ADDRESS_KEY,
     DFS_NAMENODE_RPC_BIND_HOST_KEY,
@@ -273,8 +277,13 @@ public class HAUtil {
       List<ClientProtocol> namenodes =
           getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
       for (ClientProtocol proxy : namenodes) {
-        if (proxy.getHAServiceState().equals(HAServiceState.ACTIVE)) {
-          inAddr = RPC.getServerAddress(proxy);
+        try {
+          if (proxy.getHAServiceState().equals(HAServiceState.ACTIVE)) {
+            inAddr = RPC.getServerAddress(proxy);
+          }
+        } catch (Exception e) {
+          //Ignore the exception while connecting to a namenode.
+          LOG.debug("Error while connecting to namenode", e);
         }
       }
     } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
index cc8ead1..46ebb8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
@@ -75,7 +75,12 @@ public class TestHAFsck {
       
       cluster.transitionToStandby(0);
       cluster.transitionToActive(1);
-      
+
+      runFsck(conf);
+      // Stop one standby namenode, FSCK should still be successful, since there
+      // is one Active namenode available
+      cluster.getNameNode(0).stop();
+
       runFsck(conf);
     } finally {
       if (fs != null) {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to