This is an automated email from the ASF dual-hosted git repository.

cliang pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 82f68a47c4f259aeb4585623485e309b4319c430
Author: Chen Liang <[email protected]>
AuthorDate: Wed Dec 12 10:39:39 2018 -0800

    HDFS-14142. Move ipfailover config key out of HdfsClientConfigKeys. Contributed by Chen Liang.
---
 .../java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java | 3 ---
 .../namenode/ha/ObserverReadProxyProviderWithIPFailover.java     | 9 +++++----
 2 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 00fb12d..52a7cd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -181,9 +181,6 @@ public interface HdfsClientConfigKeys {
   String DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES =
       "dfs.namenode.snapshot.capture.openfiles";
   boolean DFS_NAMENODE_SNAPSHOT_CAPTURE_OPENFILES_DEFAULT = false;
-  
-  String DFS_CLIENT_FAILOVER_IPFAILOVER_VIRTUAL_ADDRESS =
-      Failover.PREFIX + "ipfailover.virtual-address";
 
   /**
    * These are deprecated config keys to client code.
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
index 22f6dd3..fc12386 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ObserverReadProxyProviderWithIPFailover.java
@@ -22,13 +22,12 @@ import java.net.URI;
 
 import java.util.Collections;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_FAILOVER_IPFAILOVER_VIRTUAL_ADDRESS;
-
 /**
  * Extends {@link ObserverReadProxyProvider} to support NameNode IP failover.
  *
@@ -81,6 +80,9 @@ public class ObserverReadProxyProviderWithIPFailover<T extends ClientProtocol>
   private static final Logger LOG = LoggerFactory.getLogger(
       ObserverReadProxyProviderWithIPFailover.class);
 
+  private static final String IPFAILOVER_CONFIG_PREFIX =
+      HdfsClientConfigKeys.Failover.PREFIX + "ipfailover.virtual-address";
+
   /**
    * By default ObserverReadProxyProviderWithIPFailover
    * uses {@link IPFailoverProxyProvider} for failover.
@@ -123,8 +125,7 @@ public class ObserverReadProxyProviderWithIPFailover<T extends ClientProtocol>
 
   private static URI getFailoverVirtualIP(
       Configuration conf, String nameServiceID) {
-    String configKey = DFS_CLIENT_FAILOVER_IPFAILOVER_VIRTUAL_ADDRESS
-        + "." + nameServiceID;
+    String configKey = IPFAILOVER_CONFIG_PREFIX + "." + nameServiceID;
     String virtualIP = conf.get(configKey);
     LOG.info("Name service ID {} will use virtual IP {} for failover",
         nameServiceID, virtualIP);

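For context, a minimal sketch (not part of the patch) of how the relocated key is assembled and looked up after this change. The nameservice ID "ns1", the address value, and the class name IpFailoverKeySketch below are hypothetical examples; the key construction mirrors the new private IPFAILOVER_CONFIG_PREFIX constant in ObserverReadProxyProviderWithIPFailover.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class IpFailoverKeySketch {
  public static void main(String[] args) {
    // Same construction as the new IPFAILOVER_CONFIG_PREFIX constant:
    // HdfsClientConfigKeys.Failover.PREFIX (dfs.client.failover.) plus the suffix.
    String prefix =
        HdfsClientConfigKeys.Failover.PREFIX + "ipfailover.virtual-address";

    String nameServiceID = "ns1";  // hypothetical nameservice ID
    String configKey = prefix + "." + nameServiceID;
    // i.e. dfs.client.failover.ipfailover.virtual-address.ns1

    Configuration conf = new Configuration();
    conf.set(configKey, "hdfs://10.0.0.100:8020");  // hypothetical virtual address
    System.out.println(conf.get(configKey));
  }
}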

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
