This is an automated email from the ASF dual-hosted git repository.

brahma pushed a commit to branch HADOOP-17800
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/HADOOP-17800 by this push:
     new 62e77a5  HDFS-9266. Avoid unsafe split and append on fields that might be IPv6 literals. Contributed by Nemanja Matkovic and Hemanth Boyina.
62e77a5 is described below

commit 62e77a5bc13358b3a6b9092f8f3f4d5c556e11b1
Author: Brahma Reddy Battula <bra...@apache.org>
AuthorDate: Fri Jul 30 08:39:51 2021 +0530

    HDFS-9266. Avoid unsafe split and append on fields that might be IPv6 literals. Contributed by Nemanja Matkovic and Hemanth Boyina.
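    
    Illustrative note (not part of the patch): splitting "host:port" on the
    first ':' truncates an IPv6 literal, which itself contains colons. A
    minimal, runnable sketch of the failure and the fix; the class name and
    main() driver are made up for the example:
    
        public class SplitHostPortSketch {
          // For "[2001:db8::1]:8020", indexOf(':') cuts inside the address;
          // lastIndexOf(':') finds the port separator after the bracket.
          static String hostOf(String hostPort) {
            int idx = hostPort.lastIndexOf(':');
            return idx < 0 ? hostPort : hostPort.substring(0, idx);
          }
        
          public static void main(String[] args) {
            System.out.println(hostOf("127.0.0.1:8020"));       // 127.0.0.1
            System.out.println(hostOf("[2001:db8::1]:8020"));   // [2001:db8::1]
            // The buggy split, for contrast:
            String s = "[2001:db8::1]:8020";
            System.out.println(s.substring(0, s.indexOf(':'))); // [2001  (wrong)
          }
        }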
---
 .../hdfs/client/impl/BlockReaderFactory.java       |   3 +-
 .../org/apache/hadoop/hdfs/web/JsonUtilClient.java |   2 +-
 .../hdfs/qjournal/client/IPCLoggerChannel.java     |   5 +-
 .../server/blockmanagement/DatanodeManager.java    |   9 +-
 .../server/datanode/BlockPoolSliceStorage.java     |  16 ++-
 .../hadoop/hdfs/server/datanode/DataXceiver.java   |   2 +-
 .../hadoop/hdfs/server/namenode/Checkpointer.java  |   6 +-
 .../hadoop/hdfs/server/namenode/NNStorage.java     |   5 +
 .../web/resources/NamenodeWebHdfsMethods.java      |  15 +--
 .../java/org/apache/hadoop/hdfs/tools/GetConf.java |   3 +-
 .../tools/offlineImageViewer/WebImageViewer.java   |   5 +-
 .../apache/hadoop/hdfs/TestDFSAddressConfig.java   |  10 +-
 .../java/org/apache/hadoop/hdfs/TestDFSUtil.java   |  49 ++++++--
 .../org/apache/hadoop/hdfs/TestFileAppend.java     |   7 +-
 .../org/apache/hadoop/hdfs/TestFileCreation.java   | 137 +++++++++++----------
 .../hdfs/client/impl/BlockReaderTestUtil.java      |   3 +-
 .../qjournal/client/TestQuorumJournalManager.java  |   6 +-
 .../server/datanode/TestBlockPoolSliceStorage.java |  28 ++++-
 .../namenode/TestNameNodeRespectsBindHostKeys.java |  79 ++++++++++--
 .../server/namenode/TestNameNodeRpcServer.java     |  12 +-
 20 files changed, 274 insertions(+), 128 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
index f9fd2b1..70545c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
 import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -876,6 +877,6 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
    */
   public static String getFileName(final InetSocketAddress s,
       final String poolId, final long blockId) {
-    return s.toString() + ":" + poolId + ":" + blockId;
+    return NetUtils.getSocketAddressString(s) + ":" + poolId + ":" + blockId;
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 6acd062..e302605 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -305,7 +305,7 @@ public class JsonUtilClient {
     if (ipAddr == null) {
       String name = getString(m, "name", null);
       if (name != null) {
-        int colonIdx = name.indexOf(':');
+        int colonIdx = name.lastIndexOf(':');
         if (colonIdx > 0) {
           ipAddr = name.substring(0, colonIdx);
           xferPort = Integer.parseInt(name.substring(colonIdx +1));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 9908160..e695790 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -54,12 +54,12 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.StopWatch;
 
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture;
@@ -709,8 +709,7 @@ public class IPCLoggerChannel implements AsyncLogger {
 
   @Override
   public String toString() {
-    return InetAddresses.toAddrString(addr.getAddress()) + ':' +
-        addr.getPort();
+    return NetUtils.getHostPortString(addr);
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 68ee16c..dd3d23b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.http.conn.util.InetAddressUtils;
 import org.apache.hadoop.util.Sets;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;
@@ -1522,7 +1523,13 @@ public class DatanodeManager {
     DatanodeID dnId;
     String hostStr;
     int port;
-    int idx = hostLine.indexOf(':');
+    int idx;
+
+    if (InetAddressUtils.isIPv6StdAddress(hostLine)) {
+      idx = -1;
+    } else {
+      idx = hostLine.lastIndexOf(':');
+    }
 
     if (-1 == idx) {
       hostStr = hostLine;
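
Illustrative note (not part of the patch): InetAddressUtils.isIPv6StdAddress()
from Apache HttpClient accepts only the uncompressed eight-group form, so a
bare standard IPv6 literal is treated as host-only while everything else falls
through to the lastIndexOf(':') split. A small runnable sketch, with a made-up
class name:

    import org.apache.http.conn.util.InetAddressUtils;

    public class HostLineGuardSketch {
      public static void main(String[] args) {
        // Uncompressed IPv6 literal: recognized, so no port split is attempted.
        System.out.println(InetAddressUtils.isIPv6StdAddress("0:0:0:0:0:0:0:1")); // true
        // The compressed "::" form fails the std-address check, so a portless
        // compressed literal would still be split at its last ':'.
        System.out.println(InetAddressUtils.isIPv6StdAddress("::1"));             // false
        // host:port still splits at the final colon.
        System.out.println("127.0.0.1:9866".lastIndexOf(':'));                    // 9
      }
    }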
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index e92ae07..40df72e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -80,11 +80,21 @@ public class BlockPoolSliceStorage extends Storage {
    *      progress. Do not delete the 'previous' directory.
    */
   static final String ROLLING_UPGRADE_MARKER_FILE = "RollingUpgradeInProgress";
+  private static final String BLOCK_POOL_ID_IPV4_PATTERN_BASE =
+      "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}";
+
+  // Because ":" is not supported in paths, the BlockPoolID on IPv6 boxes
+  // replaces ":" with ".".
+  // The IPv6 format is also less rigid, so we surround the address with
+  // square brackets and simply match whatever is between them.
+  private static final String BLOCK_POOL_ID_IPV6_PATTERN_BASE =
+      Pattern.quote("[") + "(?:.*)" + Pattern.quote("]");
 
   private static final String BLOCK_POOL_ID_PATTERN_BASE =
-      Pattern.quote(File.separator) +
-      "BP-\\d+-\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}-\\d+" +
-      Pattern.quote(File.separator);
+      Pattern.quote(File.separator) + "BP-\\d+-(?:"
+          + BLOCK_POOL_ID_IPV4_PATTERN_BASE + "|"
+          + BLOCK_POOL_ID_IPV6_PATTERN_BASE + ")-\\d+"
+          + Pattern.quote(File.separator);
 
   private static final Pattern BLOCK_POOL_PATH_PATTERN = Pattern.compile(
       "^(.*)(" + BLOCK_POOL_ID_PATTERN_BASE + ")(.*)$");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index a80d0c4..88ed7e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -147,7 +147,7 @@ class DataXceiver extends Receiver implements Runnable {
     this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(datanode.getConf());
     this.smallBufferSize = DFSUtilClient.getSmallBufferSize(datanode.getConf());
     remoteAddress = peer.getRemoteAddressString();
-    final int colonIdx = remoteAddress.indexOf(':');
+    final int colonIdx = remoteAddress.lastIndexOf(':');
     remoteAddressWithoutPort =
         (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
     localAddress = peer.getLocalAddressString();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
index 618a372..252d56e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
@@ -103,9 +103,9 @@ class Checkpointer extends Daemon {
     checkpointConf = new CheckpointConf(conf);
 
     // Pull out exact http address for posting url to avoid ip aliasing issues
-    String fullInfoAddr = conf.get(DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, 
-                                   DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT);
-    infoBindAddress = fullInfoAddr.substring(0, fullInfoAddr.indexOf(":"));
+    String fullInfoAddr = conf.get(DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
+        DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT);
+    infoBindAddress = fullInfoAddr.substring(0, fullInfoAddr.lastIndexOf(":"));
 
     LOG.info("Checkpoint Period : " +
              checkpointConf.getPeriod() + " secs " +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 092885a..e8a346b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
+import org.apache.http.conn.util.InetAddressUtils;
 import org.eclipse.jetty.util.ajax.JSON;
 
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
@@ -1024,6 +1025,10 @@ public class NNStorage extends Storage implements Closeable,
     String ip;
     try {
       ip = DNS.getDefaultIP("default");
+      if (InetAddressUtils.isIPv6StdAddress(ip)) {
+        // HDFS doesn't support ":" in path, replace it with "."
+        ip = "[" + ip.replaceAll(":", ".") + "]";
+      }
     } catch (UnknownHostException e) {
       LOG.warn("Could not find ip address of \"default\" inteface.");
       throw e;
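
Illustrative note (not part of the patch): this substitution is what produces
the bracketed, dot-separated form the BlockPoolSliceStorage pattern earlier in
this patch matches. A JDK-only sketch; the literal stands in for whatever
DNS.getDefaultIP returns:

    public class PathSafeIpSketch {
      public static void main(String[] args) {
        String ip = "0:0:0:0:0:0:0:1";               // standard (uncompressed) form
        String pathSafe = "[" + ip.replaceAll(":", ".") + "]";
        System.out.println(pathSafe);                // [0.0.0.0.0.0.0.1]
      }
    }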
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 249bac6..72fa9b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -117,6 +117,7 @@ import org.apache.hadoop.util.StringUtils;
 
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
+import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort;
 import com.sun.jersey.spi.container.ResourceFilters;
 
 /** Web-hdfs NameNode implementation. */
@@ -273,22 +274,22 @@ public class NamenodeWebHdfsMethods {
     
     HashSet<Node> excludes = new HashSet<Node>();
     if (excludeDatanodes != null) {
-      for (String host : StringUtils
+      for (String hostAndPort : StringUtils
           .getTrimmedStringCollection(excludeDatanodes)) {
-        int idx = host.indexOf(":");
+        HostAndPort hp = HostAndPort.fromString(hostAndPort);
         Node excludeNode = null;
-        if (idx != -1) {
-          excludeNode = bm.getDatanodeManager().getDatanodeByXferAddr(
-             host.substring(0, idx), Integer.parseInt(host.substring(idx + 1)));
+        if (hp.hasPort()) {
+          excludeNode = bm.getDatanodeManager()
+              .getDatanodeByXferAddr(hp.getHost(), hp.getPort());
         } else {
-          excludeNode = bm.getDatanodeManager().getDatanodeByHost(host);
+          excludeNode = bm.getDatanodeManager().getDatanodeByHost(hostAndPort);
         }
 
         if (excludeNode != null) {
           excludes.add(excludeNode);
         } else {
           LOG.debug("DataNode {} was requested to be excluded, "
-                + "but it was not found.", host);
+              + "but it was not found.", hostAndPort);
         }
       }
     }
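
Illustrative note (not part of the patch): Guava's HostAndPort does the
bracket-aware parsing the old indexOf split could not. A minimal sketch
against the shaded class imported above; address and port are examples:

    import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort;

    public class ExcludeListParseSketch {
      public static void main(String[] args) {
        HostAndPort hp = HostAndPort.fromString("[2001:db8::1]:9866");
        System.out.println(hp.getHost());   // 2001:db8::1 (brackets stripped)
        System.out.println(hp.hasPort());   // true
        System.out.println(hp.getPort());   // 9866
        // A bare hostname parses too, with hasPort() == false.
        System.out.println(HostAndPort.fromString("dn1.example.com").hasPort());
      }
    }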
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
index aaa1038..448d993 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
@@ -245,7 +246,7 @@ public class GetConf extends Configured implements Tool {
       if (!cnnlist.isEmpty()) {
         for (ConfiguredNNAddress cnn : cnnlist) {
           InetSocketAddress rpc = cnn.getAddress();
-          tool.printOut(rpc.getHostName()+":"+rpc.getPort());
+          tool.printOut(NetUtils.getHostPortString(rpc));
         }
         return 0;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java
index 29ac759..b4c3dcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java
@@ -34,6 +34,7 @@ import io.netty.handler.codec.string.StringEncoder;
 import io.netty.util.concurrent.GlobalEventExecutor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -122,7 +123,9 @@ public class WebImageViewer implements Closeable {
     allChannels.add(channel);
 
     address = (InetSocketAddress) channel.localAddress();
-    LOG.info("WebImageViewer started. Listening on " + address.toString() + ". 
Press Ctrl+C to stop the viewer.");
+    LOG.info("WebImageViewer started. Listening on " + NetUtils
+        .getSocketAddressString(address) +
+        ". Press Ctrl+C to stop the viewer.");
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
index c61c0b1..266c0fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;
 
 
@@ -55,7 +56,7 @@ public class TestDFSAddressConfig {
     ArrayList<DataNode> dns = cluster.getDataNodes();
     DataNode dn = dns.get(0);
 
-    String selfSocketAddr = dn.getXferAddress().toString();
+    String selfSocketAddr = NetUtils.getSocketAddressString(dn.getXferAddress());
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
 
@@ -80,7 +81,7 @@ public class TestDFSAddressConfig {
     dns = cluster.getDataNodes();
     dn = dns.get(0);
 
-    selfSocketAddr = dn.getXferAddress().toString();
+    selfSocketAddr = NetUtils.getSocketAddressString(dn.getXferAddress());
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 127.0.0.1
     assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
@@ -105,10 +106,11 @@ public class TestDFSAddressConfig {
     dns = cluster.getDataNodes();
     dn = dns.get(0);
 
-    selfSocketAddr = dn.getXferAddress().toString();
+    selfSocketAddr = NetUtils.getSocketAddressString(dn.getXferAddress());
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 0.0.0.0
-    assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
+    assertTrue(selfSocketAddr.contains("/0.0.0.0:") ||
+        selfSocketAddr.contains("/[0:0:0:0:0:0:0:0]:"));
 
     cluster.shutdown();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 9a024c3..43bd44e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -541,7 +541,7 @@ public class TestDFSUtil {
         DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn1"),
         NS2_NN1_HOST);
     conf.set(DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn2"),
+          DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn2"),
         NS2_NN2_HOST);
     
     Map<String, Map<String, InetSocketAddress>> map =
@@ -550,17 +550,21 @@ public class TestDFSUtil {
     assertTrue(HAUtil.isHAEnabled(conf, "ns1"));
     assertTrue(HAUtil.isHAEnabled(conf, "ns2"));
     assertFalse(HAUtil.isHAEnabled(conf, "ns3"));
-    
-    assertEquals(NS1_NN1_HOST, map.get("ns1").get("ns1-nn1").toString());
-    assertEquals(NS1_NN2_HOST, map.get("ns1").get("ns1-nn2").toString());
-    assertEquals(NS2_NN1_HOST, map.get("ns2").get("ns2-nn1").toString());
-    assertEquals(NS2_NN2_HOST, map.get("ns2").get("ns2-nn2").toString());
-    
-    assertEquals(NS1_NN1_HOST, 
+
+    assertEquals(NS1_NN1_HOST,
+        NetUtils.getHostPortString(map.get("ns1").get("ns1-nn1")));
+    assertEquals(NS1_NN2_HOST,
+        NetUtils.getHostPortString(map.get("ns1").get("ns1-nn2")));
+    assertEquals(NS2_NN1_HOST,
+        NetUtils.getHostPortString(map.get("ns2").get("ns2-nn1")));
+    assertEquals(NS2_NN2_HOST,
+        NetUtils.getHostPortString(map.get("ns2").get("ns2-nn2")));
+
+    assertEquals(NS1_NN1_HOST,
         DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn1"));
-    assertEquals(NS1_NN2_HOST, 
+    assertEquals(NS1_NN2_HOST,
         DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn2"));
-    assertEquals(NS2_NN1_HOST, 
+    assertEquals(NS2_NN1_HOST,
         DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn1"));
 
     // No nameservice was given and we can't determine which service addr
@@ -630,8 +634,29 @@ public class TestDFSUtil {
     Map<String, Map<String, InetSocketAddress>> map =
         DFSUtilClient.getHaNnWebHdfsAddresses(conf, "webhdfs");
 
-    assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
-    assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
+    assertEquals(NS1_NN1_ADDR,
+        NetUtils.getHostPortString(map.get("ns1").get("nn1")));
+    assertEquals(NS1_NN2_ADDR,
+        NetUtils.getHostPortString(map.get("ns1").get("nn2")));
+  }
+
+  @Test
+  public void testIPv6GetHaNnHttpAddresses() throws IOException {
+    final String logicalHostName = "ns1";
+    final String ns1Nn1Addr = "[0:0:0:0:0:b00c:c0a8:12a]:8020";
+    final String ns1Nn2Addr = "[::face:a0b:182a]:8020";
+
+    Configuration conf =
+        createWebHDFSHAConfiguration(logicalHostName, ns1Nn1Addr,
+            ns1Nn2Addr);
+
+    Map<String, Map<String, InetSocketAddress>> map =
+        DFSUtilClient.getHaNnWebHdfsAddresses(conf, "webhdfs");
+
+    assertEquals(ns1Nn1Addr,
+        NetUtils.getHostPortString(map.get("ns1").get("nn1")));
+    assertEquals(ns1Nn2Addr.replace("::", "0:0:0:0:0:"),
+        NetUtils.getHostPortString(map.get("ns1").get("nn2")));
   }
 
  private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
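
Illustrative note (not part of the patch): the second assertion in the new
IPv6 test relies on the resolved address coming back in canonical uncompressed
form, so the compressed "::" in the configured literal is expanded before
comparison. A string-only sketch using the test's own literal:

    public class V6NormalizeSketch {
      public static void main(String[] args) {
        String configured = "[::face:a0b:182a]:8020";
        // Mirrors what the test expects NetUtils.getHostPortString to return.
        System.out.println(configured.replace("::", "0:0:0:0:0:"));
        // -> [0:0:0:0:0:face:a0b:182a]:8020
      }
    }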
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index b65301f8..1b5b6cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
@@ -554,7 +555,8 @@ public class TestFileAppend{
 
       // stop one datanode
       DataNodeProperties dnProp = cluster.stopDataNode(0);
-      String dnAddress = dnProp.datanode.getXferAddress().toString();
+      String dnAddress = NetUtils.getSocketAddressString(
+          dnProp.datanode.getXferAddress());
       if (dnAddress.startsWith("/")) {
         dnAddress = dnAddress.substring(1);
       }
@@ -609,7 +611,8 @@ public class TestFileAppend{
 
       // stop one datanode
       DataNodeProperties dnProp = cluster.stopDataNode(0);
-      String dnAddress = dnProp.datanode.getXferAddress().toString();
+      String dnAddress = NetUtils
+          .getSocketAddressString(dnProp.datanode.getXferAddress());
       if (dnAddress.startsWith("/")) {
         dnAddress = dnAddress.substring(1);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index a7cf68b..0c50a89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -92,11 +92,14 @@ import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 import org.slf4j.event.Level;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 
 /**
  * This class tests various cases during file creation.
  */
 public class TestFileCreation {
+  public static final Log LOG = LogFactory.getLog(TestFileCreation.class);
   static final String DIR = "/" + TestFileCreation.class.getSimpleName() + "/";
 
   {
@@ -125,7 +128,7 @@ public class TestFileCreation {
   // creates a file but does not close it
  public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
-    System.out.println("createFile: Created " + name + " with " + repl + " replica.");
+    LOG.info("createFile: Created " + name + " with " + repl + " replica.");
     FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
         .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
         (short) repl, blockSize);
@@ -305,8 +308,8 @@ public class TestFileCreation {
   public void testFileCreationSetLocalInterface() throws IOException {
     assumeTrue(System.getProperty("os.name").startsWith("Linux"));
 
-    // The mini cluster listens on the loopback so we can use it here
-    checkFileCreation("lo", false);
+    // Use wildcard address to force interface to be used
+    checkFileCreation("0.0.0.0", false);
 
     try {
       checkFileCreation("bogus-interface", false);
@@ -348,9 +351,9 @@ public class TestFileCreation {
       // check that / exists
       //
       Path path = new Path("/");
-      System.out.println("Path : \"" + path.toString() + "\"");
-      System.out.println(fs.getFileStatus(path).isDirectory()); 
-      assertTrue("/ should be a directory", 
+      LOG.info("Path : \"" + path.toString() + "\"");
+      LOG.info(fs.getFileStatus(path).isDirectory());
+      assertTrue("/ should be a directory",
                  fs.getFileStatus(path).isDirectory());
 
       //
@@ -358,8 +361,8 @@ public class TestFileCreation {
       //
       Path dir1 = new Path("/test_dir");
       fs.mkdirs(dir1);
-      System.out.println("createFile: Creating " + dir1.getName() + 
-        " for overwrite of existing directory.");
+      LOG.info("createFile: Creating " + dir1.getName()
+          + " for overwrite of existing directory.");
       try {
         fs.create(dir1, true); // Create path, overwrite=true
         fs.close();
@@ -379,9 +382,9 @@ public class TestFileCreation {
       FSDataOutputStream stm = createFile(fs, file1, 1);
 
       // verify that file exists in FS namespace
-      assertTrue(file1 + " should be a file", 
+      assertTrue(file1 + " should be a file",
                  fs.getFileStatus(file1).isFile());
-      System.out.println("Path : \"" + file1 + "\"");
+      LOG.info("Path : \"" + file1 + "\"");
 
       // write to file
       writeFile(stm);
@@ -393,13 +396,13 @@ public class TestFileCreation {
       assertTrue(file1 + " should be of size " + fileSize +
                  " but found to be of size " + len, 
                   len == fileSize);
-      
+
       // verify the disk space the file occupied
       long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
       assertEquals(file1 + " should take " + fileSize + " bytes disk space " +
           "but found to take " + diskSpace + " bytes", fileSize, diskSpace);
-      
-      // Check storage usage 
+
+      // Check storage usage
       // can't check capacities for real storage since the OS file system may be changing under us.
       if (simulatedStorage) {
         DataNode dn = cluster.getDataNodes().get(0);
@@ -436,7 +439,7 @@ public class TestFileCreation {
       FSDataOutputStream stm1 = createFile(fs, file1, 1);
       FSDataOutputStream stm2 = createFile(fs, file2, 1);
       FSDataOutputStream stm3 = createFile(localfs, file3, 1);
-      System.out.println("DeleteOnExit: Created files.");
+      LOG.info("DeleteOnExit: Created files.");
 
       // write to files and close. Purposely, do not close file2.
       writeFile(stm1);
@@ -467,7 +470,7 @@ public class TestFileCreation {
                  !fs.exists(file2));
       assertTrue(file3 + " still exists inspite of deletOnExit set.",
                  !localfs.exists(file3));
-      System.out.println("DeleteOnExit successful.");
+      LOG.info("DeleteOnExit successful.");
 
     } finally {
       IOUtils.closeStream(fs);
@@ -563,7 +566,7 @@ public class TestFileCreation {
       // verify that file exists in FS namespace
       assertTrue(file1 + " should be a file", 
                  fs.getFileStatus(file1).isFile());
-      System.out.println("Path : \"" + file1 + "\"");
+      LOG.info("Path : \"" + file1 + "\"");
 
       // kill the datanode
       cluster.shutdownDataNodes();
@@ -575,7 +578,7 @@ public class TestFileCreation {
         if (info.length == 0) {
           break;
         }
-        System.out.println("testFileCreationError1: waiting for datanode " +
+        LOG.info("testFileCreationError1: waiting for datanode " +
                            " to die.");
         try {
           Thread.sleep(1000);
@@ -597,7 +600,7 @@ public class TestFileCreation {
       // bad block allocations were cleaned up earlier.
       LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
-      System.out.println("locations = " + locations.locatedBlockCount());
+      LOG.info("locations = " + locations.locatedBlockCount());
       assertTrue("Error blocks were not cleaned up",
                  locations.locatedBlockCount() == 0);
     } finally {
@@ -613,7 +616,7 @@ public class TestFileCreation {
   @Test
   public void testFileCreationError2() throws IOException {
     long leasePeriod = 1000;
-    System.out.println("testFileCreationError2 start");
+    LOG.info("testFileCreationError2 start");
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -632,24 +635,24 @@ public class TestFileCreation {
       //
       Path file1 = new Path("/filestatus.dat");
       createFile(dfs, file1, 1);
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
                          + "Created file filestatus.dat with one replicas.");
 
       LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
           + "The file has " + locations.locatedBlockCount() + " blocks.");
 
       // add one block to the file
       LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
           client.clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID, null, null);
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
           + "Added block " + location.getBlock());
 
       locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                     0, Long.MAX_VALUE);
       int count = locations.locatedBlockCount();
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
           + "The file now has " + count + " blocks.");
       
       // set the soft and hard limit to be 1 second so that the
@@ -665,10 +668,10 @@ public class TestFileCreation {
       // verify that the last block was synchronized.
       locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                     0, Long.MAX_VALUE);
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
           + "locations = " + locations.locatedBlockCount());
       assertEquals(0, locations.locatedBlockCount());
-      System.out.println("testFileCreationError2 successful");
+      LOG.info("testFileCreationError2 successful");
     } finally {
       IOUtils.closeStream(dfs);
       cluster.shutdown();
@@ -678,7 +681,7 @@ public class TestFileCreation {
   /** test addBlock(..) when replication<min and excludeNodes==null. */
   @Test
   public void testFileCreationError3() throws IOException {
-    System.out.println("testFileCreationError3 start");
+    LOG.info("testFileCreationError3 start");
     Configuration conf = new HdfsConfiguration();
     // create cluster
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
@@ -699,7 +702,7 @@ public class TestFileCreation {
         FileSystem.LOG.info("GOOD!", ioe);
       }
 
-      System.out.println("testFileCreationError3 successful");
+      LOG.info("testFileCreationError3 successful");
     } finally {
       IOUtils.closeStream(dfs);
       cluster.shutdown();
@@ -732,7 +735,7 @@ public class TestFileCreation {
       // create a new file.
       Path file1 = new Path("/filestatus.dat");
       HdfsDataOutputStream stm = create(fs, file1, 1);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Created file " + file1);
       assertEquals(file1 + " should be replicated to 1 datanode.", 1,
           stm.getCurrentBlockReplication());
@@ -746,7 +749,7 @@ public class TestFileCreation {
       // rename file wile keeping it open.
       Path fileRenamed = new Path("/filestatusRenamed.dat");
       fs.rename(file1, fileRenamed);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Renamed file " + file1 + " to " +
                          fileRenamed);
       file1 = fileRenamed;
@@ -755,7 +758,7 @@ public class TestFileCreation {
       //
       Path file2 = new Path("/filestatus2.dat");
       FSDataOutputStream stm2 = createFile(fs, file2, 1);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Created file " + file2);
 
       // create yet another new file with full path name. 
@@ -763,21 +766,21 @@ public class TestFileCreation {
       //
       Path file3 = new Path("/user/home/fullpath.dat");
       FSDataOutputStream stm3 = createFile(fs, file3, 1);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Created file " + file3);
       Path file4 = new Path("/user/home/fullpath4.dat");
       FSDataOutputStream stm4 = createFile(fs, file4, 1);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Created file " + file4);
 
       fs.mkdirs(new Path("/bin"));
       fs.rename(new Path("/user/home"), new Path("/bin"));
       Path file3new = new Path("/bin/home/fullpath.dat");
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Renamed file " + file3 + " to " +
                          file3new);
       Path file4new = new Path("/bin/home/fullpath4.dat");
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
                          + "Renamed file " + file4 + " to " +
                          file4new);
 
@@ -837,14 +840,14 @@ public class TestFileCreation {
       DFSClient client = fs.dfs;
       LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
-      System.out.println("locations = " + locations.locatedBlockCount());
+      LOG.info("locations = " + locations.locatedBlockCount());
       assertTrue("Error blocks were not cleaned up for file " + file1,
                  locations.locatedBlockCount() == 3);
 
       // verify filestatus2.dat
       locations = client.getNamenode().getBlockLocations(
                                   file2.toString(), 0, Long.MAX_VALUE);
-      System.out.println("locations = " + locations.locatedBlockCount());
+      LOG.info("locations = " + locations.locatedBlockCount());
       assertTrue("Error blocks were not cleaned up for file " + file2,
                  locations.locatedBlockCount() == 1);
     } finally {
@@ -859,7 +862,7 @@ public class TestFileCreation {
   @Test
   public void testDFSClientDeath() throws IOException, InterruptedException {
     Configuration conf = new HdfsConfiguration();
-    System.out.println("Testing adbornal client death.");
+    LOG.info("Testing adbornal client death.");
     if (simulatedStorage) {
       SimulatedFSDataset.setFactory(conf);
     }
@@ -873,7 +876,7 @@ public class TestFileCreation {
       //
       Path file1 = new Path("/clienttest.dat");
       FSDataOutputStream stm = createFile(fs, file1, 1);
-      System.out.println("Created file clienttest.dat");
+      LOG.info("Created file clienttest.dat");
 
       // write to file
       writeFile(stm);
@@ -889,7 +892,7 @@ public class TestFileCreation {
       cluster.shutdown();
     }
   }
-  
+
   /**
    * Test file creation using createNonRecursive().
    */
@@ -970,7 +973,7 @@ public class TestFileCreation {
   static IOException createNonRecursive(FileSystem fs, Path name,
       int repl, EnumSet<CreateFlag> flag) throws IOException {
     try {
-      System.out.println("createNonRecursive: Attempting to create " + name +
+      LOG.info("createNonRecursive: Attempting to create " + name +
           " with " + repl + " replica.");
       int bufferSize = fs.getConf()
           .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
@@ -1004,9 +1007,9 @@ public class TestFileCreation {
 
     try {
       FileSystem fs = cluster.getFileSystem();
-      
+
       Path[] p = {new Path("/foo"), new Path("/bar")};
-      
+
       //write 2 files at the same time
       FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
       int i = 0;
@@ -1038,9 +1041,9 @@ public class TestFileCreation {
 
     try {
       FileSystem fs = cluster.getFileSystem();
-      
+
       Path[] p = {new Path("/foo"), new Path("/bar")};
-      
+
       //write 2 files at the same time
       FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
       int i = 0;
@@ -1068,7 +1071,7 @@ public class TestFileCreation {
    */
   @Test
   public void testLeaseExpireHardLimit() throws Exception {
-    System.out.println("testLeaseExpireHardLimit start");
+    LOG.info("testLeaseExpireHardLimit start");
     final long leasePeriod = 1000;
     final int DATANODE_NUM = 3;
 
@@ -1113,20 +1116,20 @@ public class TestFileCreation {
           successcount++;
         }
       }
-      System.out.println("successcount=" + successcount);
-      assertTrue(successcount > 0); 
+      LOG.info("successcount=" + successcount);
+      assertTrue(successcount > 0);
     } finally {
       IOUtils.closeStream(dfs);
       cluster.shutdown();
     }
 
-    System.out.println("testLeaseExpireHardLimit successful");
+    LOG.info("testLeaseExpireHardLimit successful");
   }
 
   // test closing file system before all file handles are closed.
   @Test
   public void testFsClose() throws Exception {
-    System.out.println("test file system close start");
+    LOG.info("test file system close start");
     final int DATANODE_NUM = 3;
 
     Configuration conf = new HdfsConfiguration();
@@ -1147,7 +1150,7 @@ public class TestFileCreation {
       // close file system without closing file
       dfs.close();
     } finally {
-      System.out.println("testFsClose successful");
+      LOG.info("testFsClose successful");
       cluster.shutdown();
     }
   }
@@ -1155,7 +1158,7 @@ public class TestFileCreation {
   // test closing file after cluster is shutdown
   @Test
   public void testFsCloseAfterClusterShutdown() throws IOException {
-    System.out.println("test testFsCloseAfterClusterShutdown start");
+    LOG.info("test testFsCloseAfterClusterShutdown start");
     final int DATANODE_NUM = 3;
 
     Configuration conf = new HdfsConfiguration();
@@ -1186,13 +1189,13 @@ public class TestFileCreation {
       boolean hasException = false;
       try {
         out.close();
-        System.out.println("testFsCloseAfterClusterShutdown: Error here");
+        LOG.info("testFsCloseAfterClusterShutdown: Error here");
       } catch (IOException e) {
         hasException = true;
       }
       assertTrue("Failed to close file after cluster shutdown", hasException);
     } finally {
-      System.out.println("testFsCloseAfterClusterShutdown successful");
+      LOG.info("testFsCloseAfterClusterShutdown successful");
       if (cluster != null) {
         cluster.shutdown();
       }
@@ -1211,7 +1214,7 @@ public class TestFileCreation {
   public void testCreateNonCanonicalPathAndRestartRpc() throws Exception {
     doCreateTest(CreationMethod.DIRECT_NN_RPC);
   }
-  
+
   /**
    * Another regression test for HDFS-3626. This one creates files using
    * a Path instantiated from a string object.
@@ -1231,7 +1234,7 @@ public class TestFileCreation {
       throws Exception {
     doCreateTest(CreationMethod.PATH_FROM_URI);
   }
-  
+
   private enum CreationMethod {
     DIRECT_NN_RPC,
     PATH_FROM_URI,
@@ -1246,7 +1249,7 @@ public class TestFileCreation {
       NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
 
       for (String pathStr : NON_CANONICAL_PATHS) {
-        System.out.println("Creating " + pathStr + " by " + method);
+        LOG.info("Creating " + pathStr + " by " + method);
         switch (method) {
         case DIRECT_NN_RPC:
           try {
@@ -1261,7 +1264,7 @@ public class TestFileCreation {
             // So, we expect all of them to fail. 
           }
           break;
-          
+
         case PATH_FROM_URI:
         case PATH_FROM_STRING:
           // Unlike the above direct-to-NN case, we expect these to succeed,
@@ -1279,7 +1282,7 @@ public class TestFileCreation {
           throw new AssertionError("bad method: " + method);
         }
       }
-      
+
       cluster.restartNameNode();
 
     } finally {
@@ -1336,7 +1339,7 @@ public class TestFileCreation {
       dfs.mkdirs(new Path("/foo/dir"));
       String file = "/foo/dir/file";
       Path filePath = new Path(file);
-      
+
       // Case 1: Create file with overwrite, check the blocks of old file
       // are cleaned after creating with overwrite
       NameNode nn = cluster.getNameNode();
@@ -1350,7 +1353,7 @@ public class TestFileCreation {
       } finally {
         out.close();
       }
-      
+
       LocatedBlocks oldBlocks = NameNodeAdapter.getBlockLocations(
           nn, file, 0, fileSize);
       assertBlocks(bm, oldBlocks, true);
@@ -1363,7 +1366,7 @@ public class TestFileCreation {
         out.close();
       }
       dfs.deleteOnExit(filePath);
-      
+
       LocatedBlocks newBlocks = NameNodeAdapter.getBlockLocations(
           nn, file, 0, fileSize);
       assertBlocks(bm, newBlocks, true);
@@ -1377,7 +1380,7 @@ public class TestFileCreation {
         in.close();
       }
       Assert.assertArrayEquals(newData, result);
-      
+
       // Case 2: Restart NN, check the file
       cluster.restartNameNode();
       nn = cluster.getNameNode();
@@ -1388,13 +1391,13 @@ public class TestFileCreation {
         in.close();
       }
       Assert.assertArrayEquals(newData, result);
-      
+
       // Case 3: Save new checkpoint and restart NN, check the file
       NameNodeAdapter.enterSafeMode(nn, false);
       NameNodeAdapter.saveNamespace(nn);
       cluster.restartNameNode();
       nn = cluster.getNameNode();
-      
+
       in = dfs.open(filePath);
       try {
         result = readAll(in);
@@ -1411,8 +1414,8 @@ public class TestFileCreation {
       }
     }
   }
-  
-  private void assertBlocks(BlockManager bm, LocatedBlocks lbs, 
+
+  private void assertBlocks(BlockManager bm, LocatedBlocks lbs,
       boolean exist) {
     for (LocatedBlock locatedBlock : lbs.getLocatedBlocks()) {
       if (exist) {
@@ -1424,7 +1427,7 @@ public class TestFileCreation {
       }
     }
   }
-  
+
   private byte[] readAll(FSDataInputStream in) throws IOException {
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     byte[] buffer = new byte[1024];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
index 826299b..40ad502 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
@@ -195,7 +195,8 @@ public class BlockReaderTestUtil {
     return new BlockReaderFactory(fs.getClient().getConf()).
       setInetSocketAddress(targetAddr).
       setBlock(block).
-      setFileName(targetAddr.toString()+ ":" + block.getBlockId()).
+      setFileName(NetUtils.getSocketAddressString(targetAddr) + ":"
+          + block.getBlockId()).
       setBlockToken(testBlock.getBlockToken()).
       setStartOffset(offset).
       setLength(lenToRead).
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
index c4760a0..a135251 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java
@@ -921,14 +921,14 @@ public class TestQuorumJournalManager {
     GenericTestUtils.assertGlobEquals(paxosDir, "\\d+",
         "3");
   }
-  
+
   @Test
   public void testToString() throws Exception {
     GenericTestUtils.assertMatches(
         qjm.toString(),
-        "QJM to \\[127.0.0.1:\\d+, 127.0.0.1:\\d+, 127.0.0.1:\\d+\\]");
+        "QJM to \\[localhost:\\d+, localhost:\\d+, localhost:\\d+\\]");
   }
-  
+
   @Test
   public void testSelectInputStreamsNotOnBoundary() throws Exception {
     final int txIdsPerSegment = 10; 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
index 5e850b9..a9b315e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockPoolSliceStorage.java
@@ -22,6 +22,8 @@ import org.junit.Test;
 import org.mockito.Mockito;
 
 import java.io.File;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
 import java.util.Random;
 import java.util.UUID;
 import org.slf4j.Logger;
@@ -56,13 +58,37 @@ public class TestBlockPoolSliceStorage {
     }
   }
 
-  private String makeRandomIpAddress() {
+  private String makeRandomIpv4Address() {
     return rand.nextInt(256) + "." +
            rand.nextInt(256) + "." +
            rand.nextInt(256) + "." +
            rand.nextInt(256);
   }
 
+  private String makeRandomIpv6Address() {
+    byte[] bytes = new byte[16];
+    rand.nextBytes(bytes);
+    InetAddress adr = null;
+    try {
+      adr = InetAddress.getByAddress("unused", bytes);
+    } catch (UnknownHostException uhe) {
+      // Should never happen
+      LOG.error("UnknownHostException " + uhe);
+      assertThat(true, is(false));
+    }
+    String addrString = adr.getHostAddress().replaceAll(":", ".");
+
+    return "[" + addrString + "]";
+  }
+
+  private String makeRandomIpAddress() {
+    if (rand.nextBoolean()) {
+      return makeRandomIpv4Address();
+    } else {
+      return makeRandomIpv6Address();
+    }
+  }
+
   private String makeRandomBlockpoolId() {
     return "BP-" + rand.nextInt(Integer.MAX_VALUE) +
            "-" + makeRandomIpAddress() +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
index 594b07b..4b9a0a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
@@ -20,8 +20,10 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
 import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.AnyOf.anyOf;
 import static org.hamcrest.core.IsNot.not;
 
+import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;
 
 import org.apache.hadoop.fs.FileUtil;
@@ -30,6 +32,8 @@ import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 
+import java.net.InetAddress;
+import java.net.Inet6Address;
 import java.io.File;
 import java.io.IOException;
 
@@ -55,6 +59,7 @@ public class TestNameNodeRespectsBindHostKeys {
   public static final Logger LOG =
       LoggerFactory.getLogger(TestNameNodeRespectsBindHostKeys.class);
   private static final String WILDCARD_ADDRESS = "0.0.0.0";
+  private static final String IPV6_WILDCARD_ADDRESS = "0:0:0:0:0:0:0:0";
   private static final String LOCALHOST_SERVER_ADDRESS = "127.0.0.1:0";
   private static String keystoresDir;
   private static String sslConfDir;
@@ -79,9 +84,9 @@ public class TestNameNodeRespectsBindHostKeys {
   public void testRpcBindHostKey() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
-    
+
     LOG.info("Testing without " + DFS_NAMENODE_RPC_BIND_HOST_KEY);
-    
+
     // NN should not bind the wildcard address by default.
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
@@ -97,7 +102,7 @@ public class TestNameNodeRespectsBindHostKeys {
     }
 
     LOG.info("Testing with " + DFS_NAMENODE_RPC_BIND_HOST_KEY);
-    
+
     // Tell NN to bind the wildcard address.
     conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
 
@@ -106,13 +111,36 @@ public class TestNameNodeRespectsBindHostKeys {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
       String address = getRpcServerAddress(cluster);
-      assertThat("Bind address " + address + " is not wildcard.",
-                 address, is("/" + WILDCARD_ADDRESS));
+      assertThat("Bind address " + address + " is not wildcard.", address,
+          anyOf(is("/" + WILDCARD_ADDRESS), is("/" + IPV6_WILDCARD_ADDRESS)));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
       }
-    }    
+    }
+
+    InetAddress localAddr = InetAddress.getLocalHost();
+    if (localAddr instanceof Inet6Address) {
+      // Tell NN to bind the IPv6 wildcard address.
+      conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, IPV6_WILDCARD_ADDRESS);
+
+      // Verify that NN binds wildcard address now.
+      try {
+        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+        cluster.waitActive();
+        String address = getRpcServerAddress(cluster);
+        assertThat("Bind address " + address + " is not wildcard.",
+            address, anyOf(
+                is("/" + WILDCARD_ADDRESS),
+                is("/" + IPV6_WILDCARD_ADDRESS)));
+      } finally {
+        if (cluster != null) {
+          cluster.shutdown();
+        }
+      }
+    } else {
+      LOG.info("Not testing IPv6 binding as IPv6 us not supported");
+    }
   }
 
   @Test (timeout=300000)
@@ -121,7 +149,7 @@ public class TestNameNodeRespectsBindHostKeys {
     MiniDFSCluster cluster = null;
 
     LOG.info("Testing without " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
-    
+
     conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
 
     // NN should not bind the wildcard address by default.
@@ -140,6 +168,27 @@ public class TestNameNodeRespectsBindHostKeys {
 
     LOG.info("Testing with " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
 
+    InetAddress localAddr = InetAddress.getLocalHost();
+    if (localAddr instanceof Inet6Address) {
+      // Tell NN to bind the IPv6 wildcard address.
+      conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, IPV6_WILDCARD_ADDRESS);
+
+      // Verify that NN binds wildcard address now.
+      try {
+        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+        cluster.waitActive();
+        String address = getRpcServerAddress(cluster);
+        assertThat("Bind address " + address + " is not wildcard.", address,
+            anyOf(is("/" + WILDCARD_ADDRESS), is("/" + 
IPV6_WILDCARD_ADDRESS)));
+      } finally {
+        if (cluster != null) {
+          cluster.shutdown();
+        }
+      }
+    } else {
+      LOG.info("Not testing IPv6 binding as IPv6 us not supported");
+    }
+
     // Tell NN to bind the wildcard address.
     conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
 
@@ -148,8 +197,8 @@ public class TestNameNodeRespectsBindHostKeys {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
       String address = getServiceRpcServerAddress(cluster);
-      assertThat("Bind address " + address + " is not wildcard.",
-                 address, is("/" + WILDCARD_ADDRESS));
+      assertThat("Bind address " + address + " is not wildcard.", address,
+          anyOf(is("/" + WILDCARD_ADDRESS), is("/" + IPV6_WILDCARD_ADDRESS)));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -211,7 +260,8 @@ public class TestNameNodeRespectsBindHostKeys {
       conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
-      String address = cluster.getNameNode().getHttpAddress().toString();
+      String address = NetUtils.getSocketAddressString(
+          cluster.getNameNode().getHttpAddress());
       assertFalse("HTTP Bind address not expected to be wildcard by default.",
                   address.startsWith(WILDCARD_ADDRESS));
     } finally {
@@ -231,7 +281,8 @@ public class TestNameNodeRespectsBindHostKeys {
       conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
-      String address = cluster.getNameNode().getHttpAddress().toString();
+      String address = NetUtils.getSocketAddressString(
+          cluster.getNameNode().getHttpAddress());
       assertTrue("HTTP Bind address " + address + " is not wildcard.",
                  address.startsWith(WILDCARD_ADDRESS));
     } finally {
@@ -285,7 +336,8 @@ public class TestNameNodeRespectsBindHostKeys {
       conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
-      String address = cluster.getNameNode().getHttpsAddress().toString();
+      String address = NetUtils.getSocketAddressString(
+          cluster.getNameNode().getHttpsAddress());
       assertFalse("HTTP Bind address not expected to be wildcard by default.",
                   address.startsWith(WILDCARD_ADDRESS));
     } finally {
@@ -305,7 +357,8 @@ public class TestNameNodeRespectsBindHostKeys {
       conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
-      String address = cluster.getNameNode().getHttpsAddress().toString();
+      String address = NetUtils
+          .getSocketAddressString(cluster.getNameNode().getHttpsAddress());
       assertTrue("HTTP Bind address " + address + " is not wildcard.",
                  address.startsWith(WILDCARD_ADDRESS));
     } finally {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
index ada93e8..5000ce0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
@@ -25,7 +25,9 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.AnyOf.anyOf;
 
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
@@ -43,14 +45,18 @@ public class TestNameNodeRpcServer {
     // The name node in MiniDFSCluster only binds to 127.0.0.1.
     // We can set the bind address to 0.0.0.0 to make it listen
     // to all interfaces.
+    // On IPv4-only machines it will return that it is listening on 0.0.0.0
+    // On dual-stack or IPv6-only machines it will return 0:0:0:0:0:0:0:0
     conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
     MiniDFSCluster cluster = null;
 
     try {
       cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
-      assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc())
-          .getClientRpcServer().getListenerAddress().getHostName());
+      String listenerAddress = ((NameNodeRpcServer)cluster.getNameNodeRpc())
+          .getClientRpcServer().getListenerAddress().getHostName();
+      assertThat("Bind address " + listenerAddress + " is not wildcard.",
+          listenerAddress, anyOf(is("0.0.0.0"), is("0:0:0:0:0:0:0:0")));
     } finally {
       if (cluster != null) {
         cluster.shutdown();
