HDFS-9482. Replace DatanodeInfo constructors with a builder pattern. 
Contributed by Brahma Reddy Battula.
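
For reviewers, a minimal before/after sketch of the caller-side migration this
commit introduces. The builder methods are the ones added to DatanodeInfo.java
in the diff below; the field values are illustrative only, not taken from the
patch.

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;

    public class DatanodeInfoBuilderExample {
      public static void main(String[] args) {
        // Before this commit (these constructors are now protected/private):
        //   DatanodeInfo info = new DatanodeInfo(DatanodeID.EMPTY_DATANODE_ID);
        // After: chain only the fields you need, then build().
        DatanodeInfo info = new DatanodeInfoBuilder()
            .setNodeID(DatanodeID.EMPTY_DATANODE_ID) // copies ip/host/uuid/ports
            .setCapacity(3500L)                      // illustrative value
            .setDfsUsed(1000L)                       // illustrative value
            .build();
        System.out.println(info);
      }
    }

Unset builder fields keep their Java defaults (0/null; location defaults to
NetworkTopology.DEFAULT_RACK per the builder's field initializers), matching
the zero-initializing DatanodeInfo(DatanodeID) constructor this replaces.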


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed0bebab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed0bebab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed0bebab

Branch: refs/heads/YARN-4752
Commit: ed0bebabaaf27cd730f7f8eb002d92c9c7db327d
Parents: 62d8c17
Author: Brahma Reddy Battula <bra...@apache.org>
Authored: Tue Nov 8 18:17:07 2016 -0800
Committer: Arpit Agarwal <a...@apache.org>
Committed: Tue Nov 8 18:17:07 2016 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/DFSStripedOutputStream.java     |   4 +-
 .../hadoop/hdfs/protocol/DatanodeInfo.java      | 217 +++++++++++++++----
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  22 +-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  42 ++--
 .../NamenodeProtocolServerSideTranslatorPB.java |   6 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   4 +-
 .../server/datanode/ReportBadBlockAction.java   |   4 +-
 .../erasurecode/StripedBlockWriter.java         |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java      |   7 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |  37 ++--
 .../hadoop/hdfs/TestDFSClientSocketSize.java    |   6 +-
 .../apache/hadoop/hdfs/TestFileCorruption.java  |   7 +-
 .../client/impl/TestBlockReaderFactory.java     |   6 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java    |   4 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   6 +-
 .../impl/TestInterDatanodeProtocol.java         |   4 +-
 .../shortcircuit/TestShortCircuitCache.java     |  11 +-
 17 files changed, 277 insertions(+), 114 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index d5d0dfb..52fc5eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -766,7 +767,8 @@ public class DFSStripedOutputStream extends DFSOutputStream {
         newNodes[i] = nodes[0];
         newStorageIDs[i] = storageIDs[0];
       } else {
-        newNodes[i] = new DatanodeInfo(DatanodeID.EMPTY_DATANODE_ID);
+        newNodes[i] = new DatanodeInfoBuilder()
+            .setNodeID(DatanodeID.EMPTY_DATANODE_ID).build();
         newStorageIDs[i] = "";
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index e9ee8b9..8f9f3d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -86,7 +86,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   protected AdminStates adminState;
   private long maintenanceExpireTimeInMS;
 
-  public DatanodeInfo(DatanodeInfo from) {
+  protected DatanodeInfo(DatanodeInfo from) {
     super(from);
     this.capacity = from.getCapacity();
     this.dfsUsed = from.getDfsUsed();
@@ -103,7 +103,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.upgradeDomain = from.getUpgradeDomain();
   }
 
-  public DatanodeInfo(DatanodeID nodeID) {
+  protected DatanodeInfo(DatanodeID nodeID) {
     super(nodeID);
     this.capacity = 0L;
     this.dfsUsed = 0L;
@@ -118,57 +118,13 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.adminState = null;
   }
 
-  public DatanodeInfo(DatanodeID nodeID, String location) {
+  protected DatanodeInfo(DatanodeID nodeID, String location) {
     this(nodeID);
     this.location = location;
   }
 
-  public DatanodeInfo(DatanodeID nodeID, String location,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final AdminStates adminState,
-      final String upgradeDomain) {
-    this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getDatanodeUuid(),
-        nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(),
-        nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed,
-        cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
-        xceiverCount, location, adminState, upgradeDomain);
-  }
-
-  /** Constructor */
-  public DatanodeInfo(final String ipAddr, final String hostName,
-      final String datanodeUuid, final int xferPort, final int infoPort,
-      final int infoSecurePort, final int ipcPort,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final String networkLocation,
-      final AdminStates adminState) {
-    this(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
-        ipcPort, capacity, dfsUsed, remaining, blockPoolUsed, cacheCapacity,
-        cacheUsed, lastUpdate, lastUpdateMonotonic, xceiverCount,
-        networkLocation, adminState, null);
-  }
-
-  /** Constructor */
-  public DatanodeInfo(final String ipAddr, final String hostName,
-      final String datanodeUuid, final int xferPort, final int infoPort,
-      final int infoSecurePort, final int ipcPort,
-      final long capacity, final long dfsUsed, final long remaining,
-      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
-      final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final String networkLocation,
-      final AdminStates adminState,
-      final String upgradeDomain) {
-    this(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
-        ipcPort, capacity, dfsUsed, 0L, remaining, blockPoolUsed,
-        cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
-        xceiverCount, networkLocation, adminState, upgradeDomain);
-  }
-
   /** Constructor. */
-  public DatanodeInfo(final String ipAddr, final String hostName,
+  private DatanodeInfo(final String ipAddr, final String hostName,
       final String datanodeUuid, final int xferPort, final int infoPort,
       final int infoSecurePort, final int ipcPort, final long capacity,
       final long dfsUsed, final long nonDfsUsed, final long remaining,
@@ -662,4 +618,169 @@ public class DatanodeInfo extends DatanodeID implements Node {
   public void setSoftwareVersion(String softwareVersion) {
     this.softwareVersion = softwareVersion;
   }
+
+  /**
+   * Builder for {@link DatanodeInfo}.
+   */
+  public static class DatanodeInfoBuilder {
+    private String location = NetworkTopology.DEFAULT_RACK;
+    private long capacity;
+    private long dfsUsed;
+    private long remaining;
+    private long blockPoolUsed;
+    private long cacheCapacity;
+    private long cacheUsed;
+    private long lastUpdate;
+    private long lastUpdateMonotonic;
+    private int xceiverCount;
+    private DatanodeInfo.AdminStates adminState;
+    private String upgradeDomain;
+    private String ipAddr;
+    private String hostName;
+    private String datanodeUuid;
+    private int xferPort;
+    private int infoPort;
+    private int infoSecurePort;
+    private int ipcPort;
+    private long nonDfsUsed = 0L;
+
+    public DatanodeInfoBuilder setFrom(DatanodeInfo from) {
+      this.capacity = from.getCapacity();
+      this.dfsUsed = from.getDfsUsed();
+      this.nonDfsUsed = from.getNonDfsUsed();
+      this.remaining = from.getRemaining();
+      this.blockPoolUsed = from.getBlockPoolUsed();
+      this.cacheCapacity = from.getCacheCapacity();
+      this.cacheUsed = from.getCacheUsed();
+      this.lastUpdate = from.getLastUpdate();
+      this.lastUpdateMonotonic = from.getLastUpdateMonotonic();
+      this.xceiverCount = from.getXceiverCount();
+      this.location = from.getNetworkLocation();
+      this.adminState = from.getAdminState();
+      this.upgradeDomain = from.getUpgradeDomain();
+      setNodeID(from);
+      return this;
+    }
+
+    public DatanodeInfoBuilder setNodeID(DatanodeID nodeID) {
+      this.ipAddr = nodeID.getIpAddr();
+      this.hostName = nodeID.getHostName();
+      this.datanodeUuid = nodeID.getDatanodeUuid();
+      this.xferPort = nodeID.getXferPort();
+      this.infoPort = nodeID.getInfoPort();
+      this.infoSecurePort = nodeID.getInfoSecurePort();
+      this.ipcPort = nodeID.getIpcPort();
+      return this;
+    }
+
+    public DatanodeInfoBuilder setCapacity(long capacity) {
+      this.capacity = capacity;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setDfsUsed(long dfsUsed) {
+      this.dfsUsed = dfsUsed;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setRemaining(long remaining) {
+      this.remaining = remaining;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setBlockPoolUsed(long blockPoolUsed) {
+      this.blockPoolUsed = blockPoolUsed;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setCacheCapacity(long cacheCapacity) {
+      this.cacheCapacity = cacheCapacity;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setCacheUsed(long cacheUsed) {
+      this.cacheUsed = cacheUsed;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setLastUpdate(long lastUpdate) {
+      this.lastUpdate = lastUpdate;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setLastUpdateMonotonic(
+        long lastUpdateMonotonic) {
+      this.lastUpdateMonotonic = lastUpdateMonotonic;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setXceiverCount(int xceiverCount) {
+      this.xceiverCount = xceiverCount;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setAdminState(
+        DatanodeInfo.AdminStates adminState) {
+      this.adminState = adminState;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setUpgradeDomain(String upgradeDomain) {
+      this.upgradeDomain = upgradeDomain;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setIpAddr(String ipAddr) {
+      this.ipAddr = ipAddr;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setHostName(String hostName) {
+      this.hostName = hostName;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setDatanodeUuid(String datanodeUuid) {
+      this.datanodeUuid = datanodeUuid;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setXferPort(int xferPort) {
+      this.xferPort = xferPort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setInfoPort(int infoPort) {
+      this.infoPort = infoPort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setInfoSecurePort(int infoSecurePort) {
+      this.infoSecurePort = infoSecurePort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setIpcPort(int ipcPort) {
+      this.ipcPort = ipcPort;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setNetworkLocation(String networkLocation) {
+      this.location = networkLocation;
+      return this;
+    }
+
+    public DatanodeInfoBuilder setNonDfsUsed(long nonDfsUsed) {
+      this.nonDfsUsed = nonDfsUsed;
+      return this;
+    }
+
+    public DatanodeInfo build() {
+      return new DatanodeInfo(ipAddr, hostName, datanodeUuid, xferPort,
+          infoPort, infoSecurePort, ipcPort, capacity, dfsUsed, nonDfsUsed,
+          remaining, blockPoolUsed, cacheCapacity, cacheUsed, lastUpdate,
+          lastUpdateMonotonic, xceiverCount, location, adminState,
+          upgradeDomain);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 246b5a5..2ba7bad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -70,6 +70,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -585,13 +586,18 @@ public class PBHelperClient {
     if (di == null) {
       return null;
     }
-    DatanodeInfo dinfo = new DatanodeInfo(convert(di.getId()),
-        di.hasLocation() ? di.getLocation() : null, di.getCapacity(),
-        di.getDfsUsed(), di.getRemaining(), di.getBlockPoolUsed(),
-        di.getCacheCapacity(), di.getCacheUsed(), di.getLastUpdate(),
-        di.getLastUpdateMonotonic(), di.getXceiverCount(),
-        convert(di.getAdminState()),
-        di.hasUpgradeDomain() ? di.getUpgradeDomain() : null);
+    DatanodeInfoBuilder dinfo =
+        new DatanodeInfoBuilder().setNodeID(convert(di.getId()))
+            .setNetworkLocation(di.hasLocation() ? di.getLocation() : null)
+            .setCapacity(di.getCapacity()).setDfsUsed(di.getDfsUsed())
+            .setRemaining(di.getRemaining())
+            .setBlockPoolUsed(di.getBlockPoolUsed())
+            .setCacheCapacity(di.getCacheCapacity())
+            .setCacheUsed(di.getCacheUsed()).setLastUpdate(di.getLastUpdate())
+            .setLastUpdateMonotonic(di.getLastUpdateMonotonic())
+            .setXceiverCount(di.getXceiverCount())
+            .setAdminState(convert(di.getAdminState())).setUpgradeDomain(
+            di.hasUpgradeDomain() ? di.getUpgradeDomain() : null);
     if (di.hasNonDfsUsed()) {
       dinfo.setNonDfsUsed(di.getNonDfsUsed());
     } else {
@@ -599,7 +605,7 @@ public class PBHelperClient {
       long nonDFSUsed = di.getCapacity() - di.getDfsUsed() - di.getRemaining();
       dinfo.setNonDfsUsed(nonDFSUsed < 0 ? 0 : nonDFSUsed);
     }
-    return dinfo;
+    return dinfo.build();
   }
 
   public static StorageType[] convertStorageTypes(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 97cb042..a75f4f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
@@ -271,27 +272,26 @@ class JsonUtilClient {
     }
 
     // TODO: Fix storageID
-    return new DatanodeInfo(
-        ipAddr,
-        (String)m.get("hostName"),
-        (String)m.get("storageID"),
-        xferPort,
-        ((Number) m.get("infoPort")).intValue(),
-        getInt(m, "infoSecurePort", 0),
-        ((Number) m.get("ipcPort")).intValue(),
-
-        getLong(m, "capacity", 0l),
-        getLong(m, "dfsUsed", 0l),
-        getLong(m, "remaining", 0l),
-        getLong(m, "blockPoolUsed", 0l),
-        getLong(m, "cacheCapacity", 0l),
-        getLong(m, "cacheUsed", 0l),
-        getLong(m, "lastUpdate", 0l),
-        getLong(m, "lastUpdateMonotonic", 0l),
-        getInt(m, "xceiverCount", 0),
-        getString(m, "networkLocation", ""),
-        DatanodeInfo.AdminStates.valueOf(getString(m, "adminState", "NORMAL")),
-        getString(m, "upgradeDomain", ""));
+    return new DatanodeInfoBuilder().setIpAddr(ipAddr)
+        .setHostName((String) m.get("hostName"))
+        .setDatanodeUuid((String) m.get("storageID")).setXferPort(xferPort)
+        .setInfoPort(((Number) m.get("infoPort")).intValue())
+        .setInfoSecurePort(getInt(m, "infoSecurePort", 0))
+        .setIpcPort(((Number) m.get("ipcPort")).intValue())
+        .setCapacity(getLong(m, "capacity", 0L))
+        .setDfsUsed(getLong(m, "dfsUsed", 0L))
+        .setRemaining(getLong(m, "remaining", 0L))
+        .setBlockPoolUsed(getLong(m, "blockPoolUsed", 0L))
+        .setCacheCapacity(getLong(m, "cacheCapacity", 0L))
+        .setCacheUsed(getLong(m, "cacheUsed", 0L))
+        .setLastUpdate(getLong(m, "lastUpdate", 0L))
+        .setLastUpdateMonotonic(getLong(m, "lastUpdateMonotonic", 0L))
+        .setXceiverCount(getInt(m, "xceiverCount", 0))
+        .setNetworkLocation(getString(m, "networkLocation", "")).setAdminState(
+            DatanodeInfo.AdminStates
+                .valueOf(getString(m, "adminState", "NORMAL")))
+        .setUpgradeDomain(getString(m, "upgradeDomain", ""))
+        .build();
   }
 
   /** Convert an Object[] to a DatanodeInfo[]. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index db7a8d2..c7b3e74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.protocolPB;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
@@ -78,8 +79,9 @@ public class NamenodeProtocolServerSideTranslatorPB implements
   @Override
   public GetBlocksResponseProto getBlocks(RpcController unused,
       GetBlocksRequestProto request) throws ServiceException {
-    DatanodeInfo dnInfo = new DatanodeInfo(PBHelperClient.convert(request
-        .getDatanode()));
+    DatanodeInfo dnInfo = new DatanodeInfoBuilder()
+        .setNodeID(PBHelperClient.convert(request.getDatanode()))
+        .build();
     BlocksWithLocations blocks;
     try {
       blocks = impl.getBlocks(dnInfo, request.getSize());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index de9e48e..78a2044 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -123,6 +123,7 @@ import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -2384,7 +2385,8 @@ public class DataNode extends ReconfigurableBase
         in = new DataInputStream(unbufIn);
         blockSender = new BlockSender(b, 0, b.getNumBytes(), 
             false, false, true, DataNode.this, null, cachingStrategy);
-        DatanodeInfo srcNode = new DatanodeInfo(bpReg);
+        DatanodeInfo srcNode = new DatanodeInfoBuilder().setNodeID(bpReg)
+            .build();
 
         new Sender(out).writeBlock(b, targetStorageTypes[0], accessToken,
             clientname, targets, targetStorageTypes, srcNode,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
index be46707..2946358 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
@@ -52,7 +53,8 @@ public class ReportBadBlockAction implements BPServiceActorAction {
     if (bpRegistration == null) {
       return;
     }
-    DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
+    DatanodeInfo[] dnArr = {new DatanodeInfoBuilder()
+        .setNodeID(bpRegistration).build()};
     String[] uuids = { storageUuid };
     StorageType[] types = { storageType };
     LocatedBlock[] locatedBlock = { new LocatedBlock(block,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java
index 11551e7..592be45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockWriter.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSPacket;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
@@ -132,7 +133,8 @@ class StripedBlockWriter {
           DFSUtilClient.getSmallBufferSize(conf)));
       in = new DataInputStream(unbufIn);
 
-      DatanodeInfo source = new DatanodeInfo(datanode.getDatanodeId());
+      DatanodeInfo source = new DatanodeInfoBuilder()
+          .setNodeID(datanode.getDatanodeId()).build();
       new Sender(out).writeBlock(block, storageType,
           blockToken, "", new DatanodeInfo[]{target},
           new StorageType[]{storageType}, source,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d6ae0fa..7e3fabe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -181,6 +181,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
@@ -4040,7 +4041,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 
       DatanodeInfo[] arr = new DatanodeInfo[results.size()];
       for (int i=0; i<arr.length; i++) {
-        arr[i] = new DatanodeInfo(results.get(i));
+        arr[i] = new DatanodeInfoBuilder().setFrom(results.get(i))
+            .build();
       }
       return arr;
     } finally {
@@ -4061,7 +4063,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
       for (int i = 0; i < reports.length; i++) {
         final DatanodeDescriptor d = datanodes.get(i);
-        reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
+        reports[i] = new DatanodeStorageReport(
+            new DatanodeInfoBuilder().setFrom(d).build(),
             d.getStorageReports());
       }
       return reports;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 7f26b03..945d2c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -110,6 +110,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -1066,34 +1067,42 @@ public class DFSTestUtil {
   }
 
   public static DatanodeInfo getLocalDatanodeInfo() {
-    return new DatanodeInfo(getLocalDatanodeID());
+    return new DatanodeInfoBuilder().setNodeID(getLocalDatanodeID())
+        .build();
   }
 
   public static DatanodeInfo getDatanodeInfo(String ipAddr) {
-    return new DatanodeInfo(getDatanodeID(ipAddr));
+    return new DatanodeInfoBuilder().setNodeID(getDatanodeID(ipAddr))
+        .build();
   }
   
   public static DatanodeInfo getLocalDatanodeInfo(int port) {
-    return new DatanodeInfo(getLocalDatanodeID(port));
+    return new DatanodeInfoBuilder().setNodeID(getLocalDatanodeID(port))
+        .build();
   }
 
   public static DatanodeInfo getDatanodeInfo(String ipAddr, 
       String host, int port) {
-    return new DatanodeInfo(new DatanodeID(ipAddr, host,
-        UUID.randomUUID().toString(), port,
-        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT));
+    return new DatanodeInfoBuilder().setNodeID(
+        new DatanodeID(ipAddr, host, UUID.randomUUID().toString(), port,
+            DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+            DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
+            DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT)).build();
   }
 
   public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
       String hostname, AdminStates adminState) {
-    return new DatanodeInfo(ipAddr, hostname, "",
-        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
-        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
-        1l, 2l, 3l, 4l, 0l, 0l, 0l, 5, 6, "local", adminState);
+    return new DatanodeInfoBuilder().setIpAddr(ipAddr).setHostName(hostname)
+        .setDatanodeUuid("")
+        .setXferPort(DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT)
+        .setInfoPort(DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT)
+        .setInfoSecurePort(DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT)
+        .setIpcPort(DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT).setCapacity(1L)
+        .setDfsUsed(2L).setRemaining(3L).setBlockPoolUsed(4L)
+        .setCacheCapacity(0L).setCacheUsed(0L).setLastUpdate(0L)
+        .setLastUpdateMonotonic(5).setXceiverCount(6)
+        .setNetworkLocation("local").setAdminState(adminState)
+        .build();
   }
 
   public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
index a2a7afc..fa12f34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 
@@ -91,7 +91,9 @@ public class TestDFSClientSocketSize {
       cluster.waitActive();
       LOG.info("MiniDFSCluster started.");
       try (Socket socket = DataStreamer.createSocketForPipeline(
-          new DatanodeInfo(cluster.dataNodes.get(0).datanode.getDatanodeId()),
+          new DatanodeInfoBuilder()
+              .setNodeID(cluster.dataNodes.get(0).datanode.getDatanodeId())
+              .build(),
           1, cluster.getFileSystem().getClient())) {
         return socket.getSendBufferSize();
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 5477700..1f62414 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.base.Supplier;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -43,7 +44,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -161,8 +161,9 @@ public class TestFileCorruption {
       FSNamesystem ns = cluster.getNamesystem();
       ns.writeLock();
       try {
-        cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
-            blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
+        cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,
+            new DatanodeInfoBuilder().setNodeID(dnR).build(), "TEST",
+            "STORAGE_ID");
       } finally {
         ns.writeUnlock();
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java
index ca498c7..2d04549 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo;
 import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor;
@@ -333,8 +334,9 @@ public class TestBlockReaderFactory {
     Assert.assertTrue(Arrays.equals(contents, expected));
     final ShortCircuitCache cache =
         fs.getClient().getClientContext().getShortCircuitCache();
-    final DatanodeInfo datanode =
-        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+    final DatanodeInfo datanode = new DatanodeInfoBuilder()
+        .setNodeID(cluster.getDataNodes().get(0).getDatanodeId())
+        .build();
     cache.getDfsClientShmManager().visit(new Visitor() {
       @Override
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index 4072071..5cb2571 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -735,7 +736,8 @@ public class TestPBHelper {
   @Test
   public void testDataNodeInfoPBHelper() {
     DatanodeID id = DFSTestUtil.getLocalDatanodeID();
-    DatanodeInfo dnInfos0 = new DatanodeInfo(id);
+    DatanodeInfo dnInfos0 = new DatanodeInfoBuilder().setNodeID(id)
+        .build();
     dnInfos0.setCapacity(3500L);
     dnInfos0.setDfsUsed(1000L);
     dnInfos0.setNonDfsUsed(2000L);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index c09303f..f011b9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -510,8 +511,9 @@ public class TestBlockRecovery {
   private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
     Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
     DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
-    DatanodeInfo[] locs = new DatanodeInfo[] {
-        new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
+    DatanodeInfo[] locs = new DatanodeInfo[] {new DatanodeInfoBuilder()
+        .setNodeID(dn.getDNRegistrationForBP(
+            block.getBlockPoolId())).build(),
         mockOtherDN };
     RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
     blocks.add(rBlock);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
index 4f6db24..86e9f90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -400,7 +401,8 @@ public class TestInterDatanodeProtocol {
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
     DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
-    DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
+    DatanodeInfo dInfo = new DatanodeInfoBuilder().setNodeID(fakeDnId)
+        .build();
     InterDatanodeProtocol proxy = null;
 
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed0bebab/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index 8e217c2..06c6cf6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.DomainPeer;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
@@ -430,8 +431,9 @@ public class TestShortCircuitCache {
     DomainPeer peer = getDomainPeerToDn(conf);
     MutableBoolean usedPeer = new MutableBoolean(false);
     ExtendedBlockId blockId = new ExtendedBlockId(123, "xyz");
-    final DatanodeInfo datanode =
-        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+    final DatanodeInfo datanode = new DatanodeInfoBuilder()
+        .setNodeID(cluster.getDataNodes().get(0).getDatanodeId())
+        .build();
     // Allocating the first shm slot requires using up a peer.
     Slot slot = cache.allocShmSlot(datanode, peer, usedPeer,
                     blockId, "testAllocShm_client");
@@ -571,8 +573,9 @@ public class TestShortCircuitCache {
     Assert.assertTrue(Arrays.equals(contents, expected));
     // Loading this file brought the ShortCircuitReplica into our local
     // replica cache.
-    final DatanodeInfo datanode =
-        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+    final DatanodeInfo datanode = new DatanodeInfoBuilder()
+        .setNodeID(cluster.getDataNodes().get(0).getDatanodeId())
+        .build();
     cache.getDfsClientShmManager().visit(new Visitor() {
       @Override
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)

