HDFS-9004. Add upgrade domain to DatanodeInfo. Contributed by Ming Ma (via Lei (Eddy) Xu).
Change-Id: I887c66578eebd61acc34b94f18da6e6851c609f4

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a9c7076
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a9c7076
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a9c7076

Branch: refs/heads/HDFS-7966
Commit: 3a9c7076e81c1cc47c0ecf30c60abd9a65d8a501
Parents: c39ddc3
Author: Lei Xu <l...@apache.org>
Authored: Sat Sep 19 18:08:09 2015 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Sat Sep 19 18:08:09 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/protocol/DatanodeInfo.java      | 41 ++++++++++++++++++--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  3 ++
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |  3 +-
 .../src/main/proto/hdfs.proto                   |  1 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  3 +-
 .../hdfs/server/namenode/FSNamesystem.java      |  3 ++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    |  3 ++
 .../server/namenode/TestNameNodeMXBean.java     | 20 ++++++++++
 8 files changed, 72 insertions(+), 5 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9c7076/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 2ef40d2..c895a1a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -53,7 +53,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private String location = NetworkTopology.DEFAULT_RACK;
   private String softwareVersion;
   private List<String> dependentHostNames = new LinkedList<String>();
-
+  private String upgradeDomain;
 
   // Datanode administrative states
   public enum AdminStates {
@@ -95,6 +95,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.xceiverCount = from.getXceiverCount();
     this.location = from.getNetworkLocation();
     this.adminState = from.getAdminState();
+    this.upgradeDomain = from.getUpgradeDomain();
   }
 
   public DatanodeInfo(DatanodeID nodeID) {
@@ -120,12 +121,13 @@ public class DatanodeInfo extends DatanodeID implements Node {
       final long capacity, final long dfsUsed, final long remaining,
       final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
       final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final AdminStates adminState) {
+      final int xceiverCount, final AdminStates adminState,
+      final String upgradeDomain) {
     this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getDatanodeUuid(),
         nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(),
         nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed,
         cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
-        xceiverCount, location, adminState);
+        xceiverCount, location, adminState, upgradeDomain);
   }
 
   /** Constructor */
@@ -137,6 +139,22 @@ public class DatanodeInfo extends DatanodeID implements Node {
       final long lastUpdate, final long lastUpdateMonotonic,
       final int xceiverCount, final String networkLocation,
       final AdminStates adminState) {
+    this(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
+        ipcPort, capacity, dfsUsed, remaining, blockPoolUsed, cacheCapacity,
+        cacheUsed, lastUpdate, lastUpdateMonotonic, xceiverCount,
+        networkLocation, adminState, null);
+  }
+
+  /** Constructor */
+  public DatanodeInfo(final String ipAddr, final String hostName,
+      final String datanodeUuid, final int xferPort, final int infoPort,
+      final int infoSecurePort, final int ipcPort,
+      final long capacity, final long dfsUsed, final long remaining,
+      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
+      final long lastUpdate, final long lastUpdateMonotonic,
+      final int xceiverCount, final String networkLocation,
+      final AdminStates adminState,
+      final String upgradeDomain) {
     super(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
         ipcPort);
     this.capacity = capacity;
@@ -150,6 +168,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.xceiverCount = xceiverCount;
     this.location = networkLocation;
     this.adminState = adminState;
+    this.upgradeDomain = upgradeDomain;
   }
 
   /** Network location name */
@@ -300,6 +319,16 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.location = NodeBase.normalize(location);
   }
 
+  /** Sets the upgrade domain */
+  public void setUpgradeDomain(String upgradeDomain) {
+    this.upgradeDomain = upgradeDomain;
+  }
+
+  /** upgrade domain */
+  public String getUpgradeDomain() {
+    return upgradeDomain;
+  }
+
   /** Add a hostname to a list of network dependencies */
   public void addDependentHostName(String hostname) {
     dependentHostNames.add(hostname);
@@ -341,6 +370,9 @@ public class DatanodeInfo extends DatanodeID implements Node {
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append("Rack: "+location+"\n");
     }
+    if (upgradeDomain != null) {
+      buffer.append("Upgrade domain: "+ upgradeDomain +"\n");
+    }
     buffer.append("Decommission Status : ");
     if (isDecommissioned()) {
       buffer.append("Decommissioned\n");
@@ -380,6 +412,9 @@ public class DatanodeInfo extends DatanodeID implements Node {
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append(" "+location);
     }
+    if (upgradeDomain != null) {
+      buffer.append(" " + upgradeDomain);
+    }
     if (isDecommissioned()) {
       buffer.append(" DD");
     } else if (isDecommissionInProgress()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9c7076/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 1e561cc..98de2e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -140,6 +140,9 @@ public class PBHelperClient {
     if (info.getNetworkLocation() != null) {
       builder.setLocation(info.getNetworkLocation());
     }
+    if (info.getUpgradeDomain() != null) {
+      builder.setUpgradeDomain(info.getUpgradeDomain());
+    }
     builder
       .setId(convert((DatanodeID) info))
       .setCapacity(info.getCapacity())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9c7076/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 713836c..3f85814 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -241,7 +241,8 @@ class JsonUtilClient {
         getLong(m, "lastUpdateMonotonic", 0l),
         getInt(m, "xceiverCount", 0),
         getString(m, "networkLocation", ""),
-        DatanodeInfo.AdminStates.valueOf(getString(m, "adminState", "NORMAL")));
+        DatanodeInfo.AdminStates.valueOf(getString(m, "adminState", "NORMAL")),
+        getString(m, "upgradeDomain", ""));
   }
 
   /** Convert an Object[] to a DatanodeInfo[]. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9c7076/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 86fb462..ee77dc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -98,6 +98,7 @@ message DatanodeInfoProto {
   optional uint64 cacheCapacity = 11 [default = 0];
   optional uint64 cacheUsed = 12 [default = 0];
   optional uint64 lastUpdateMonotonic = 13 [default = 0];
+  optional string upgradeDomain = 14;
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9c7076/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 5b60307..cf55445 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -594,7 +594,8 @@ public class PBHelper {
         di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
         di.getBlockPoolUsed(), di.getCacheCapacity(), di.getCacheUsed(),
         di.getLastUpdate(), di.getLastUpdateMonotonic(),
-        di.getXceiverCount(), PBHelper.convert(di.getAdminState()));
+        di.getXceiverCount(), PBHelper.convert(di.getAdminState()),
+        di.hasUpgradeDomain() ? di.getUpgradeDomain() : null);
   }
 
   static public DatanodeInfo[] convert(DatanodeInfoProto di[]) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9c7076/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 328c29d..75b6be9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5922,6 +5922,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           .put("estimatedCapacityLostTotal",
               volumeFailureSummary.getEstimatedCapacityLostTotal());
       }
+      if (node.getUpgradeDomain() != null) {
+        innerinfo.put("upgradeDomain", node.getUpgradeDomain());
+      }
       info.put(node.getHostName() + ":" + node.getXferPort(), innerinfo.build());
     }
     return JSON.toString(info);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9c7076/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 0de89cc..4b0e63e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -169,6 +169,9 @@ public class JsonUtil {
     m.put("xceiverCount", datanodeinfo.getXceiverCount());
     m.put("networkLocation", datanodeinfo.getNetworkLocation());
     m.put("adminState", datanodeinfo.getAdminState().name());
+    if (datanodeinfo.getUpgradeDomain() != null) {
+      m.put("upgradeDomain", datanodeinfo.getUpgradeDomain());
+    }
     return m;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a9c7076/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index f8aa317..f10d57e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -76,6 +78,15 @@ public class TestNameNodeMXBean {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     cluster.waitActive();
 
+    // Set upgrade domain on the first DN.
+    String upgradeDomain = "abcd";
+    DatanodeManager dm = cluster.getNameNode().getNamesystem().
+        getBlockManager().getDatanodeManager();
+    DatanodeDescriptor dd = dm.getDatanode(
+        cluster.getDataNodes().get(0).getDatanodeId());
+    dd.setUpgradeDomain(upgradeDomain);
+    String dnXferAddrWithUpgradeDomainSet = dd.getXferAddr();
+
     FSNamesystem fsn = cluster.getNameNode().namesystem;
     MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
 
@@ -125,6 +136,15 @@ public class TestNameNodeMXBean {
       assertTrue(((Long)liveNode.get("capacity")) > 0);
       assertTrue(liveNode.containsKey("numBlocks"));
       assertTrue(((Long)liveNode.get("numBlocks")) == 0);
+      // a. By default the upgrade domain isn't defined on any DN.
+      // b. If the upgrade domain is set on a DN, JMX should have the same
+      // value.
+      String xferAddr = (String)liveNode.get("xferaddr");
+      if (!xferAddr.equals(dnXferAddrWithUpgradeDomainSet)) {
+        assertTrue(!liveNode.containsKey("upgradeDomain"));
+      } else {
+        assertTrue(liveNode.get("upgradeDomain").equals(upgradeDomain));
+      }
     }
     assertEquals(fsn.getLiveNodes(), alivenodeinfo);
     // get attribute deadnodeinfo
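
----------------------------------------------------------------------
For context, a minimal usage sketch of the accessors this patch adds to
DatanodeInfo. It is not part of the commit: the class name below and the
DatanodeID argument values are placeholders, and it assumes the
seven-argument DatanodeID constructor available on this branch.

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    public class UpgradeDomainSketch {
      public static void main(String[] args) {
        // Placeholder DatanodeID; address, uuid and ports are illustrative.
        DatanodeID id = new DatanodeID("127.0.0.1", "localhost", "example-uuid",
            50010, 50075, 50475, 50020);
        DatanodeInfo info = new DatanodeInfo(id);

        // The upgrade domain defaults to null, so the new optional
        // upgradeDomain protobuf field and the "upgradeDomain" JSON/JMX key
        // are simply omitted for such a node.
        System.out.println(info.getUpgradeDomain());   // prints "null"

        // Once set, the value appears in the DatanodeInfo report strings
        // (see the two buffer.append hunks above) and is carried through
        // PBHelper/PBHelperClient and JsonUtil/JsonUtilClient.
        info.setUpgradeDomain("abcd");
        System.out.println(info.getUpgradeDomain());   // prints "abcd"
      }
    }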