This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
     new 5a8f70a72eb HDFS-17559. Fix the uuid as null in NameNodeMXBean (#6906). Contributed by Haiyang Hu.
5a8f70a72eb is described below

commit 5a8f70a72eb012e4345d8693ee939eb2237f1dd7
Author: huhaiyang <huhaiyang...@126.com>
AuthorDate: Sat Jul 6 15:46:25 2024 +0800

    HDFS-17559. Fix the uuid as null in NameNodeMXBean (#6906). Contributed by Haiyang Hu.

    Signed-off-by: Ayush Saxena <ayushsax...@apache.org>
---
 .../hadoop/hdfs/server/namenode/FSNamesystem.java |  9 +--
 .../hdfs/server/namenode/TestNameNodeMXBean.java  | 64 ++++++++++++++++++++++
 2 files changed, 69 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index edc790dbc30..2cb29dfef8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -97,6 +97,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_LI
 import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry;
 
 import java.nio.charset.StandardCharsets;
+import java.util.Optional;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.text.CaseUtils;
@@ -6674,7 +6675,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           .put("infoSecureAddr", node.getInfoSecureAddr())
           .put("xferaddr", node.getXferAddr())
           .put("location", node.getNetworkLocation())
-          .put("uuid", node.getDatanodeUuid())
+          .put("uuid", Optional.ofNullable(node.getDatanodeUuid()).orElse(""))
           .put("lastContact", getLastContact(node))
           .put("usedSpace", getDfsUsed(node))
           .put("adminState", node.getAdminState().toString())
@@ -6728,7 +6729,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           .put("adminState", node.getAdminState().toString())
           .put("xferaddr", node.getXferAddr())
           .put("location", node.getNetworkLocation())
-          .put("uuid", node.getDatanodeUuid())
+          .put("uuid", Optional.ofNullable(node.getDatanodeUuid()).orElse(""))
           .build();
       info.put(node.getXferAddrWithHostname(), innerinfo);
     }
@@ -6751,7 +6752,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           .<String, Object> builder()
           .put("xferaddr", node.getXferAddr())
           .put("location", node.getNetworkLocation())
-          .put("uuid", node.getDatanodeUuid())
+          .put("uuid", Optional.ofNullable(node.getDatanodeUuid()).orElse(""))
           .put("underReplicatedBlocks",
               node.getLeavingServiceStatus().getUnderReplicatedBlocks())
           .put("decommissionOnlyReplicas",
@@ -6782,7 +6783,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           .<String, Object> builder()
           .put("xferaddr", node.getXferAddr())
           .put("location", node.getNetworkLocation())
-          .put("uuid", node.getDatanodeUuid())
+          .put("uuid", Optional.ofNullable(node.getDatanodeUuid()).orElse(""))
           .put("underReplicatedBlocks",
               node.getLeavingServiceStatus().getUnderReplicatedBlocks())
           .put("maintenanceOnlyReplicas",
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index 89ae01ddd25..2f8258baa5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -18,7 +18,11 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
+
+import java.util.Optional;
 import java.util.function.Supplier;
+
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -1132,6 +1136,66 @@ public class TestNameNodeMXBean {
     }
   }
 
+  @SuppressWarnings({ "unchecked" })
+  @Test
+  public void testDeadNodesInNameNodeMXBean() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+    MiniDFSCluster cluster = null;
+    HostsFileWriter hostsFileWriter = new HostsFileWriter();
+    hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot()).numDataNodes(3).build();
+      cluster.waitActive();
+
+      FSNamesystem fsn = cluster.getNameNode().namesystem;
+
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+          "Hadoop:service=NameNode,name=NameNodeInfo");
+
+      List<String> hosts = new ArrayList<>();
+      for(DataNode dn : cluster.getDataNodes()) {
+        hosts.add(dn.getDisplayName());
+      }
+
+      DatanodeDescriptor mockNode = new DatanodeDescriptor(
+          new DatanodeID("127.0.0.2", "127.0.0.2", "",
+              5000, 5001, 5002, 5003));
+
+      assertEquals("", Optional.ofNullable(mockNode.getDatanodeUuid()).orElse(""));
+      hosts.add(mockNode.getXferAddrWithHostname());
+      hostsFileWriter.initIncludeHosts(hosts.toArray(
+          new String[hosts.size()]));
+      fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
+      DatanodeManager dm = cluster.getNameNode().getNamesystem().
+          getBlockManager().getDatanodeManager();
+      LOG.info("Get all include nodes: {}", dm.getHostConfigManager().getIncludes());
+
+      // get attribute DeadNodes
+      String deadNodeInfo = (String) (mbs.getAttribute(mxbeanName,
+          "DeadNodes"));
+      assertEquals(fsn.getDeadNodes(), deadNodeInfo);
+      LOG.info("Get deadNode info: {}", deadNodeInfo);
+      Map<String, Map<String, Object>> deadNodes =
+          (Map<String, Map<String, Object>>) JSON.parse(deadNodeInfo);
+      assertEquals(1, deadNodes.size());
+      for (Map<String, Object> deadNode : deadNodes.values()) {
+        assertTrue(deadNode.containsKey("lastContact"));
+        assertTrue(deadNode.containsKey("adminState"));
+        assertTrue(deadNode.containsKey("xferaddr"));
+        assertEquals("", deadNode.get("uuid"));
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      hostsFileWriter.cleanup();
+    }
+  }
+
   void verifyTotalBlocksMetrics(long expectedTotalReplicatedBlocks,
       long expectedTotalECBlockGroups, long actualTotalBlocks)
       throws Exception {

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
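For context on the pattern the patch applies: the per-node info maps in FSNamesystem are built with Guava's ImmutableMap.Builder, which rejects null values, and a DataNode that appears only in the include file and never registered has no UUID yet. Mapping a null UUID to an empty string keeps the "uuid" entry well-formed in the NameNodeInfo MBean output. Below is a minimal, self-contained sketch of that guard; it is illustrative only, uses plain (unshaded) Guava rather than Hadoop's org.apache.hadoop.thirdparty copy, and the class name and sample values are made up for the example.

import java.util.Optional;

import com.google.common.collect.ImmutableMap;

// Sketch only: demonstrates the Optional.ofNullable(...).orElse("") guard used in the patch.
public class DatanodeUuidGuardSketch {
  public static void main(String[] args) {
    // A node taken from the include file that never registered has no UUID.
    String datanodeUuid = null;

    // A direct put of the null value would throw NullPointerException, because
    // ImmutableMap.Builder does not accept null keys or values:
    //   ImmutableMap.<String, Object>builder().put("uuid", datanodeUuid).build();

    // The guarded form substitutes an empty string, matching the FSNamesystem change.
    ImmutableMap<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
        .put("xferaddr", "127.0.0.2:5000")
        .put("uuid", Optional.ofNullable(datanodeUuid).orElse(""))
        .build();

    System.out.println(innerinfo); // {xferaddr=127.0.0.2:5000, uuid=}
  }
}

The new testDeadNodesInNameNodeMXBean test exercises this path end to end: it adds an unregistered 127.0.0.2 entry to the include file, reads the DeadNodes attribute from the NameNodeInfo MBean, and asserts that the dead node's "uuid" field comes back as an empty string rather than null.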