HDFS-11517. Expose slow disks via DataNode JMX. Contributed by Hanisha Koneru
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f8e9284
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f8e9284
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f8e9284

Branch: refs/heads/HDFS-10467
Commit: 7f8e928400db954cfe37a3bd35762e1b310dcb8e
Parents: 4a8e304
Author: Hanisha Koneru <[email protected]>
Authored: Fri Mar 17 15:42:25 2017 -0700
Committer: Arpit Agarwal <[email protected]>
Committed: Fri Mar 17 15:42:25 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/datanode/DataNode.java   | 14 +++++++++
 .../hdfs/server/datanode/DataNodeMXBean.java    |  7 +++++
 .../datanode/metrics/DataNodeDiskMetrics.java   | 13 +++++++-
 .../server/datanode/TestDataNodeMXBean.java     | 33 +++++++++++++++++++-
 4 files changed, 65 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f8e9284/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 5a82850..4b7e052 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1821,6 +1821,10 @@ public class DataNode extends ReconfigurableBase
   public DataNodeMetrics getMetrics() {
     return metrics;
   }
+
+  public DataNodeDiskMetrics getDiskMetrics() {
+    return diskMetrics;
+  }
 
   public DataNodePeerMetrics getPeerMetrics() {
     return peerMetrics;
@@ -3520,4 +3524,14 @@ public class DataNode extends ReconfigurableBase
     return peerMetrics != null ?
         peerMetrics.dumpSendPacketDownstreamAvgInfoAsJson() : null;
   }
+
+  @Override // DataNodeMXBean
+  public String getSlowDisks() {
+    if (diskMetrics == null) {
+      //Disk Stats not enabled
+      return null;
+    }
+    Set<String> slowDisks = diskMetrics.getDiskOutliersStats().keySet();
+    return JSON.toString(slowDisks);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f8e9284/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
index fb79a86..c86fe44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
@@ -132,4 +132,11 @@ public interface DataNodeMXBean {
    * </p>
    */
   String getSendPacketDownstreamAvgInfo();
+
+  /**
+   * Gets the slow disks in the Datanode.
+   *
+   * @return list of slow disks
+   */
+  String getSlowDisks();
 }
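
For readers who want to try the new attribute, here is a minimal sketch of
reading SlowDisks through the platform MBean server, mirroring the pattern the
test further below uses. The SlowDisksReader class is illustrative only, not
part of this patch, and assumes it runs in a JVM with access to the DataNode's
MBeans (e.g. inside the DataNode process or via a JMX connector):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class SlowDisksReader {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName =
        new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
    // Per getSlowDisks() above: null when disk stats are not enabled,
    // otherwise a JSON rendering of the slow disk paths.
    String slowDisks = (String) mbs.getAttribute(mxbeanName, "SlowDisks");
    System.out.println(slowDisks);
  }
}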
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f8e9284/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java
index 85e2bd9..20a3567 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.metrics;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -52,7 +54,8 @@ public class DataNodeDiskMetrics {
   private volatile boolean shouldRun;
   private OutlierDetector slowDiskDetector;
   private Daemon slowDiskDetectionDaemon;
-  private volatile Map<String, Map<DiskOutlierDetectionOp, Double>> diskOutliersStats;
+  private volatile Map<String, Map<DiskOutlierDetectionOp, Double>>
+      diskOutliersStats = Maps.newHashMap();
 
   public DataNodeDiskMetrics(DataNode dn, long diskOutlierDetectionIntervalMs) {
     this.dn = dn;
@@ -178,4 +181,12 @@ public class DataNodeDiskMetrics {
       LOG.error("Disk Outlier Detection daemon did not shutdown", e);
     }
   }
+
+  /**
+   * Use only for testing.
+   */
+  @VisibleForTesting
+  public void addSlowDiskForTesting(String slowDiskPath) {
+    diskOutliersStats.put(slowDiskPath, ImmutableMap.of());
+  }
 }
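
Two details worth noting in this file: addSlowDiskForTesting() attaches an
empty ImmutableMap, so an injected path carries no per-operation latency
stats (getSlowDisks() in DataNode.java only serializes the key set), and
initializing diskOutliersStats to an empty map lets both the hook and
getSlowDisks() run safely before the detection daemon publishes its first
report. A rough sketch of the interaction, with the MiniDFSCluster harness
from the test assumed:

    // Inject a disk path into the outlier map; no stats are attached.
    DataNode dn = cluster.getDataNodes().get(0);
    dn.getDiskMetrics().addSlowDiskForTesting("test/data1/slowVolume");

    // getSlowDisks() serializes the map's key set, so the injected path
    // now appears in the JMX attribute as part of a JSON list.
    String slowDisksJson = dn.getSlowDisks();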
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f8e9284/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
index 3474290..b6277d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -104,8 +105,12 @@ public class TestDataNodeMXBean {
       String bpActorInfo = (String)mbs.getAttribute(mxbeanName,
           "BPServiceActorInfo");
       Assert.assertEquals(datanode.getBPServiceActorInfo(), bpActorInfo);
+      String slowDisks = (String)mbs.getAttribute(mxbeanName, "SlowDisks");
+      Assert.assertEquals(datanode.getSlowDisks(), slowDisks);
     } finally {
-      if (cluster != null) {cluster.shutdown();}
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
 
@@ -209,4 +214,30 @@ public class TestDataNodeMXBean {
     }
     return totalBlocks;
   }
+
+  @Test
+  public void testDataNodeMXBeanSlowDisksEnabled() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setDouble(DFSConfigKeys
+        .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY, 1.0);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+
+    try {
+      List<DataNode> datanodes = cluster.getDataNodes();
+      Assert.assertEquals(datanodes.size(), 1);
+      DataNode datanode = datanodes.get(0);
+      String slowDiskPath = "test/data1/slowVolume";
+      datanode.getDiskMetrics().addSlowDiskForTesting(slowDiskPath);
+
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+          "Hadoop:service=DataNode,name=DataNodeInfo");
+
+      String slowDisks = (String)mbs.getAttribute(mxbeanName, "SlowDisks");
+      Assert.assertEquals(datanode.getSlowDisks(), slowDisks);
+      Assert.assertTrue(slowDisks.contains(slowDiskPath));
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
 }
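
Beyond raw JMX, the same attribute is reachable through the Hadoop daemons'
/jmx JSON servlet. A minimal sketch follows; the host and port are assumptions
(substitute the DataNode's actual HTTP address), and the servlet returns the
full DataNodeInfo bean, including the SlowDisks attribute, as JSON:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class SlowDisksHttpReader {
  public static void main(String[] args) throws Exception {
    // qry= filters the servlet output to the DataNodeInfo MBean.
    URL url = new URL("http://localhost:9864/jmx"
        + "?qry=Hadoop:service=DataNode,name=DataNodeInfo");
    try (BufferedReader in = new BufferedReader(new InputStreamReader(
        url.openStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);  // JSON including "SlowDisks"
      }
    }
  }
}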
