Author: todd
Date: Tue Jul 3 20:45:21 2012
New Revision: 1356928
URL: http://svn.apache.org/viewvc?rev=1356928&view=rev
Log:
HDFS-3343. Improve metrics for DN read latency. Contributed by Andrew Wang.
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1356928&r1=1356927&r2=1356928&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Jul 3 20:45:21 2012
@@ -266,6 +266,8 @@ Branch-2 ( Unreleased changes )
HDFS-3475. Make the replication monitor multipliers configurable.
(harsh via eli)
+ HDFS-3343. Improve metrics for DN read latency (Andrew Wang via todd)
+
OPTIMIZATIONS
HDFS-2982. Startup performance suffers when there are many edit log
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1356928&r1=1356927&r2=1356928&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Tue Jul 3 20:45:21 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.da
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
+import java.io.EOFException;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
@@ -36,6 +37,7 @@ import org.apache.hadoop.fs.ChecksumExce
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
+import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.ReadaheadPool;
@@ -142,6 +144,7 @@ class BlockSender implements java.io.Clo
/** Format used to print client trace log messages */
private final String clientTraceFmt;
private volatile ChunkChecksum lastChunkChecksum = null;
+ private DataNode datanode;
/** The file descriptor of the block being sent */
private FileDescriptor blockInFd;
@@ -184,6 +187,7 @@ class BlockSender implements java.io.Clo
this.clientTraceFmt = clientTraceFmt;
this.readaheadLength = datanode.getDnConf().readaheadLength;
this.shouldDropCacheBehindRead = datanode.getDnConf().dropCacheBehindReads;
+ this.datanode = datanode;
final Replica replica;
final long replicaVisibleLength;
@@ -478,9 +482,11 @@ class BlockSender implements java.io.Clo
SocketOutputStream sockOut = (SocketOutputStream)out;
sockOut.write(buf, 0, dataOff); // First write checksum
- // no need to flush. since we know out is not a buffered stream.
- sockOut.transferToFully(((FileInputStream)blockIn).getChannel(),
- blockInPosition, dataLen);
+ // no need to flush since we know out is not a buffered stream
+ FileChannel fileCh = ((FileInputStream)blockIn).getChannel();
+ sockOut.transferToFully(fileCh, blockInPosition, dataLen,
+ datanode.metrics.getSendDataPacketBlockedOnNetworkNanos(),
+ datanode.metrics.getSendDataPacketTransferNanos());
blockInPosition += dataLen;
} else {
// normal transfer
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java?rev=1356928&r1=1356927&r2=1356928&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java Tue Jul 3 20:45:21 2012
@@ -75,6 +75,9 @@ public class DataNodeMetrics {
@Metric MutableRate blockReports;
@Metric MutableRate fsync;
+
+ @Metric MutableRate sendDataPacketBlockedOnNetworkNanos;
+ @Metric MutableRate sendDataPacketTransferNanos;
final MetricsRegistry registry = new MetricsRegistry("datanode");
final String name;
@@ -183,4 +186,12 @@ public class DataNodeMetrics {
public void incrBlocksGetLocalPathInfo() {
blocksGetLocalPathInfo.incr();
}
+
+ public MutableRate getSendDataPacketBlockedOnNetworkNanos() {
+ return sendDataPacketBlockedOnNetworkNanos;
+ }
+
+ public MutableRate getSendDataPacketTransferNanos() {
+ return sendDataPacketTransferNanos;
+ }
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java?rev=1356928&r1=1356927&r2=1356928&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java Tue Jul 3 20:45:21 2012
@@ -17,21 +17,28 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.assertGaugeGt;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.*;
+
import java.util.List;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import static org.apache.hadoop.test.MetricsAsserts.*;
+import org.junit.Test;
-import junit.framework.TestCase;
-
-public class TestDataNodeMetrics extends TestCase {
+public class TestDataNodeMetrics {
+
+ MiniDFSCluster cluster = null;
+ FileSystem fs = null;
+ @Test
public void testDataNodeMetrics() throws Exception {
Configuration conf = new HdfsConfiguration();
SimulatedFSDataset.setFactory(conf);
@@ -50,4 +57,29 @@ public class TestDataNodeMetrics extends
if (cluster != null) {cluster.shutdown();}
}
}
+
+ @Test
+ public void testSendDataPacket() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+ try {
+ FileSystem fs = cluster.getFileSystem();
+ // Create and read a 1 byte file
+ Path tmpfile = new Path("/tmp.txt");
+ DFSTestUtil.createFile(fs, tmpfile,
+ (long)1, (short)1, 1L);
+ DFSTestUtil.readFile(fs, tmpfile);
+ List<DataNode> datanodes = cluster.getDataNodes();
+ assertEquals(datanodes.size(), 1);
+ DataNode datanode = datanodes.get(0);
+ MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
+
+ // Expect 2 packets, 1 for the 1 byte read, 1 for the empty packet
+ // signaling the end of the block
+ assertCounter("SendDataPacketTransferNanosNumOps", (long)2, rb);
+ assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps", (long)2, rb);
+ } finally {
+ if (cluster != null) {cluster.shutdown();}
+ }
+ }
}