Author: szetszwo
Date: Thu May 14 18:08:53 2009
New Revision: 774859
URL: http://svn.apache.org/viewvc?rev=774859&view=rev
Log:
HADOOP-5824. Deprecate DataTransferProtocol.OP_READ_METADATA and remove the
corresponding unused codes. Contributed by Kan Zhang
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=774859&r1=774858&r2=774859&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Thu May 14 18:08:53 2009
@@ -350,6 +350,9 @@
UTF8 related javac warnings. These warnings are removed in
FSEditLog.java as a use case. (Raghu Angadi)
+ HADOOP-5824. Deprecate DataTransferProtocol.OP_READ_METADATA and remove
+ the corresponding unused codes. (Kan Zhang via szetszwo)
+
OPTIMIZATIONS
HADOOP-5595. NameNode does not need to run a replicator to choose a
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java?rev=774859&r1=774858&r2=774859&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java Thu May 14 18:08:53 2009
@@ -40,7 +40,10 @@
// Processed at datanode stream-handler
public static final byte OP_WRITE_BLOCK = (byte) 80;
public static final byte OP_READ_BLOCK = (byte) 81;
- public static final byte OP_READ_METADATA = (byte) 82;
+ /**
+ * @deprecated As of version 15, OP_READ_METADATA is no longer supported
+ */
+ @Deprecated public static final byte OP_READ_METADATA = (byte) 82;
public static final byte OP_REPLACE_BLOCK = (byte) 83;
public static final byte OP_COPY_BLOCK = (byte) 84;
public static final byte OP_BLOCK_CHECKSUM = (byte) 85;
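A minimal sketch, not part of the patch, of how calling code could refuse to emit the deprecated opcode before writing it to a DataNode stream. The byte values mirror the DataTransferProtocol constants above; the guard helper itself is hypothetical.

    import java.io.IOException;

    // Illustrative guard for client code; the byte values mirror
    // DataTransferProtocol.OP_READ_BLOCK / OP_READ_METADATA above.
    public class OpCodeGuard {
      static final byte OP_READ_BLOCK = (byte) 81;
      static final byte OP_READ_METADATA = (byte) 82;  // deprecated as of version 15

      static void checkNotDeprecated(byte op) throws IOException {
        if (op == OP_READ_METADATA) {
          throw new IOException(
              "OP_READ_METADATA (82) is deprecated; the DataNode no longer serves it");
        }
      }

      public static void main(String[] args) throws IOException {
        checkNotDeprecated(OP_READ_BLOCK);      // passes
        checkNotDeprecated(OP_READ_METADATA);   // throws IOException
      }
    }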
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java?rev=774859&r1=774858&r2=774859&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java Thu May 14 18:08:53 2009
@@ -109,10 +109,6 @@
else
datanode.myMetrics.writesFromRemoteClient.inc();
break;
- case DataTransferProtocol.OP_READ_METADATA:
- readMetadata( in );
- datanode.myMetrics.readMetadataOp.inc(DataNode.now() - startTime);
- break;
case DataTransferProtocol.OP_REPLACE_BLOCK: // for balancing purpose; send to a destination
replaceBlock(in);
datanode.myMetrics.replaceBlockOp.inc(DataNode.now() - startTime);
@@ -417,44 +413,6 @@
}
/**
- * Reads the metadata and sends the data in one 'DATA_CHUNK'.
- * @param in
- */
- void readMetadata(DataInputStream in) throws IOException {
- Block block = new Block( in.readLong(), 0 , in.readLong());
- MetaDataInputStream checksumIn = null;
- DataOutputStream out = null;
-
- try {
-
- checksumIn = datanode.data.getMetaDataInputStream(block);
-
- long fileSize = checksumIn.getLength();
-
- if (fileSize >= 1L<<31 || fileSize <= 0) {
- throw new IOException("Unexpected size for checksumFile of block" +
- block);
- }
-
- byte [] buf = new byte[(int)fileSize];
- IOUtils.readFully(checksumIn, buf, 0, buf.length);
-
- out = new DataOutputStream(
- NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
-
- out.writeByte(DataTransferProtocol.OP_STATUS_SUCCESS);
- out.writeInt(buf.length);
- out.write(buf);
-
- //last DATA_CHUNK
- out.writeInt(0);
- } finally {
- IOUtils.closeStream(out);
- IOUtils.closeStream(checksumIn);
- }
- }
-
- /**
* Get block checksum (MD5 of CRC32).
* @param in
*/
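For reference, a sketch of the client-side read that matched the response format of the removed readMetadata() above: a status byte, a 4-byte length, the raw checksum-file bytes, and a trailing zero marking the last DATA_CHUNK. It is reconstructed from the deleted server code, assumes OP_STATUS_SUCCESS is encoded as 0, and is illustrative only.

    import java.io.DataInputStream;
    import java.io.IOException;

    class RetiredMetadataResponseReader {
      // Reads one response in the retired OP_READ_METADATA exchange.
      static byte[] readResponse(DataInputStream in) throws IOException {
        byte status = in.readByte();        // assumption: OP_STATUS_SUCCESS == 0
        if (status != 0) {
          throw new IOException("metadata request failed, status=" + status);
        }
        int len = in.readInt();             // length of the checksum (meta) file
        byte[] buf = new byte[len];
        in.readFully(buf);                  // checksum file contents
        in.readInt();                       // trailing 0 == last DATA_CHUNK
        return buf;
      }
    }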
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java?rev=774859&r1=774858&r2=774859&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java Thu May 14 18:08:53 2009
@@ -78,8 +78,6 @@
new MetricsTimeVaryingRate("readBlockOp", registry);
public MetricsTimeVaryingRate writeBlockOp =
new MetricsTimeVaryingRate("writeBlockOp", registry);
- public MetricsTimeVaryingRate readMetadataOp =
- new MetricsTimeVaryingRate("readMetadataOp", registry);
public MetricsTimeVaryingRate blockChecksumOp =
new MetricsTimeVaryingRate("blockChecksumOp", registry);
public MetricsTimeVaryingRate copyBlockOp =
@@ -128,7 +126,6 @@
public void resetAllMinMax() {
readBlockOp.resetMinMax();
writeBlockOp.resetMinMax();
- readMetadataOp.resetMinMax();
blockChecksumOp.resetMinMax();
copyBlockOp.resetMinMax();
replaceBlockOp.resetMinMax();