[01/50] [abbrv] hadoop git commit: Revert "HADOOP-10930. Refactor: Wrap Datanode IO related operations. Contributed by Xiaoyu Yao."

2016-12-12 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5085 8c4680852 -> b0aace21b


Revert "HADOOP-10930. Refactor: Wrap Datanode IO related operations. 
Contributed by Xiaoyu Yao."

This reverts commit aeecfa24f4fb6af289920cbf8830c394e66bd78e.
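
(A revert commit like this one is normally produced with the stock git
workflow; the commands below are an illustrative sketch rather than a
record of what the committer actually ran, and the remote name "apache"
is an assumption:

    git revert aeecfa24f4fb6af289920cbf8830c394e66bd78e
    git push apache trunk

git revert creates a new commit, dcedb72a here, that applies the inverse
of the named commit's diff, which is why the file list below mirrors the
original change.)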


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcedb72a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcedb72a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcedb72a

Branch: refs/heads/YARN-5085
Commit: dcedb72af468128458e597f08d22f5c34b744ae5
Parents: 15dd1f3
Author: Xiaoyu Yao 
Authored: Mon Dec 5 12:08:48 2016 -0800
Committer: Xiaoyu Yao 
Committed: Mon Dec 5 12:44:20 2016 -0800

----------------------------------------------------------------------
 .../hdfs/server/datanode/BlockReceiver.java      |  66 ---
 .../hdfs/server/datanode/BlockSender.java        | 105 +++
 .../hadoop/hdfs/server/datanode/DNConf.java      |   4 -
 .../hdfs/server/datanode/DataStorage.java        |   5 -
 .../hdfs/server/datanode/LocalReplica.java       | 179 ++-
 .../server/datanode/LocalReplicaInPipeline.java  |  30 ++--
 .../hdfs/server/datanode/ReplicaInPipeline.java  |   4 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java  |   3 +-
 .../datanode/fsdataset/ReplicaInputStreams.java  | 102 +--
 .../fsdataset/ReplicaOutputStreams.java          | 107 +--
 .../datanode/fsdataset/impl/BlockPoolSlice.java  |  32 ++--
 .../impl/FsDatasetAsyncDiskService.java          |   7 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java   |   5 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java    |   5 +-
 .../org/apache/hadoop/hdfs/TestFileAppend.java   |   2 +-
 .../server/datanode/SimulatedFSDataset.java      |  13 +-
 .../hdfs/server/datanode/TestBlockRecovery.java  |   2 +-
 .../server/datanode/TestSimulatedFSDataset.java  |   2 +-
 .../extdataset/ExternalDatasetImpl.java          |   4 +-
 .../extdataset/ExternalReplicaInPipeline.java    |   6 +-
 20 files changed, 238 insertions(+), 445 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcedb72a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index f372072..39419c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -24,7 +24,10 @@ import java.io.Closeable;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
+import java.io.FileDescriptor;
+import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.nio.ByteBuffer;
@@ -50,6 +53,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
@@ -84,6 +88,8 @@ class BlockReceiver implements Closeable {
    * the DataNode needs to recalculate checksums before writing.
    */
   private final boolean needsChecksumTranslation;
+  private OutputStream out = null; // to block file at local disk
+  private FileDescriptor outFd;
   private DataOutputStream checksumOut = null; // to crc file at local disk
   private final int bytesPerChecksum;
   private final int checksumSize;
@@ -244,8 +250,7 @@ class BlockReceiver implements Closeable {
 
       final boolean isCreate = isDatanode || isTransfer 
           || stage == BlockConstructionStage.PIPELINE_SETUP_CREATE;
-      streams = replicaInfo.createStreams(isCreate, requestedChecksum,
-          datanodeSlowLogThresholdMs);
+      streams = replicaInfo.createStreams(isCreate, requestedChecksum);
       assert streams != null : "null streams!";
 
       // read checksum meta information
@@ -255,6 +260,13 @@ class BlockReceiver implements Closeable {
       this.bytesPerChecksum = diskChecksum.getBytesPerChecksum();
       this.checksumSize = diskChecksum.getChecksumSize();
 
+      this.out = streams.getDataOut();
+      if (out instanceof FileOutputStream) {
+        this.outFd = ((FileOutputStream)out).getFD();
+      } else {
+        LOG.warn("Could not get file descriptor for outputstream of class " +
+            out.getClass());
+      }

[05/50] [abbrv] hadoop git commit: Revert "HADOOP-10930. Refactor: Wrap Datanode IO related operations. Contributed by Xiaoyu Yao."

2016-12-09 Thread xgong
Revert "HADOOP-10930. Refactor: Wrap Datanode IO related operations. 
Contributed by Xiaoyu Yao."

This reverts commit aeecfa24f4fb6af289920cbf8830c394e66bd78e.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcedb72a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcedb72a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcedb72a

Branch: refs/heads/YARN-5734
Commit: dcedb72af468128458e597f08d22f5c34b744ae5
Parents: 15dd1f3
Author: Xiaoyu Yao 
Authored: Mon Dec 5 12:08:48 2016 -0800
Committer: Xiaoyu Yao 
Committed: Mon Dec 5 12:44:20 2016 -0800

(The diffstat and BlockReceiver.java hunks in this message are byte-for-byte
the same as the copy of commit dcedb72a shown in full above.)

[12/50] [abbrv] hadoop git commit: Revert "HADOOP-10930. Refactor: Wrap Datanode IO related operations. Contributed by Xiaoyu Yao."

2016-12-08 Thread stevel
Revert "HADOOP-10930. Refactor: Wrap Datanode IO related operations. 
Contributed by Xiaoyu Yao."

This reverts commit aeecfa24f4fb6af289920cbf8830c394e66bd78e.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcedb72a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcedb72a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcedb72a

Branch: refs/heads/HADOOP-13345
Commit: dcedb72af468128458e597f08d22f5c34b744ae5
Parents: 15dd1f3
Author: Xiaoyu Yao 
Authored: Mon Dec 5 12:08:48 2016 -0800
Committer: Xiaoyu Yao 
Committed: Mon Dec 5 12:44:20 2016 -0800

(Diff content identical to the copy of commit dcedb72a shown in full above.)

hadoop git commit: Revert "HADOOP-10930. Refactor: Wrap Datanode IO related operations. Contributed by Xiaoyu Yao."

2016-12-05 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 15dd1f338 -> dcedb72af


Revert "HADOOP-10930. Refactor: Wrap Datanode IO related operations. 
Contributed by Xiaoyu Yao."

This reverts commit aeecfa24f4fb6af289920cbf8830c394e66bd78e.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcedb72a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcedb72a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcedb72a

Branch: refs/heads/trunk
Commit: dcedb72af468128458e597f08d22f5c34b744ae5
Parents: 15dd1f3
Author: Xiaoyu Yao 
Authored: Mon Dec 5 12:08:48 2016 -0800
Committer: Xiaoyu Yao 
Committed: Mon Dec 5 12:44:20 2016 -0800

(The attached diff is the same dcedb72a content reproduced in full in the
first message above.)