hadoop git commit: HDFS-13468. Add erasure coding metrics into ReadStatistics. (Contributed by Lei (Eddy) Xu)

2018-04-25 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 626690612 -> 96a57c967


HDFS-13468. Add erasure coding metrics into ReadStatistics. (Contributed by Lei 
(Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96a57c96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96a57c96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96a57c96

Branch: refs/heads/trunk
Commit: 96a57c967d6cca32df0534c15da8a573cd155814
Parents: 6266906
Author: Lei Xu <l...@apache.org>
Authored: Wed Apr 25 13:59:03 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Wed Apr 25 14:02:42 2018 -0700

--
 .../hadoop/hdfs/DFSStripedInputStream.java  |  2 +
 .../org/apache/hadoop/hdfs/ReadStatistics.java  | 29 ++
 .../org/apache/hadoop/hdfs/StripeReader.java|  8 +++
 .../hdfs/client/impl/TestBlockReaderLocal.java  | 60 
 4 files changed, 99 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96a57c96/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index e7d90ed..339a02c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ReadOption;
+import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -92,6 +93,7 @@ public class DFSStripedInputStream extends DFSInputStream {
   LocatedBlocks locatedBlocks) throws IOException {
 super(dfsClient, src, verifyChecksum, locatedBlocks);
 
+this.readStatistics.setBlockType(BlockType.STRIPED);
 assert ecPolicy != null;
 this.ecPolicy = ecPolicy;
 this.cellSize = ecPolicy.getCellSize();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96a57c96/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReadStatistics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReadStatistics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReadStatistics.java
index 59b1418..af53f0a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReadStatistics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ReadStatistics.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.hdfs.protocol.BlockType;
+
 /**
  * A utility class that maintains statistics for reading.
  */
@@ -26,6 +28,9 @@ public class ReadStatistics {
   private long totalShortCircuitBytesRead;
   private long totalZeroCopyBytesRead;
 
+  private BlockType blockType = BlockType.CONTIGUOUS;
+  private long totalEcDecodingTimeMillis;
+
   public ReadStatistics() {
 clear();
   }
@@ -75,6 +80,21 @@ public class ReadStatistics {
 return totalBytesRead - totalLocalBytesRead;
   }
 
+  /**
+   * @return block type of the input stream. If block type != CONTIGUOUS,
+   * the stream is reading erasure-coded data.
+   */
+  public synchronized BlockType getBlockType() {
+return blockType;
+  }
+
+  /**
+   * Return the total time in milliseconds spent decoding erasure-coded data.
+   */
+  public synchronized long getTotalEcDecodingTimeMillis() {
+return totalEcDecodingTimeMillis;
+  }
+
   public synchronized void addRemoteBytes(long amt) {
 this.totalBytesRead += amt;
   }
@@ -97,10 +117,19 @@ public class ReadStatistics {
 this.totalZeroCopyBytesRead += amt;
   }
 
+  public synchronized void addErasureCodingDecodingTime(long millis) {
+this.totalEcDecodingTimeMillis += millis;
+  }
+
+  synchronized void setBlockType(BlockType blockType) {
+this.blockType = blockType;
+  }
+
   public synchronized void clear() {
 this.totalBytesRead = 0;
 this.totalLocalBytesRead = 0;
 this.totalShortCircuitBytesRead = 0;
 this.totalZeroCopyBytesRead = 0;
+this.totalEcDecodingTimeMillis = 0;
   }
 }
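
For readers who want to see how these new metrics surface to applications, a
minimal client-side sketch follows. It is an illustration, not part of the
patch: the file path is hypothetical, and it assumes an HDFS default
filesystem so the opened stream really is an HdfsDataInputStream. It relies
only on the getters added above plus the existing
HdfsDataInputStream#getReadStatistics().

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.ReadStatistics;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.BlockType;

public class EcReadStatsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path("/ec/data.bin");  // hypothetical erasure-coded file
    try (FileSystem fs = FileSystem.get(conf);
         HdfsDataInputStream in = (HdfsDataInputStream) fs.open(file)) {
      byte[] buf = new byte[64 * 1024];
      while (in.read(buf) != -1) {
        // Drain the stream so the read statistics get populated.
      }
      ReadStatistics stats = in.getReadStatistics();
      // BlockType.STRIPED means the stream was reading erasure-coded data.
      if (stats.getBlockType() == BlockType.STRIPED) {
        System.out.println("EC decoding time (ms): "
            + stats.getTotalEcDecodingTimeMillis());
      }
    }
  }
}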

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96a57c96/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apa

svn commit: r1830116 - in /hadoop/common/site/main/publish/docs: r3.0.1/hadoop-project-dist/hadoop-hdfs/ViewFs.html r3.0.2/hadoop-project-dist/hadoop-hdfs/ViewFs.html

2018-04-25 Thread lei
Author: lei
Date: Wed Apr 25 20:27:25 2018
New Revision: 1830116

URL: http://svn.apache.org/viewvc?rev=1830116&view=rev
Log:
Fix merge conflicts in ViewFs.html

Modified:

hadoop/common/site/main/publish/docs/r3.0.1/hadoop-project-dist/hadoop-hdfs/ViewFs.html

hadoop/common/site/main/publish/docs/r3.0.2/hadoop-project-dist/hadoop-hdfs/ViewFs.html

Modified: 
hadoop/common/site/main/publish/docs/r3.0.1/hadoop-project-dist/hadoop-hdfs/ViewFs.html
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/docs/r3.0.1/hadoop-project-dist/hadoop-hdfs/ViewFs.html?rev=1830116&r1=1830115&r2=1830116&view=diff
==
--- 
hadoop/common/site/main/publish/docs/r3.0.1/hadoop-project-dist/hadoop-hdfs/ViewFs.html
 (original)
+++ 
hadoop/common/site/main/publish/docs/r3.0.1/hadoop-project-dist/hadoop-hdfs/ViewFs.html
 Wed Apr 25 20:27:25 2018
@@ -1,18 +1,18 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
 
 http://www.w3.org/1999/xhtml;>
   
-Apache Hadoop 3.0.1  ViewFs Guide
+Apache Hadoop 3.0.2  ViewFs Guide
 
   @import url("./css/maven-base.css");
   @import url("./css/maven-theme.css");
   @import url("./css/site.css");
 
 
-
+
 
 
   
@@ -36,7 +36,7 @@
 
   Apache Hadoop Project Dist POM
 
-  Apache Hadoop 3.0.1
+  Apache Hadoop 3.0.2
 
 ViewFs Guide
 
@@ -46,8 +46,8 @@
 |
 http://hadoop.apache.org/; 
class="externalLink">Apache Hadoop
   
-   | Last Published: 2018-03-16
-  | Version: 3.0.1
+   | Last Published: 2018-04-25
+  | Version: 3.0.2
 
   
 
@@ -570,7 +570,6 @@
 /property
 
 
- HEAD The authority following the 
viewfs:// scheme in the URI is the mount table name. It is recommanded 
that the mount table of a cluster should be named by the cluster name. Then 
Hadoop system will look for a mount table with the name 
clusterX in the Hadoop configuration files. Operations arrange 
all gateways and service machines to contain the mount tables for ALL clusters 
such that, for each cluster, the default file system is set to the ViewFs mount 
table for that cluster as described above.
 The authority following the viewfs:// scheme in the URI is the 
mount table name. It is recommended that the mount table of a cluster should be 
named by the cluster name. Then Hadoop system will look for a mount table with 
the name clusterX in the Hadoop configuration files. Operations 
arrange all gateways and service machines to contain the mount tables for ALL 
clusters such that, for each cluster, the default file system is set to the 
ViewFs mount table for that cluster as described above.
 The mount points of a mount table are specified in the standard Hadoop 
configuration files. All the mount table config entries for viewfs are 
prefixed by fs.viewfs.mounttable.. The mount points that are linking 
other filesystems are specified using link tags. The recommendation is 
to have mount points name same as in the linked filesystem target locations. 
For all namespaces that are not configured in the mount table, we can have them 
fallback to a default filesystem via linkFallback.
 In the below mount table configuration, namespace /data is linked 
to the filesystem hdfs://nn1-clusterx.example.com:8020/data, 
/project is linked to the filesystem 
hdfs://nn2-clusterx.example.com:8020/project. All namespaces that are 
not configured in the mount table, like /logs are linked to the 
filesystem hdfs://nn5-clusterx.example.com:8020/home.
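
The mount-table configuration block referenced above was stripped by the
archive. As a hedged sketch (property names follow the standard
fs.viewfs.mounttable convention; the target URIs are the ones named in the
paragraph), the same links can be expressed programmatically:

import org.apache.hadoop.conf.Configuration;

public class ViewFsMountTableSketch {
  public static Configuration clusterXMountTable() {
    Configuration conf = new Configuration();
    // Each "link" maps a path under viewfs://ClusterX/ to a target filesystem.
    conf.set("fs.viewfs.mounttable.ClusterX.link./data",
        "hdfs://nn1-clusterx.example.com:8020/data");
    conf.set("fs.viewfs.mounttable.ClusterX.link./project",
        "hdfs://nn2-clusterx.example.com:8020/project");
    // Namespaces not in the mount table (e.g. /logs) fall back to this target.
    conf.set("fs.viewfs.mounttable.ClusterX.linkFallback",
        "hdfs://nn5-clusterx.example.com:8020/home");
    return conf;
  }
}
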
@@ -612,29 +611,7 @@
   /property
 /configuration
 
-[a block of leftover merge-conflict residue is removed here; its markup was
-stripped by the archive, but it included the stray line "4304fcd5bdf
-HDFS-12990. Change default NameNode RPC port back to 8020. Contributed by
-Xiao Chen."]
+
 
 Pathname Usage Patterns
 Hence on Cluster X, where the core-site.xml is set to make the 
default fs to use the mount table of that cluster, the typical pathnames are

Modified: 
hadoop/common/site/main/publish/docs/r3.0.2/hadoop-project-dist/hadoop-hdfs/ViewFs.html
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/docs/r3.0.2/hadoop-project-dist/hadoop-hdfs/ViewFs.html?rev=1830116&r1=1830115&r2=1830116&view=diff
==
--- 
hadoop/common/site/main/publish/docs/r3.0.2/hadoop-project-dist/hadoop-hdfs/ViewFs.html
 (original)
+++ 
hadoop/common/site/main/publish/docs/r3.0.2/hadoop-project-dist/hadoop-hdfs/ViewFs.html
 Wed Apr 25 20:27:25 2018
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
 
 http://www.w3.org/1999/xhtml;>
@@ -12,7 +12,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
   
@@ -46,7 +46,7 @@

svn commit: r1829956 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/ publish/docs/r3.0.2/ publish/docs/r3.0.2/api/ publish/docs/r3.0.2/api/org/ publish/docs/r3.0.2/api/

2018-04-23 Thread lei
Author: lei
Date: Tue Apr 24 03:56:08 2018
New Revision: 1829956

URL: http://svn.apache.org/viewvc?rev=1829956&view=rev
Log:
Updated site for release 3.0.2


[This commit notification would consist of 4451 parts, 
which exceeds the limit of 50, so it was shortened to this summary.]




hadoop git commit: Update CHANGES, RELEASENOTES, and jdiff for 3.0.2 release.

2018-04-23 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk f411de6a7 -> 42e82f028


Update CHANGES, RELEASENOTES, and jdiff for 3.0.2 release.

(cherry picked from commit f6ecb76d0b919b9836600fe28ec9e637b223cd54)

Conflicts:
hadoop-project-dist/pom.xml


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42e82f02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42e82f02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42e82f02

Branch: refs/heads/trunk
Commit: 42e82f02812c38f2965bd5fccbf71bed6ff89992
Parents: f411de6
Author: Lei Xu <edd...@gmail.com>
Authored: Mon Apr 23 14:07:43 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Mon Apr 23 14:14:05 2018 -0700

--
 .../markdown/release/3.0.2/CHANGES.3.0.2.md |  31 ++
 .../release/3.0.2/RELEASENOTES.3.0.2.md |  31 ++
 .../jdiff/Apache_Hadoop_HDFS_3.0.2.xml  | 324 +++
 hadoop-project-dist/pom.xml |   2 +-
 4 files changed, 387 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42e82f02/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
new file mode 100644
index 000..96953ee
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
@@ -0,0 +1,31 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.2 - 2018-04-13
+
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HADOOP-15368](https://issues.apache.org/jira/browse/HADOOP-15368) | Apache 
Hadoop release 3.0.2 to fix deploying shaded jars in artifacts. |  Major | . | 
Lei (Eddy) Xu | Lei (Eddy) Xu |
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42e82f02/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
new file mode 100644
index 000..5132bc0
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
@@ -0,0 +1,31 @@
+
+
+# Apache Hadoop  3.0.2 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-15368](https://issues.apache.org/jira/browse/HADOOP-15368) | *Major* 
| **Apache Hadoop release 3.0.2 to fix deploying shaded jars in artifacts.**
+
+Release Apache Hadoop 3.0.2 on the same code base as Hadoop 3.0.1, but deploy 
with shaded jars.
+
+
+
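
Since the point of this re-release is that the shaded client jars are deployed
correctly, a hedged Maven illustration may help. hadoop-client-api and
hadoop-client-runtime are the standard shaded client modules (both appear in
this repository's module list); the consuming project itself is hypothetical:

<dependency>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-client-api</artifactId>
  <version>3.0.2</version>
</dependency>
<dependency>
  <!-- Shaded third-party dependencies; needed on the classpath at runtime. -->
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-client-runtime</artifactId>
  <version>3.0.2</version>
  <scope>runtime</scope>
</dependency>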

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42e82f02/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
new file mode 100644
index 000..b60de84
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
@@ -0,0 +1,324 @@
+ [324 new lines of jdiff XML omitted: the XML markup was stripped by the mail archive]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42e82f02/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project-dist/p

hadoop git commit: Update CHANGES, RELEASENOTES, and jdiff for 3.0.2 release.

2018-04-23 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 956ab12ed -> 463fcfb50


Update CHANGES, RELEASENOTES, and jdiff for 3.0.2 release.

(cherry picked from commit f6ecb76d0b919b9836600fe28ec9e637b223cd54)

Conflicts:
hadoop-project-dist/pom.xml


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/463fcfb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/463fcfb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/463fcfb5

Branch: refs/heads/branch-3.0
Commit: 463fcfb50ade14112f71a4c547c609691c3a7d2d
Parents: 956ab12
Author: Lei Xu <edd...@gmail.com>
Authored: Mon Apr 23 14:07:43 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Mon Apr 23 14:17:29 2018 -0700

--
 .../markdown/release/3.0.2/CHANGES.3.0.2.md |  31 ++
 .../release/3.0.2/RELEASENOTES.3.0.2.md |  31 ++
 .../jdiff/Apache_Hadoop_HDFS_3.0.2.xml  | 324 +++
 hadoop-project-dist/pom.xml |   2 +-
 4 files changed, 387 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/463fcfb5/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
new file mode 100644
index 000..96953ee
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
@@ -0,0 +1,31 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.2 - 2018-04-13
+
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HADOOP-15368](https://issues.apache.org/jira/browse/HADOOP-15368) | Apache 
Hadoop release 3.0.2 to fix deploying shaded jars in artifacts. |  Major | . | 
Lei (Eddy) Xu | Lei (Eddy) Xu |
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/463fcfb5/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
new file mode 100644
index 000..5132bc0
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
@@ -0,0 +1,31 @@
+
+
+# Apache Hadoop  3.0.2 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-15368](https://issues.apache.org/jira/browse/HADOOP-15368) | *Major* 
| **Apache Hadoop release 3.0.2 to fix deploying shaded jars in artifacts.**
+
+Release Apache Hadoop 3.0.2 on the same code base as Hadoop 3.0.1, but deploy 
with shaded jars.
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/463fcfb5/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
new file mode 100644
index 000..b60de84
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
@@ -0,0 +1,324 @@
+ [324 new lines of jdiff XML omitted: the XML markup was stripped by the mail archive]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/463fcfb5/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project

hadoop git commit: Update CHANGES, RELEASENOTES, and jdiff for 3.0.2 release.

2018-04-23 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.2 5c141f7c0 -> f6ecb76d0


Update CHANGES, RELEASENOTES, and jdiff for 3.0.2 release.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6ecb76d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6ecb76d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6ecb76d

Branch: refs/heads/branch-3.0.2
Commit: f6ecb76d0b919b9836600fe28ec9e637b223cd54
Parents: 5c141f7
Author: Lei Xu <edd...@gmail.com>
Authored: Mon Apr 23 14:07:43 2018 -0700
Committer: Lei Xu <edd...@gmail.com>
Committed: Mon Apr 23 14:07:43 2018 -0700

--
 .../markdown/release/3.0.2/CHANGES.3.0.2.md |  31 ++
 .../release/3.0.2/RELEASENOTES.3.0.2.md |  31 ++
 .../jdiff/Apache_Hadoop_HDFS_3.0.2.xml  | 324 +++
 hadoop-project-dist/pom.xml |   2 +-
 4 files changed, 387 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ecb76d/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
new file mode 100644
index 000..96953ee
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/CHANGES.3.0.2.md
@@ -0,0 +1,31 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.2 - 2018-04-13
+
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HADOOP-15368](https://issues.apache.org/jira/browse/HADOOP-15368) | Apache 
Hadoop release 3.0.2 to fix deploying shaded jars in artifacts. |  Major | . | 
Lei (Eddy) Xu | Lei (Eddy) Xu |
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ecb76d/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
new file mode 100644
index 000..5132bc0
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.2/RELEASENOTES.3.0.2.md
@@ -0,0 +1,31 @@
+
+
+# Apache Hadoop  3.0.2 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+---
+
+* [HADOOP-15368](https://issues.apache.org/jira/browse/HADOOP-15368) | *Major* 
| **Apache Hadoop release 3.0.2 to fix deploying shaded jars in artifacts.**
+
+Release Apache Hadoop 3.0.2 on the same code base as Hadoop 3.0.1, but deploy 
with shaded jars.
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ecb76d/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
new file mode 100644
index 000..b60de84
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.2.xml
@@ -0,0 +1,324 @@
+ [324 new lines of jdiff XML omitted: the XML markup was stripped by the mail archive]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6ecb76d/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 14a013b..7693b85 100644
--- a/hadoop-project-dist/pom.xml
+++ b

[hadoop] Git Push Summary

2018-04-23 Thread lei
Repository: hadoop
Updated Tags:  refs/tags/rel/release-3.0.2 [created] 612f94c65




[hadoop] Git Push Summary

2018-04-16 Thread lei
Repository: hadoop
Updated Tags:  refs/tags/release-3.0.2-RC1 [created] a8897f84b




hadoop git commit: Preparing for 3.0.3 development

2018-04-12 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 43d2ee9de -> 3717df89e


Preparing for 3.0.3 development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3717df89
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3717df89
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3717df89

Branch: refs/heads/branch-3.0
Commit: 3717df89ee149a5c8f391ba252b4409ae265e257
Parents: 43d2ee9
Author: Lei Xu <l...@apache.org>
Authored: Thu Apr 12 13:57:46 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Thu Apr 12 13:57:46 2018 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-resourceestimator/pom.xml| 2 +-
 hadoop-tools/hadoo

[hadoop] Git Push Summary

2018-04-06 Thread lei
Repository: hadoop
Updated Tags:  refs/tags/release-3.0.2-RC0 [created] 3d134ba68




hadoop git commit: HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be considered as striped block. (Contributed by Lei (Eddy) Xu).

2018-04-05 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 f2d89d7bf -> 9fe337096


HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be 
considered as striped block. (Contributed by Lei (Eddy) Xu).

(cherry picked from commit 1cbf23df145af01692b8aaa438642b64e330cd05)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fe33709
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fe33709
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fe33709

Branch: refs/heads/branch-3.0
Commit: 9fe337096dbf5887456c04657ab80a85d4943383
Parents: f2d89d7
Author: Lei Xu <l...@apache.org>
Authored: Wed Apr 4 15:56:17 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Thu Apr 5 09:54:15 2018 -0700

--
 .../server/blockmanagement/BlockIdManager.java  | 17 ++
 .../server/blockmanagement/BlockManager.java|  5 +-
 .../blockmanagement/BlockManagerSafeMode.java   |  2 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  | 12 ++--
 .../blockmanagement/CorruptReplicasMap.java | 35 +--
 .../blockmanagement/InvalidateBlocks.java   | 13 +++--
 .../blockmanagement/TestBlockManager.java   | 61 
 .../blockmanagement/TestCorruptReplicaInfo.java | 48 ++-
 8 files changed, 136 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe33709/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 8463023..7fcd698 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -237,6 +237,23 @@ public class BlockIdManager {
 legacyGenerationStampLimit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
   }
 
+  /**
+   * Return true if the block is a striped block.
+   *
+   * Before HDFS-4645, block IDs were randomly generated (legacy), so it is
+   * possible for a legacy block ID to be negative; such an ID should not be
+   * treated as a striped block ID.
+   *
+   * @see #isLegacyBlock(Block) detecting legacy block IDs.
+   */
+  public boolean isStripedBlock(Block block) {
+return isStripedBlockID(block.getBlockId()) && !isLegacyBlock(block);
+  }
+
+  /**
+   * See {@link #isStripedBlock(Block)}; this function should not be used on
+   * its own to determine whether a block is a striped block.
+   */
   public static boolean isStripedBlockID(long id) {
 return BlockType.fromBlockId(id) == STRIPED;
   }
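
For context on why both checks are needed: striped (erasure-coded) block group
IDs are allocated from the negative half of the 64-bit ID space, which is what
BlockType.fromBlockId keys off, while block IDs generated before HDFS-4645
were random and can therefore also be negative. A standalone sketch of the
combined check follows; isLegacyBlockId is a deliberately simplified stand-in
for BlockIdManager#isLegacyBlock, which in reality compares the block's
generation stamp against the legacy generation-stamp limit.

public class StripedBlockIdCheckSketch {

  // Mirrors BlockType.fromBlockId: striped block-group IDs are negative.
  static boolean isStripedBlockId(long id) {
    return id < 0;
  }

  // Simplified stand-in: the real code decides this from the block's
  // generation stamp, not from a flag passed in by the caller.
  static boolean isLegacyBlockId(long id, boolean preHdfs4645) {
    return preHdfs4645;
  }

  // The combined check from this patch: a negative ID alone does not prove a
  // block is striped, because random legacy IDs can be negative too.
  static boolean isStripedBlock(long id, boolean preHdfs4645) {
    return isStripedBlockId(id) && !isLegacyBlockId(id, preHdfs4645);
  }

  public static void main(String[] args) {
    long legacyId = -7234556789L;               // random pre-HDFS-4645 ID
    long stripedGroupId = Long.MIN_VALUE + 16;  // sequentially allocated EC ID
    System.out.println(isStripedBlock(legacyId, true));         // false
    System.out.println(isStripedBlock(stripedGroupId, false));  // true
  }
}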

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe33709/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 9b8f74c..8761d31 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -450,7 +450,8 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 
1000L;
 invalidateBlocks = new InvalidateBlocks(
 datanodeManager.getBlockInvalidateLimit(),
-startupDelayBlockDeletionInMs);
+startupDelayBlockDeletionInMs,
+blockIdManager);
 
 // Compute the map capacity by allocating 2% of total memory
 blocksMap = new BlocksMap(
@@ -1667,7 +1668,7 @@ public class BlockManager implements BlockStatsMXBean {
   corrupted.setBlockId(b.getStored().getBlockId());
 }
 corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.getReason(),
-b.getReasonCode());
+b.getReasonCode(), b.getStored().isStriped());
 
 NumberReplicas numberOfReplicas = countNodes(b.getStored());
 boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fe33709/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement

hadoop git commit: HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be considered as striped block. (Contributed by Lei (Eddy) Xu).

2018-04-05 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk e52539b46 -> d737bf99d


HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be 
considered as striped block. (Contributed by Lei (Eddy) Xu).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d737bf99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d737bf99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d737bf99

Branch: refs/heads/trunk
Commit: d737bf99d44ce34cd01baad716d23df269267c95
Parents: e52539b
Author: Lei Xu <l...@apache.org>
Authored: Wed Apr 4 15:56:17 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Thu Apr 5 09:59:10 2018 -0700

--
 .../server/blockmanagement/BlockIdManager.java  | 17 ++
 .../server/blockmanagement/BlockManager.java|  5 +-
 .../blockmanagement/BlockManagerSafeMode.java   |  2 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  | 12 ++--
 .../blockmanagement/CorruptReplicasMap.java | 35 +--
 .../blockmanagement/InvalidateBlocks.java   | 13 +++--
 .../blockmanagement/TestBlockManager.java   | 61 
 .../blockmanagement/TestCorruptReplicaInfo.java | 48 ++-
 8 files changed, 136 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 321155b..5eebe8e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -239,6 +239,23 @@ public class BlockIdManager {
 legacyGenerationStampLimit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
   }
 
+  /**
+   * Return true if the block is a striped block.
+   *
+   * Before HDFS-4645, block IDs were randomly generated (legacy), so it is
+   * possible for a legacy block ID to be negative; such an ID should not be
+   * treated as a striped block ID.
+   *
+   * @see #isLegacyBlock(Block) detecting legacy block IDs.
+   */
+  public boolean isStripedBlock(Block block) {
+return isStripedBlockID(block.getBlockId()) && !isLegacyBlock(block);
+  }
+
+  /**
+   * See {@link #isStripedBlock(Block)}; this function should not be used on
+   * its own to determine whether a block is a striped block.
+   */
   public static boolean isStripedBlockID(long id) {
 return BlockType.fromBlockId(id) == STRIPED;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f49e1d8..76a7781 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -448,7 +448,8 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 
1000L;
 invalidateBlocks = new InvalidateBlocks(
 datanodeManager.getBlockInvalidateLimit(),
-startupDelayBlockDeletionInMs);
+startupDelayBlockDeletionInMs,
+blockIdManager);
 
 // Compute the map capacity by allocating 2% of total memory
 blocksMap = new BlocksMap(
@@ -1677,7 +1678,7 @@ public class BlockManager implements BlockStatsMXBean {
   corrupted.setBlockId(b.getStored().getBlockId());
 }
 corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.getReason(),
-b.getReasonCode());
+b.getReasonCode(), b.getStored().isStriped());
 
 NumberReplicas numberOfReplicas = countNodes(b.getStored());
 boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/m

hadoop git commit: HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be considered as striped block. (Contributed by Lei (Eddy) Xu).

2018-04-05 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 fa464c12a -> 2543a9dd7


HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be 
considered as striped block. (Contributed by Lei (Eddy) Xu).

(cherry picked from commit 1cbf23df145af01692b8aaa438642b64e330cd05)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2543a9dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2543a9dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2543a9dd

Branch: refs/heads/branch-3.1
Commit: 2543a9dd77163aca714e95ed4a0ae030492154bb
Parents: fa464c1
Author: Lei Xu <l...@apache.org>
Authored: Wed Apr 4 15:56:17 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Thu Apr 5 10:01:05 2018 -0700

--
 .../server/blockmanagement/BlockIdManager.java  | 17 ++
 .../server/blockmanagement/BlockManager.java|  5 +-
 .../blockmanagement/BlockManagerSafeMode.java   |  2 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  | 12 ++--
 .../blockmanagement/CorruptReplicasMap.java | 35 +--
 .../blockmanagement/InvalidateBlocks.java   | 13 +++--
 .../blockmanagement/TestBlockManager.java   | 61 
 .../blockmanagement/TestCorruptReplicaInfo.java | 48 ++-
 8 files changed, 136 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2543a9dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 321155b..5eebe8e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -239,6 +239,23 @@ public class BlockIdManager {
 legacyGenerationStampLimit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
   }
 
+  /**
+   * Return true if the block is a striped block.
+   *
+   * Before HDFS-4645, block IDs were randomly generated (legacy), so it is
+   * possible for a legacy block ID to be negative; such an ID should not be
+   * treated as a striped block ID.
+   *
+   * @see #isLegacyBlock(Block) detecting legacy block IDs.
+   */
+  public boolean isStripedBlock(Block block) {
+return isStripedBlockID(block.getBlockId()) && !isLegacyBlock(block);
+  }
+
+  /**
+   * See {@link #isStripedBlock(Block)}; this function should not be used on
+   * its own to determine whether a block is a striped block.
+   */
   public static boolean isStripedBlockID(long id) {
 return BlockType.fromBlockId(id) == STRIPED;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2543a9dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f49e1d8..76a7781 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -448,7 +448,8 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 
1000L;
 invalidateBlocks = new InvalidateBlocks(
 datanodeManager.getBlockInvalidateLimit(),
-startupDelayBlockDeletionInMs);
+startupDelayBlockDeletionInMs,
+blockIdManager);
 
 // Compute the map capacity by allocating 2% of total memory
 blocksMap = new BlocksMap(
@@ -1677,7 +1678,7 @@ public class BlockManager implements BlockStatsMXBean {
   corrupted.setBlockId(b.getStored().getBlockId());
 }
 corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.getReason(),
-b.getReasonCode());
+b.getReasonCode(), b.getStored().isStriped());
 
 NumberReplicas numberOfReplicas = countNodes(b.getStored());
 boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2543a9dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement

hadoop git commit: Preparing for 3.0.2 release

2018-04-04 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.2 [created] 5c141f7c0


Preparing for 3.0.2 release


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c141f7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c141f7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c141f7c

Branch: refs/heads/branch-3.0.2
Commit: 5c141f7c0f24c12cb8704a6ccc1ff8ec991f41ee
Parents: 55f778a
Author: Lei Xu <l...@apache.org>
Authored: Wed Apr 4 15:20:29 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Wed Apr 4 15:20:29 2018 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-resourceestimator/pom.xml| 2 +-
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop

svn commit: r1827741 - /hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml

2018-03-26 Thread lei
Author: lei
Date: Mon Mar 26 06:18:04 2018
New Revision: 1827741

URL: http://svn.apache.org/viewvc?rev=1827741&view=rev
Log:
fix link of 3.0.1 on release page

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml

Modified: 
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml?rev=1827741&r1=1827740&r2=1827741&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
Mon Mar 26 06:18:04 2018
@@ -30,7 +30,7 @@
  SHA-256

   
- 3.0.1
+ 3.0.1
  25 March, 2018
  http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-3.0.1/hadoop-3.0.1-src.tar.gz;>source
  https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.1/hadoop-3.0.1-src.tar.gz.asc;>signature






svn commit: r1827740 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/ publish/docs/ publish/docs/r3.0.1/ publish/docs/r3.0.1/api/ publish/docs/r3.0.1/api/org/ publish/do

2018-03-26 Thread lei
Author: lei
Date: Mon Mar 26 05:54:10 2018
New Revision: 1827740

URL: http://svn.apache.org/viewvc?rev=1827740&view=rev
Log:
Update site for release 3.0.1


[This commit notification would consist of 4450 parts, 
which exceeds the limit of 50, so it was shortened to this summary.]




[1/2] hadoop git commit: Update releasenotes and changelogs for 3.0.1 release

2018-03-23 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1.0 b72856287 -> 704d43603


Update releasenotes and changelogs for 3.0.1 release


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6a10e2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6a10e2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6a10e2b

Branch: refs/heads/branch-3.1.0
Commit: b6a10e2b2b7a09953f7649da95bbc3bed3782ab4
Parents: b728562
Author: Lei Xu <l...@cloudera.com>
Authored: Fri Mar 23 11:43:09 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Mar 23 11:57:45 2018 -0700

--
 .../markdown/release/3.0.1/CHANGES.3.0.1.md | 241 +++
 .../release/3.0.1/RELEASENOTES.3.0.1.md |  54 +
 2 files changed, 295 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6a10e2b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
new file mode 100644
index 000..d24a8f4
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
@@ -0,0 +1,241 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.1 - 2018-03-16
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HDFS-12990](https://issues.apache.org/jira/browse/HDFS-12990) | Change 
default NameNode RPC port back to 8020 |  Critical | namenode | Xiao Chen | 
Xiao Chen |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix 
doc error setting up client |  Major | federation | tartarus | tartarus |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HADOOP-14872](https://issues.apache.org/jira/browse/HADOOP-14872) | 
CryptoInputStream should implement unbuffer |  Major | fs, security | John 
Zhuge | John Zhuge |
+| [YARN-7414](https://issues.apache.org/jira/browse/YARN-7414) | 
FairScheduler#getAppWeight() should be moved into FSAppAttempt#getWeight() |  
Minor | fairscheduler | Daniel Templeton | Soumabrata Chakraborty |
+| [HADOOP-15023](https://issues.apache.org/jira/browse/HADOOP-15023) | 
ValueQueue should also validate (lowWatermark \* numValues) \> 0 on 
construction |  Minor | . | Xiao Chen | Xiao Chen |
+| [HDFS-12814](https://issues.apache.org/jira/browse/HDFS-12814) | Add blockId 
when warning slow mirror/disk in BlockReceiver |  Trivial | hdfs | Jiandan Yang 
| Jiandan Yang |
+| [YARN-7524](https://issues.apache.org/jira/browse/YARN-7524) | Remove unused 
FairSchedulerEventLog |  Major | fairscheduler | Wilfred Spiegelenburg | 
Wilfred Spiegelenburg |
+| [YARN-7495](https://issues.apache.org/jira/browse/YARN-7495) | Improve 
robustness of the AggregatedLogDeletionService |  Major | log-aggregation | 
Jonathan Eagles | Jonathan Eagles |
+| [YARN-7611](https://issues.apache.org/jira/browse/YARN-7611) | Node manager 
web UI should display container type in containers page |  Major | nodemanager, 
webapp | Weiwei Yang | Weiwei Yang |
+| [YARN-6483](https://issues.apache.org/jira/browse/YARN-6483) | Add nodes 
transitioning to DECOMMISSIONING state to the list of updated nodes returned to 
the AM |  Major | resourcemanager | Juan Rodríguez Hortalá | Juan Rodríguez 
Hortalá |
+| [HADOOP-15056](https://issues.apache.org/jira/browse/HADOOP-15056) | Fix 
TestUnbuffer#testUnbufferException failure |  Minor | test | Jack Bearden | 
Jack Bearden |
+| [HADOOP-15012](https://issues.apache.org/jira/browse/HADOOP-15012) | Add 
readahead, dropbehind, and unbuffer to StreamCapabilities |  Major | fs | John 
Zhuge | John Zhuge |
+| [HADOOP-15104](https://issues.apache.org/jira/browse/HADOOP-15104) | 
AliyunOSS: change the default value of max error retry |  Major | fs/oss | 
wujinhu | wujinhu |
+| [HDFS-12910](https://issues.apache.org/jira/browse/HDFS-12910) | Secure 
Datanode Starter should log the port when it fails to bind |  Minor | datanode 
| Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-12819](https://issues.apache.org/jira/browse/HDFS-12819) | 
Setting/Unsetting EC policy shows warning if the directory is not empty |  
Minor | erasure-coding | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-12927](https://issues.apache.org/jira/browse/HDFS-12927) | Update 
erasure coding doc to address unsupported APIs |  Major | er

[2/2] hadoop git commit: Update 3.0.1 jdiff file and jdiff stable api version

2018-03-23 Thread lei
Update 3.0.1 jdiff file and jdiff stable api version


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/704d4360
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/704d4360
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/704d4360

Branch: refs/heads/branch-3.1.0
Commit: 704d43603b9b32ddd73ca7dfd8257277cbda1453
Parents: b6a10e2
Author: Lei Xu <l...@cloudera.com>
Authored: Fri Mar 23 11:48:36 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Mar 23 11:58:14 2018 -0700

--
 .../jdiff/Apache_Hadoop_HDFS_3.0.1.xml  | 324 +++
 hadoop-project-dist/pom.xml |   2 +-
 2 files changed, 325 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/704d4360/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
new file mode 100644
index 000..91c8a6b
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
@@ -0,0 +1,324 @@
+ [324 new lines of jdiff XML omitted: the XML markup was stripped by the mail archive]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/704d4360/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index fa78dbe..a75ed5e 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -145,7 +145,7 @@
 false
   
   
-3.0.0
+3.0.1
 -unstable
 
 





[1/2] hadoop git commit: Update releasenotes and changelogs for 3.0.1 release

2018-03-23 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 d17aa8d05 -> 4859cd7cc


Update releasenotes and changelogs for 3.0.1 release


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98d7a5aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98d7a5aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98d7a5aa

Branch: refs/heads/branch-3.1
Commit: 98d7a5aaef2bbef46e0e7b6c876490f9235c59f5
Parents: d17aa8d
Author: Lei Xu <l...@cloudera.com>
Authored: Fri Mar 23 11:43:09 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Mar 23 11:53:16 2018 -0700

--
 .../markdown/release/3.0.1/CHANGES.3.0.1.md | 241 +++
 .../release/3.0.1/RELEASENOTES.3.0.1.md |  54 +
 2 files changed, 295 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98d7a5aa/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
new file mode 100644
index 000..d24a8f4
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
@@ -0,0 +1,241 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.1 - 2018-03-16
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-12990](https://issues.apache.org/jira/browse/HDFS-12990) | Change default NameNode RPC port back to 8020 |  Critical | namenode | Xiao Chen | Xiao Chen |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client |  Major | federation | tartarus | tartarus |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14872](https://issues.apache.org/jira/browse/HADOOP-14872) | CryptoInputStream should implement unbuffer |  Major | fs, security | John Zhuge | John Zhuge |
+| [YARN-7414](https://issues.apache.org/jira/browse/YARN-7414) | FairScheduler#getAppWeight() should be moved into FSAppAttempt#getWeight() |  Minor | fairscheduler | Daniel Templeton | Soumabrata Chakraborty |
+| [HADOOP-15023](https://issues.apache.org/jira/browse/HADOOP-15023) | ValueQueue should also validate (lowWatermark \* numValues) \> 0 on construction |  Minor | . | Xiao Chen | Xiao Chen |
+| [HDFS-12814](https://issues.apache.org/jira/browse/HDFS-12814) | Add blockId when warning slow mirror/disk in BlockReceiver |  Trivial | hdfs | Jiandan Yang | Jiandan Yang |
+| [YARN-7524](https://issues.apache.org/jira/browse/YARN-7524) | Remove unused FairSchedulerEventLog |  Major | fairscheduler | Wilfred Spiegelenburg | Wilfred Spiegelenburg |
+| [YARN-7495](https://issues.apache.org/jira/browse/YARN-7495) | Improve robustness of the AggregatedLogDeletionService |  Major | log-aggregation | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7611](https://issues.apache.org/jira/browse/YARN-7611) | Node manager web UI should display container type in containers page |  Major | nodemanager, webapp | Weiwei Yang | Weiwei Yang |
+| [YARN-6483](https://issues.apache.org/jira/browse/YARN-6483) | Add nodes transitioning to DECOMMISSIONING state to the list of updated nodes returned to the AM |  Major | resourcemanager | Juan Rodríguez Hortalá | Juan Rodríguez Hortalá |
+| [HADOOP-15056](https://issues.apache.org/jira/browse/HADOOP-15056) | Fix TestUnbuffer#testUnbufferException failure |  Minor | test | Jack Bearden | Jack Bearden |
+| [HADOOP-15012](https://issues.apache.org/jira/browse/HADOOP-15012) | Add readahead, dropbehind, and unbuffer to StreamCapabilities |  Major | fs | John Zhuge | John Zhuge |
+| [HADOOP-15104](https://issues.apache.org/jira/browse/HADOOP-15104) | AliyunOSS: change the default value of max error retry |  Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-12910](https://issues.apache.org/jira/browse/HDFS-12910) | Secure Datanode Starter should log the port when it fails to bind |  Minor | datanode | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-12819](https://issues.apache.org/jira/browse/HDFS-12819) | Setting/Unsetting EC policy shows warning if the directory is not empty |  Minor | erasure-coding | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-12927](https://issues.apache.org/jira/browse/HDFS-12927) | Update erasure coding doc to address unsupported APIs |  Major | erasure-c

[2/2] hadoop git commit: Update 3.0.1 jdiff file and jdiff stable api version

2018-03-23 Thread lei
Update 3.0.1 jdiff file and jdiff stable api version


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4859cd7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4859cd7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4859cd7c

Branch: refs/heads/branch-3.1
Commit: 4859cd7cc936d5fcf115a2f1cb06fe45a742ff5d
Parents: 98d7a5a
Author: Lei Xu <l...@cloudera.com>
Authored: Fri Mar 23 11:48:36 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Mar 23 11:53:30 2018 -0700

--
 .../jdiff/Apache_Hadoop_HDFS_3.0.1.xml  | 324 +++
 hadoop-project-dist/pom.xml |   2 +-
 2 files changed, 325 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4859cd7c/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
new file mode 100644
index 0000000..91c8a6b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
@@ -0,0 +1,324 @@
[324 added lines of jdiff-generated XML omitted: the list archive stripped the XML markup, leaving nothing recoverable]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4859cd7c/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 9118a71..7b714ee 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -145,7 +145,7 @@
-3.0.0
+3.0.1
[surrounding XML markup stripped by the list archive; the hunk bumps the jdiff stable API version from 3.0.0 to 3.0.1]
 
 





[2/2] hadoop git commit: Update 3.0.1 jdiff file and jdiff stable api version

2018-03-23 Thread lei
Update 3.0.1 jdiff file and jdiff stable api version


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/beb98bb4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/beb98bb4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/beb98bb4

Branch: refs/heads/branch-3.0
Commit: beb98bb4193fe204e7d9c37f106003d399a7dfb2
Parents: 7b47400
Author: Lei Xu <l...@cloudera.com>
Authored: Fri Mar 23 11:48:36 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Mar 23 11:51:40 2018 -0700

--
 .../jdiff/Apache_Hadoop_HDFS_3.0.1.xml  | 324 +++
 hadoop-project-dist/pom.xml |   2 +-
 2 files changed, 325 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/beb98bb4/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
new file mode 100644
index 0000000..91c8a6b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
@@ -0,0 +1,324 @@
[324 added lines of jdiff-generated XML omitted: the list archive stripped the XML markup, leaving nothing recoverable]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/beb98bb4/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index dcf24ce..cc909a7 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -145,7 +145,7 @@
-3.0.0
+3.0.1
[surrounding XML markup stripped by the list archive; the hunk bumps the jdiff stable API version from 3.0.0 to 3.0.1]
 
 





[1/2] hadoop git commit: Update releasenotes and changelogs for 3.0.1 release

2018-03-23 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 f4d6fee96 -> beb98bb41


Update releasenotes and changelogs for 3.0.1 release


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b47400c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b47400c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b47400c

Branch: refs/heads/branch-3.0
Commit: 7b47400ce077c9a31e26d4853587336cd9c01d45
Parents: f4d6fee
Author: Lei Xu <l...@cloudera.com>
Authored: Fri Mar 23 11:43:09 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Mar 23 11:51:32 2018 -0700

--
 .../markdown/release/3.0.1/CHANGES.3.0.1.md | 241 +++
 .../release/3.0.1/RELEASENOTES.3.0.1.md |  54 +
 2 files changed, 295 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b47400c/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
new file mode 100644
index 0000000..d24a8f4
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
@@ -0,0 +1,241 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.1 - 2018-03-16
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-12990](https://issues.apache.org/jira/browse/HDFS-12990) | Change default NameNode RPC port back to 8020 |  Critical | namenode | Xiao Chen | Xiao Chen |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client |  Major | federation | tartarus | tartarus |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14872](https://issues.apache.org/jira/browse/HADOOP-14872) | CryptoInputStream should implement unbuffer |  Major | fs, security | John Zhuge | John Zhuge |
+| [YARN-7414](https://issues.apache.org/jira/browse/YARN-7414) | FairScheduler#getAppWeight() should be moved into FSAppAttempt#getWeight() |  Minor | fairscheduler | Daniel Templeton | Soumabrata Chakraborty |
+| [HADOOP-15023](https://issues.apache.org/jira/browse/HADOOP-15023) | ValueQueue should also validate (lowWatermark \* numValues) \> 0 on construction |  Minor | . | Xiao Chen | Xiao Chen |
+| [HDFS-12814](https://issues.apache.org/jira/browse/HDFS-12814) | Add blockId when warning slow mirror/disk in BlockReceiver |  Trivial | hdfs | Jiandan Yang | Jiandan Yang |
+| [YARN-7524](https://issues.apache.org/jira/browse/YARN-7524) | Remove unused FairSchedulerEventLog |  Major | fairscheduler | Wilfred Spiegelenburg | Wilfred Spiegelenburg |
+| [YARN-7495](https://issues.apache.org/jira/browse/YARN-7495) | Improve robustness of the AggregatedLogDeletionService |  Major | log-aggregation | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7611](https://issues.apache.org/jira/browse/YARN-7611) | Node manager web UI should display container type in containers page |  Major | nodemanager, webapp | Weiwei Yang | Weiwei Yang |
+| [YARN-6483](https://issues.apache.org/jira/browse/YARN-6483) | Add nodes transitioning to DECOMMISSIONING state to the list of updated nodes returned to the AM |  Major | resourcemanager | Juan Rodríguez Hortalá | Juan Rodríguez Hortalá |
+| [HADOOP-15056](https://issues.apache.org/jira/browse/HADOOP-15056) | Fix TestUnbuffer#testUnbufferException failure |  Minor | test | Jack Bearden | Jack Bearden |
+| [HADOOP-15012](https://issues.apache.org/jira/browse/HADOOP-15012) | Add readahead, dropbehind, and unbuffer to StreamCapabilities |  Major | fs | John Zhuge | John Zhuge |
+| [HADOOP-15104](https://issues.apache.org/jira/browse/HADOOP-15104) | AliyunOSS: change the default value of max error retry |  Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-12910](https://issues.apache.org/jira/browse/HDFS-12910) | Secure Datanode Starter should log the port when it fails to bind |  Minor | datanode | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-12819](https://issues.apache.org/jira/browse/HDFS-12819) | Setting/Unsetting EC policy shows warning if the directory is not empty |  Minor | erasure-coding | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-12927](https://issues.apache.org/jira/browse/HDFS-12927) | Update erasure coding doc to address unsupported APIs |  Major | erasure-c

[2/2] hadoop git commit: Update 3.0.1 jdiff file and jdiff stable api version

2018-03-23 Thread lei
Update 3.0.1 jdiff file and jdiff stable api version


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7616683d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7616683d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7616683d

Branch: refs/heads/trunk
Commit: 7616683db59d43e704def1bbb8c0ac74daa835cb
Parents: f9427b7
Author: Lei Xu <l...@cloudera.com>
Authored: Fri Mar 23 11:48:36 2018 -0700
Committer: Lei Xu <l...@cloudera.com>
Committed: Fri Mar 23 11:48:36 2018 -0700

--
 .../jdiff/Apache_Hadoop_HDFS_3.0.1.xml  | 324 +++
 hadoop-project-dist/pom.xml |   2 +-
 2 files changed, 325 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7616683d/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
new file mode 100644
index 0000000..91c8a6b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.1.xml
@@ -0,0 +1,324 @@
[324 added lines of jdiff-generated XML omitted: the list archive stripped the XML markup, leaving nothing recoverable]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7616683d/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index b0ed311..e554be4 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -145,7 +145,7 @@
-3.0.0
+3.0.1
[surrounding XML markup stripped by the list archive; the hunk bumps the jdiff stable API version from 3.0.0 to 3.0.1]
 
 





[1/2] hadoop git commit: Update releasenotes and changelogs for 3.0.1 release

2018-03-23 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6e31a0908 -> 7616683db


Update releasenotes and changelogs for 3.0.1 release


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9427b73
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9427b73
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9427b73

Branch: refs/heads/trunk
Commit: f9427b73a2791afeb128c26314c5a0070f1d3682
Parents: 6e31a09
Author: Lei Xu <l...@cloudera.com>
Authored: Fri Mar 23 11:43:09 2018 -0700
Committer: Lei Xu <l...@cloudera.com>
Committed: Fri Mar 23 11:46:41 2018 -0700

--
 .../markdown/release/3.0.1/CHANGES.3.0.1.md | 241 +++
 .../release/3.0.1/RELEASENOTES.3.0.1.md |  54 +
 2 files changed, 295 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9427b73/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
new file mode 100644
index 0000000..d24a8f4
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
@@ -0,0 +1,241 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.1 - 2018-03-16
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-12990](https://issues.apache.org/jira/browse/HDFS-12990) | Change default NameNode RPC port back to 8020 |  Critical | namenode | Xiao Chen | Xiao Chen |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client |  Major | federation | tartarus | tartarus |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14872](https://issues.apache.org/jira/browse/HADOOP-14872) | CryptoInputStream should implement unbuffer |  Major | fs, security | John Zhuge | John Zhuge |
+| [YARN-7414](https://issues.apache.org/jira/browse/YARN-7414) | FairScheduler#getAppWeight() should be moved into FSAppAttempt#getWeight() |  Minor | fairscheduler | Daniel Templeton | Soumabrata Chakraborty |
+| [HADOOP-15023](https://issues.apache.org/jira/browse/HADOOP-15023) | ValueQueue should also validate (lowWatermark \* numValues) \> 0 on construction |  Minor | . | Xiao Chen | Xiao Chen |
+| [HDFS-12814](https://issues.apache.org/jira/browse/HDFS-12814) | Add blockId when warning slow mirror/disk in BlockReceiver |  Trivial | hdfs | Jiandan Yang | Jiandan Yang |
+| [YARN-7524](https://issues.apache.org/jira/browse/YARN-7524) | Remove unused FairSchedulerEventLog |  Major | fairscheduler | Wilfred Spiegelenburg | Wilfred Spiegelenburg |
+| [YARN-7495](https://issues.apache.org/jira/browse/YARN-7495) | Improve robustness of the AggregatedLogDeletionService |  Major | log-aggregation | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7611](https://issues.apache.org/jira/browse/YARN-7611) | Node manager web UI should display container type in containers page |  Major | nodemanager, webapp | Weiwei Yang | Weiwei Yang |
+| [YARN-6483](https://issues.apache.org/jira/browse/YARN-6483) | Add nodes transitioning to DECOMMISSIONING state to the list of updated nodes returned to the AM |  Major | resourcemanager | Juan Rodríguez Hortalá | Juan Rodríguez Hortalá |
+| [HADOOP-15056](https://issues.apache.org/jira/browse/HADOOP-15056) | Fix TestUnbuffer#testUnbufferException failure |  Minor | test | Jack Bearden | Jack Bearden |
+| [HADOOP-15012](https://issues.apache.org/jira/browse/HADOOP-15012) | Add readahead, dropbehind, and unbuffer to StreamCapabilities |  Major | fs | John Zhuge | John Zhuge |
+| [HADOOP-15104](https://issues.apache.org/jira/browse/HADOOP-15104) | AliyunOSS: change the default value of max error retry |  Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-12910](https://issues.apache.org/jira/browse/HDFS-12910) | Secure Datanode Starter should log the port when it fails to bind |  Minor | datanode | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-12819](https://issues.apache.org/jira/browse/HDFS-12819) | Setting/Unsetting EC policy shows warning if the directory is not empty |  Minor | erasure-coding | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-12927](https://issues.apache.org/jira/browse/HDFS-12927) | Update erasure coding doc to address unsupported APIs |  Major | erasure-coding | Le

hadoop git commit: Update releasenotes and changelogs for 3.0.1 release

2018-03-23 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.1 496dc57cc -> 55f778a3a


Update releasenotes and changelogs for 3.0.1 release


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55f778a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55f778a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55f778a3

Branch: refs/heads/branch-3.0.1
Commit: 55f778a3a081f46cfc4dc1fd5a190e96746f272a
Parents: 496dc57
Author: Lei Xu <l...@cloudera.com>
Authored: Fri Mar 23 11:43:09 2018 -0700
Committer: Lei Xu <l...@cloudera.com>
Committed: Fri Mar 23 11:43:09 2018 -0700

--
 .../markdown/release/3.0.1/CHANGES.3.0.1.md | 241 +++
 .../release/3.0.1/RELEASENOTES.3.0.1.md |  54 +
 2 files changed, 295 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f778a3/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
new file mode 100644
index 0000000..d24a8f4
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.1/CHANGES.3.0.1.md
@@ -0,0 +1,241 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.0.1 - 2018-03-16
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-12990](https://issues.apache.org/jira/browse/HDFS-12990) | Change default NameNode RPC port back to 8020 |  Critical | namenode | Xiao Chen | Xiao Chen |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client |  Major | federation | tartarus | tartarus |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-14872](https://issues.apache.org/jira/browse/HADOOP-14872) | CryptoInputStream should implement unbuffer |  Major | fs, security | John Zhuge | John Zhuge |
+| [YARN-7414](https://issues.apache.org/jira/browse/YARN-7414) | FairScheduler#getAppWeight() should be moved into FSAppAttempt#getWeight() |  Minor | fairscheduler | Daniel Templeton | Soumabrata Chakraborty |
+| [HADOOP-15023](https://issues.apache.org/jira/browse/HADOOP-15023) | ValueQueue should also validate (lowWatermark \* numValues) \> 0 on construction |  Minor | . | Xiao Chen | Xiao Chen |
+| [HDFS-12814](https://issues.apache.org/jira/browse/HDFS-12814) | Add blockId when warning slow mirror/disk in BlockReceiver |  Trivial | hdfs | Jiandan Yang | Jiandan Yang |
+| [YARN-7524](https://issues.apache.org/jira/browse/YARN-7524) | Remove unused FairSchedulerEventLog |  Major | fairscheduler | Wilfred Spiegelenburg | Wilfred Spiegelenburg |
+| [YARN-7495](https://issues.apache.org/jira/browse/YARN-7495) | Improve robustness of the AggregatedLogDeletionService |  Major | log-aggregation | Jonathan Eagles | Jonathan Eagles |
+| [YARN-7611](https://issues.apache.org/jira/browse/YARN-7611) | Node manager web UI should display container type in containers page |  Major | nodemanager, webapp | Weiwei Yang | Weiwei Yang |
+| [YARN-6483](https://issues.apache.org/jira/browse/YARN-6483) | Add nodes transitioning to DECOMMISSIONING state to the list of updated nodes returned to the AM |  Major | resourcemanager | Juan Rodríguez Hortalá | Juan Rodríguez Hortalá |
+| [HADOOP-15056](https://issues.apache.org/jira/browse/HADOOP-15056) | Fix TestUnbuffer#testUnbufferException failure |  Minor | test | Jack Bearden | Jack Bearden |
+| [HADOOP-15012](https://issues.apache.org/jira/browse/HADOOP-15012) | Add readahead, dropbehind, and unbuffer to StreamCapabilities |  Major | fs | John Zhuge | John Zhuge |
+| [HADOOP-15104](https://issues.apache.org/jira/browse/HADOOP-15104) | AliyunOSS: change the default value of max error retry |  Major | fs/oss | wujinhu | wujinhu |
+| [HDFS-12910](https://issues.apache.org/jira/browse/HDFS-12910) | Secure Datanode Starter should log the port when it fails to bind |  Minor | datanode | Stephen O'Donnell | Stephen O'Donnell |
+| [HDFS-12819](https://issues.apache.org/jira/browse/HDFS-12819) | Setting/Unsetting EC policy shows warning if the directory is not empty |  Minor | erasure-coding | Lei (Eddy) Xu | Lei (Eddy) Xu |
+| [HDFS-12927](https://issues.apache.org/jira/browse/HDFS-12927) | Update erasure coding doc to address unsupported APIs |  Major | er

[hadoop] Git Push Summary

2018-03-23 Thread lei
Repository: hadoop
Updated Tags:  refs/tags/rel/release-3.0.1 [created] f8f37e901




[hadoop] Git Push Summary

2018-03-16 Thread lei
Repository: hadoop
Updated Tags:  refs/tags/release-3.0.1-RC1 [created] 9aa609c8d




hadoop git commit: Revert "YARN-7190. Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath. Contributed by Varun Saxena."

2018-03-16 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.1 65c78d35a -> 496dc57cc


Revert "YARN-7190. Ensure only NM classpath in 2.x gets TSv2 related hbase 
jars, not the user classpath. Contributed by Varun Saxena."

This reverts commit 53033c69f3cc05660d3af82995b88b3bc4b11bed.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/496dc57c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/496dc57c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/496dc57c

Branch: refs/heads/branch-3.0.1
Commit: 496dc57cc2e4f4da117f7a8e3840aaeac0c1d2d0
Parents: 65c78d3
Author: Lei Xu <l...@apache.org>
Authored: Fri Mar 16 10:22:19 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Mar 16 10:22:19 2018 -0700

--
 .../resources/assemblies/hadoop-yarn-dist.xml   | 27 --
 hadoop-yarn-project/hadoop-yarn/bin/yarn|  6 
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd|  6 
 .../hadoop-yarn-server-resourcemanager/pom.xml  |  1 -
 .../pom.xml | 23 
 .../hadoop-yarn-server-timelineservice/pom.xml  |  1 -
 hadoop-yarn-project/pom.xml | 38 +++-
 7 files changed, 5 insertions(+), 97 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/496dc57c/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
--
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index 4de6240..289061f 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -186,37 +186,10 @@
[XML markup stripped by the list archive; judging from the surviving path fragments, the hunk removes the assembly entries that staged the hadoop-yarn-server-timelineservice and timelineservice-hbase artifacts (and their tests) under share/hadoop/${hadoop.component}/timelineservice, and drops the matching org.apache.hadoop:hadoop-yarn-server-timelineservice* exclude]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/496dc57c/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index cc5a5e5..0f19989 100755
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -111,8 +111,6 @@ function yarncmd_case
 ;;
 nodemanager)
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-  hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/*"
-  hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/lib/*"
   HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.nodemanager.NodeManager'
   # Backwards compatibility
   if [[ -n "${YARN_NODEMANAGER_HEAPSIZE}" ]]; then
@@ -132,8 +130,6 @@ function yarncmd_case
 ;;
 resourcemanager)
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-  hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/*"
-  hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/lib/*"
   HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'
   # Backwards compatibility
   if [[ -n "${YARN_RESOURCEMANAGER_HEAPSIZE}" ]]; then
@@ -159,8 +155,6 @@ function yarncmd_case
 ;;
 timelinereader)
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
-  hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/*"
-  hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/lib/*"
   HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderServer'
 ;;
 timelineserver)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/496dc57c/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index e1ac112..fed3d90 100644
--- a/hadoop-yarn-project/hadoop-

hadoop git commit: Revert "HADOOP-13707. If kerberos is enabled while HTTP SPNEGO is not configured, some links cannot be accessed. Contributed by Yuanbo Liu"

2018-03-15 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.1 3b922cdb0 -> 94b0df839


Revert "HADOOP-13707. If kerberos is enabled while HTTP SPNEGO is not 
configured, some links cannot be accessed. Contributed by Yuanbo Liu"

Change-Id: I946a466a43d56c73bb0135384e73cb8513595461
(cherry picked from commit 80ee5248b2dda1cb8d122d4f362f2f8cf02b9467)
(cherry picked from commit 252c2b4d52e0dd8984d6f2a8f292f40e1c347fab)
(cherry picked from commit 78f103d6e623692d00060e554dead5473b18d4ca)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94b0df83
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94b0df83
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94b0df83

Branch: refs/heads/branch-3.0.1
Commit: 94b0df839d36cf5d5e927b3642566c67d0689474
Parents: 3b922cd
Author: Wangda Tan <wan...@apache.org>
Authored: Wed Mar 14 10:47:35 2018 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Thu Mar 15 11:57:31 2018 -0700

--
 .../org/apache/hadoop/conf/ConfServlet.java |  8 +
 .../hadoop/http/AdminAuthorizedServlet.java | 11 ++-
 .../org/apache/hadoop/http/HttpServer2.java | 32 ++--
 .../org/apache/hadoop/jmx/JMXJsonServlet.java   |  8 +
 .../java/org/apache/hadoop/log/LogLevel.java| 11 ++-
 .../org/apache/hadoop/http/TestHttpServer.java  | 17 +--
 6 files changed, 12 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94b0df83/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
index 2128de7..cce744e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.conf;
 import java.io.IOException;
 import java.io.Writer;
 
-import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
@@ -59,12 +58,7 @@ public class ConfServlet extends HttpServlet {
   public void doGet(HttpServletRequest request, HttpServletResponse response)
   throws ServletException, IOException {
 
-// If user is a static user and auth Type is null, that means
-// there is a non-security environment and no need authorization,
-// otherwise, do the authorization.
-final ServletContext servletContext = getServletContext();
-if (!HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) &&
-!HttpServer2.isInstrumentationAccessAllowed(servletContext,
+if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
request, response)) {
   return;
 }
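
For readers scanning the revert: the restored pattern is the standard HttpServer2 instrumentation guard, checked before any output is produced. A minimal sketch of a servlet using it (the class GuardedInfoServlet is hypothetical; only HttpServer2.isInstrumentationAccessAllowed, shown in the hunk above, is real):

import java.io.IOException;

import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.http.HttpServer2;

// Hypothetical servlet illustrating the guard this revert restores: check
// instrumentation access up front and bail out early, with no static-user
// bypass (that bypass is what the removed lines above implemented).
public class GuardedInfoServlet extends HttpServlet {
  @Override
  public void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    // The helper writes the error response itself when access is denied,
    // so the servlet only needs to stop processing.
    if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
        request, response)) {
      return;
    }
    response.getWriter().println("servlet-specific output goes here");
  }
}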

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94b0df83/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
index 456e89f..a4b05a1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.http;
 
 import java.io.IOException;
 
-import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
@@ -36,13 +35,9 @@ public class AdminAuthorizedServlet extends DefaultServlet {
 
   @Override
   protected void doGet(HttpServletRequest request, HttpServletResponse response)
-  throws ServletException, IOException {
-// If user is a static user and auth Type is null, that means
-// there is a non-security environment and no need authorization,
-// otherwise, do the authorization.
-final ServletContext servletContext = getServletContext();
-if (HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) ||
-HttpServer2.hasAdministratorAccess(servletContext, request,
+ throws ServletException, IOException {
+// Do the authorization
+if (HttpServer2.hasAdministratorAccess(getServletCont

[3/3] hadoop git commit: Updated timeline reader to use AuthenticationFilter

2018-03-10 Thread lei
Updated timeline reader to use AuthenticationFilter

Change-Id: I961771589180c1eb377d36c37a79aa23754effbf
(cherry picked from commit 837338788eb903d0e8bbb1230694782a707891be)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eaf7b038
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eaf7b038
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eaf7b038

Branch: refs/heads/branch-3.0.1
Commit: eaf7b0382d0de5627e21c7027d7328305593e1ec
Parents: 493924d
Author: Wangda Tan <wan...@apache.org>
Authored: Thu Mar 8 09:23:45 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Sat Mar 10 13:46:31 2018 -0800

--
 .../TimelineReaderAuthenticationFilterInitializer.java| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eaf7b038/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
index e0e1f4d..6a3658d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
@@ -20,11 +20,11 @@ package org.apache.hadoop.yarn.server.timelineservice.reader.security;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
-import org.apache.hadoop.security.AuthenticationWithProxyUserFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
 
 /**
- * Filter initializer to initialize {@link AuthenticationWithProxyUserFilter}
+ * Filter initializer to initialize {@link AuthenticationFilter}
  * for ATSv2 timeline reader server with timeline service specific
  * configurations.
  */
@@ -32,9 +32,9 @@ public class TimelineReaderAuthenticationFilterInitializer extends
     TimelineAuthenticationFilterInitializer{
 
   /**
-   * Initializes {@link AuthenticationWithProxyUserFilter}
+   * Initializes {@link AuthenticationFilter}
    *
-   * Propagates to {@link AuthenticationWithProxyUserFilter} configuration all
+   * Propagates to {@link AuthenticationFilter} configuration all
    * YARN configuration properties prefixed with
    * {@value TimelineAuthenticationFilterInitializer#PREFIX}.
    *
@@ -47,7 +47,7 @@ public class TimelineReaderAuthenticationFilterInitializer extends
   public void initFilter(FilterContainer container, Configuration conf) {
     setAuthFilterConfig(conf);
     container.addGlobalFilter("Timeline Reader Authentication Filter",
-        AuthenticationWithProxyUserFilter.class.getName(),
+        AuthenticationFilter.class.getName(),
         getFilterConfig());
   }
 }
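
The javadoc above says the initializer propagates every YARN property carrying the TimelineAuthenticationFilterInitializer PREFIX into the filter's configuration. A toy sketch of that propagation idea (not the Hadoop implementation; the prefix string below is an assumption based on the {@value} reference):

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;

public class PrefixPropagationSketch {
  // Collect every property under the given prefix, stripping the prefix so
  // the servlet filter sees short parameter names such as "type".
  static Map<String, String> filterConfig(Configuration conf, String prefix) {
    Map<String, String> out = new HashMap<>();
    for (Map.Entry<String, String> e : conf) {
      if (e.getKey().startsWith(prefix)) {
        out.put(e.getKey().substring(prefix.length()), e.getValue());
      }
    }
    return out;
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Assumed prefix; the authoritative value is
    // TimelineAuthenticationFilterInitializer#PREFIX.
    String prefix = "yarn.timeline-service.http-authentication.";
    conf.set(prefix + "type", "kerberos");
    System.out.println(filterConfig(conf, prefix)); // prints {type=kerberos}
  }
}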





[1/3] hadoop git commit: Revert "HADOOP-14077. Add ability to access jmx via proxy. Contributed by Yuanbo Liu."

2018-03-10 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.1 6b23e5dc2 -> eaf7b0382


Revert "HADOOP-14077. Add ability to access jmx via proxy.  Contributed by 
Yuanbo Liu."

This reverts commit 172b23af33554b7d58fd41b022d983bcc2433da7.

(cherry picked from commit d0d2d4c51e9534e08893ae14cf3fff7b2ee70b1d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f49b044c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f49b044c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f49b044c

Branch: refs/heads/branch-3.0.1
Commit: f49b044cbf08e797b25c6e0c8e6ede98e904773a
Parents: 6b23e5d
Author: Owen O'Malley <omal...@apache.org>
Authored: Thu Mar 1 09:59:08 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Sat Mar 10 13:46:07 2018 -0800

--
 .../AuthenticationWithProxyUserFilter.java  |  43 ---
 .../hadoop/http/TestHttpServerWithSpengo.java   |  15 +--
 .../mapreduce/v2/app/webapp/AppController.java  |   7 +-
 .../hadoop/yarn/server/webapp/AppBlock.java | 113 ++-
 4 files changed, 85 insertions(+), 93 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f49b044c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
index c97f8ad..ea9b282 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
@@ -20,10 +20,9 @@ package org.apache.hadoop.security;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.http.NameValuePair;
 import org.apache.http.client.utils.URLEncodedUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import javax.servlet.FilterChain;
 import javax.servlet.ServletException;
@@ -42,9 +41,6 @@ import java.util.List;
  */
 public class AuthenticationWithProxyUserFilter extends AuthenticationFilter {
 
-  public static final Logger LOG =
-  LoggerFactory.getLogger(AuthenticationWithProxyUserFilter.class);
-
   /**
* Constant used in URL's query string to perform a proxy user request, the
* value of the DO_AS parameter is the user the request will be
@@ -70,30 +66,29 @@ public class AuthenticationWithProxyUserFilter extends AuthenticationFilter {
   protected void doFilter(FilterChain filterChain, HttpServletRequest request,
   HttpServletResponse response) throws IOException, ServletException {
 
-final String proxyUser = getDoAs(request);
+// authorize proxy user before calling next filter.
+String proxyUser = getDoAs(request);
 if (proxyUser != null) {
+  UserGroupInformation realUser =
+  UserGroupInformation.createRemoteUser(request.getRemoteUser());
+  UserGroupInformation proxyUserInfo =
+  UserGroupInformation.createProxyUser(proxyUser, realUser);
 
-  // Change the remote user after proxy user is authorized.
-  final HttpServletRequest finalReq = request;
-  request = new HttpServletRequestWrapper(finalReq) {
-
-private String getRemoteOrProxyUser() throws AuthorizationException {
-  UserGroupInformation realUser =
-  UserGroupInformation.createRemoteUser(finalReq.getRemoteUser());
-  UserGroupInformation proxyUserInfo =
-  UserGroupInformation.createProxyUser(proxyUser, realUser);
-  ProxyUsers.authorize(proxyUserInfo, finalReq.getRemoteAddr());
-  return proxyUserInfo.getUserName();
-}
+  try {
+ProxyUsers.authorize(proxyUserInfo, request.getRemoteAddr());
+  } catch (AuthorizationException ex) {
+HttpExceptionUtils.createServletExceptionResponse(response,
+HttpServletResponse.SC_FORBIDDEN, ex);
+// stop filter chain if there is an Authorization Exception.
+return;
+  }
 
+  final UserGroupInformation finalProxyUser = proxyUserInfo;
+  // Change the remote user after proxy user is authorized.
+  request = new HttpServletRequestWrapper(request) {
 @Override
 public String getRemoteUser() {
-  try {
-return getRemoteOrProxyUser();
-  } catch (

[2/3] hadoop git commit: Revert "HADOOP-13119. Add ability to secure log servlet using proxy users. Contribute by Yuanbo Liu."

2018-03-10 Thread lei
Revert "HADOOP-13119. Add ability to secure log servlet using proxy users.  
Contribute by Yuanbo Liu."

This reverts commit a847903b6e64c6edb11d852b91f2c816b1253eb3.

Change-Id: I3122a2142f5bdf8507dece930e447556a43cd9ae
(cherry picked from commit 8fad3ec76070ccfcd3ed80feaba4355077bc6f5c)

Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/493924de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/493924de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/493924de

Branch: refs/heads/branch-3.0.1
Commit: 493924def8f184e74d57153afc9c22b0eeb95e9c
Parents: f49b044
Author: Owen O'Malley <omal...@apache.org>
Authored: Thu Mar 1 10:15:22 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Sat Mar 10 13:46:18 2018 -0800

--
 .../AuthenticationFilterInitializer.java|   9 +-
 .../AuthenticationWithProxyUserFilter.java  | 119 -
 .../hadoop/http/TestHttpServerWithSpengo.java   | 480 ---
 .../security/TestAuthenticationFilter.java  |  13 +-
 .../TestAuthenticationWithProxyUserFilter.java  |  79 ---
 5 files changed, 13 insertions(+), 687 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/493924de/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
index 65d2211..ca221f5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
@@ -29,9 +29,8 @@ import java.util.HashMap;
 import java.util.Map;
 
 /**
- * Initializes {@link AuthenticationWithProxyUserFilter}
- * which provides support for Kerberos HTTP SPNEGO authentication
- * and proxy user authentication.
+ * Initializes hadoop-auth AuthenticationFilter which provides support for
+ * Kerberos HTTP SPNEGO authentication.
  * 
  * It enables anonymous access, simple/speudo and Kerberos HTTP SPNEGO
  * authentication  for Hadoop JobTracker, NameNode, DataNodes and
@@ -59,10 +58,8 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
   public void initFilter(FilterContainer container, Configuration conf) {
 Map<String, String> filterConfig = getFilterConfigMap(conf, PREFIX);
 
-// extend AuthenticationFilter's feature to
-// support proxy user operation.
 container.addFilter("authentication",
-AuthenticationWithProxyUserFilter.class.getName(),
+AuthenticationFilter.class.getName(),
 filterConfig);
   }
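
After this revert, deployments that want SPNEGO on the web endpoints select this initializer through the standard hadoop.http.filter.initializers setting; a short sketch (the class EnableAuthFilter is illustrative, the key and initializer class are real):

import org.apache.hadoop.conf.Configuration;

public class EnableAuthFilter {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("hadoop.http.filter.initializers",
        "org.apache.hadoop.security.AuthenticationFilterInitializer");
    // HttpServer2 instances built with this conf invoke initFilter() above,
    // which now registers the plain hadoop-auth AuthenticationFilter.
    System.out.println(conf.get("hadoop.http.filter.initializers"));
  }
}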
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/493924de/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
deleted file mode 100644
index ea9b282..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.security;
-
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache

[2/3] hadoop git commit: Revert "HADOOP-13119. Add ability to secure log servlet using proxy users. Contribute by Yuanbo Liu."

2018-03-10 Thread lei
Revert "HADOOP-13119. Add ability to secure log servlet using proxy users.  
Contribute by Yuanbo Liu."

This reverts commit a847903b6e64c6edb11d852b91f2c816b1253eb3.

Change-Id: I3122a2142f5bdf8507dece930e447556a43cd9ae
(cherry picked from commit 8fad3ec76070ccfcd3ed80feaba4355077bc6f5c)

Conflicts:

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f3b6102
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f3b6102
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f3b6102

Branch: refs/heads/branch-3.0
Commit: 0f3b6102efd54f58020af601329fe70547df1120
Parents: 0f106df
Author: Owen O'Malley <omal...@apache.org>
Authored: Thu Mar 1 10:15:22 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Sat Mar 10 13:35:47 2018 -0800

--
 .../AuthenticationFilterInitializer.java|   9 +-
 .../AuthenticationWithProxyUserFilter.java  | 119 -
 .../hadoop/http/TestHttpServerWithSpengo.java   | 480 ---
 .../security/TestAuthenticationFilter.java  |  13 +-
 .../TestAuthenticationWithProxyUserFilter.java  |  79 ---
 5 files changed, 13 insertions(+), 687 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f3b6102/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
index 65d2211..ca221f5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
@@ -29,9 +29,8 @@ import java.util.HashMap;
 import java.util.Map;
 
 /**
- * Initializes {@link AuthenticationWithProxyUserFilter}
- * which provides support for Kerberos HTTP SPNEGO authentication
- * and proxy user authentication.
+ * Initializes hadoop-auth AuthenticationFilter which provides support for
+ * Kerberos HTTP SPNEGO authentication.
  * 
  * It enables anonymous access, simple/speudo and Kerberos HTTP SPNEGO
  * authentication  for Hadoop JobTracker, NameNode, DataNodes and
@@ -59,10 +58,8 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
   public void initFilter(FilterContainer container, Configuration conf) {
 Map<String, String> filterConfig = getFilterConfigMap(conf, PREFIX);
 
-// extend AuthenticationFilter's feature to
-// support proxy user operation.
 container.addFilter("authentication",
-AuthenticationWithProxyUserFilter.class.getName(),
+AuthenticationFilter.class.getName(),
 filterConfig);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f3b6102/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
deleted file mode 100644
index ea9b282..0000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.security;
-
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache

[1/3] hadoop git commit: Revert "HADOOP-14077. Add ability to access jmx via proxy. Contributed by Yuanbo Liu."

2018-03-10 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 2e607f210 -> 8034faa88


Revert "HADOOP-14077. Add ability to access jmx via proxy.  Contributed by 
Yuanbo Liu."

This reverts commit 172b23af33554b7d58fd41b022d983bcc2433da7.

(cherry picked from commit d0d2d4c51e9534e08893ae14cf3fff7b2ee70b1d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f106df4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f106df4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f106df4

Branch: refs/heads/branch-3.0
Commit: 0f106df4d6de589650c6acdfb34049ae17ac55b0
Parents: 2e607f2
Author: Owen O'Malley <omal...@apache.org>
Authored: Thu Mar 1 09:59:08 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Sat Mar 10 13:33:52 2018 -0800

--
 .../AuthenticationWithProxyUserFilter.java  |  43 ---
 .../hadoop/http/TestHttpServerWithSpengo.java   |  15 +--
 .../mapreduce/v2/app/webapp/AppController.java  |   7 +-
 .../hadoop/yarn/server/webapp/AppBlock.java | 113 ++-
 4 files changed, 85 insertions(+), 93 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f106df4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
index c97f8ad..ea9b282 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
@@ -20,10 +20,9 @@ package org.apache.hadoop.security;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.http.NameValuePair;
 import org.apache.http.client.utils.URLEncodedUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import javax.servlet.FilterChain;
 import javax.servlet.ServletException;
@@ -42,9 +41,6 @@ import java.util.List;
  */
 public class AuthenticationWithProxyUserFilter extends AuthenticationFilter {
 
-  public static final Logger LOG =
-  LoggerFactory.getLogger(AuthenticationWithProxyUserFilter.class);
-
   /**
* Constant used in URL's query string to perform a proxy user request, the
* value of the DO_AS parameter is the user the request will be
@@ -70,30 +66,29 @@ public class AuthenticationWithProxyUserFilter extends AuthenticationFilter {
   protected void doFilter(FilterChain filterChain, HttpServletRequest request,
   HttpServletResponse response) throws IOException, ServletException {
 
-final String proxyUser = getDoAs(request);
+// authorize proxy user before calling next filter.
+String proxyUser = getDoAs(request);
 if (proxyUser != null) {
+  UserGroupInformation realUser =
+  UserGroupInformation.createRemoteUser(request.getRemoteUser());
+  UserGroupInformation proxyUserInfo =
+  UserGroupInformation.createProxyUser(proxyUser, realUser);
 
-  // Change the remote user after proxy user is authorized.
-  final HttpServletRequest finalReq = request;
-  request = new HttpServletRequestWrapper(finalReq) {
-
-private String getRemoteOrProxyUser() throws AuthorizationException {
-  UserGroupInformation realUser =
-  UserGroupInformation.createRemoteUser(finalReq.getRemoteUser());
-  UserGroupInformation proxyUserInfo =
-  UserGroupInformation.createProxyUser(proxyUser, realUser);
-  ProxyUsers.authorize(proxyUserInfo, finalReq.getRemoteAddr());
-  return proxyUserInfo.getUserName();
-}
+  try {
+ProxyUsers.authorize(proxyUserInfo, request.getRemoteAddr());
+  } catch (AuthorizationException ex) {
+HttpExceptionUtils.createServletExceptionResponse(response,
+HttpServletResponse.SC_FORBIDDEN, ex);
+// stop filter chain if there is an Authorization Exception.
+return;
+  }
 
+  final UserGroupInformation finalProxyUser = proxyUserInfo;
+  // Change the remote user after proxy user is authorized.
+  request = new HttpServletRequestWrapper(request) {
 @Override
 public String getRemoteUser() {
-  try {
-return getRemoteOrProxyUser();
-  } catch (

[3/3] hadoop git commit: Updated timeline reader to use AuthenticationFilter

2018-03-10 Thread lei
Updated timeline reader to use AuthenticationFilter

Change-Id: I961771589180c1eb377d36c37a79aa23754effbf
(cherry picked from commit 837338788eb903d0e8bbb1230694782a707891be)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8034faa8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8034faa8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8034faa8

Branch: refs/heads/branch-3.0
Commit: 8034faa8885d0a77cfec855a0e04cb59985af7e3
Parents: 0f3b610
Author: Wangda Tan <wan...@apache.org>
Authored: Thu Mar 8 09:23:45 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Sat Mar 10 13:36:09 2018 -0800

--
 .../TimelineReaderAuthenticationFilterInitializer.java| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8034faa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
index e0e1f4d..6a3658d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
@@ -20,11 +20,11 @@ package 
org.apache.hadoop.yarn.server.timelineservice.reader.security;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
-import org.apache.hadoop.security.AuthenticationWithProxyUserFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import 
org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;
 
 /**
- * Filter initializer to initialize {@link AuthenticationWithProxyUserFilter}
+ * Filter initializer to initialize {@link AuthenticationFilter}
  * for ATSv2 timeline reader server with timeline service specific
  * configurations.
  */
@@ -32,9 +32,9 @@ public class TimelineReaderAuthenticationFilterInitializer 
extends
 TimelineAuthenticationFilterInitializer{
 
   /**
-   * Initializes {@link AuthenticationWithProxyUserFilter}
+   * Initializes {@link AuthenticationFilter}
   * <p>
-   * Propagates to {@link AuthenticationWithProxyUserFilter} configuration all
+   * Propagates to {@link AuthenticationFilter} configuration all
   * YARN configuration properties prefixed with
   * {@value TimelineAuthenticationFilterInitializer#PREFIX}.
   *
@@ -47,7 +47,7 @@ public class TimelineReaderAuthenticationFilterInitializer 
extends
   public void initFilter(FilterContainer container, Configuration conf) {
 setAuthFilterConfig(conf);
 container.addGlobalFilter("Timeline Reader Authentication Filter",
-AuthenticationWithProxyUserFilter.class.getName(),
+AuthenticationFilter.class.getName(),
 getFilterConfig());
   }
 }


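For reference, the filter-initializer pattern this commit switches back to works as sketched below: collect configuration keys under a known prefix, then register the filter globally on the HTTP server. FilterInitializer, FilterContainer, and AuthenticationFilter are the real Hadoop types; the class name and the example.http.authentication. prefix are hypothetical.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.FilterContainer;
    import org.apache.hadoop.http.FilterInitializer;
    import org.apache.hadoop.security.authentication.server.AuthenticationFilter;

    public class ExampleAuthFilterInitializer extends FilterInitializer {
      private static final String PREFIX = "example.http.authentication.";

      @Override
      public void initFilter(FilterContainer container, Configuration conf) {
        Map<String, String> params = new HashMap<>();
        for (Map.Entry<String, String> entry : conf) {
          String name = entry.getKey();
          if (name.startsWith(PREFIX)) {
            // The filter sees plain parameter names with the prefix stripped.
            params.put(name.substring(PREFIX.length()), entry.getValue());
          }
        }
        container.addGlobalFilter("Example Authentication Filter",
            AuthenticationFilter.class.getName(), params);
      }
    }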



[hadoop] Git Push Summary

2018-02-15 Thread lei
Repository: hadoop
Updated Tags:  refs/tags/release-3.0.1-RC0 [created] 88191e920




hadoop git commit: Update version to 3.0.1 for release.

2018-02-08 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.1 4a3e96a6b -> e716b4359


Update version to 3.0.1 for release.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e716b435
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e716b435
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e716b435

Branch: refs/heads/branch-3.0.1
Commit: e716b4359b328d25429e5e2b3f3dabd843c1c9d9
Parents: 4a3e96a
Author: Lei Xu <l...@apache.org>
Authored: Thu Feb 8 12:02:05 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Thu Feb 8 12:02:05 2018 -0800

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-resourceestimator/pom.xml| 2 +-
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-t

hadoop git commit: Preparing for 3.0.2 development

2018-02-06 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 07f779483 -> 7b377f43d


Preparing for 3.0.2 development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b377f43
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b377f43
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b377f43

Branch: refs/heads/branch-3.0
Commit: 7b377f43d00c16686b9cc4e2e2531f8152d5b51d
Parents: 07f7794
Author: Lei Xu <l...@apache.org>
Authored: Tue Feb 6 10:50:37 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Tue Feb 6 10:50:37 2018 -0800

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-resourceestimator/pom.xml| 2 +-
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/had

hadoop git commit: HDFS-12997. Move logging to slf4j in BlockPoolSliceStorage and Storage. Contributed by Ajay Kumar.

2018-02-05 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.1 7ea100fcb -> 1807c19b1


HDFS-12997. Move logging to slf4j in BlockPoolSliceStorage and Storage. 
Contributed by Ajay Kumar.

(cherry picked from commit b3ae11d59790bb08b81848e9f944db7d3afbbd8a)
(cherry picked from commit 784b4541226b4b363eb2580a120ae0859fb756cc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1807c19b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1807c19b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1807c19b

Branch: refs/heads/branch-3.0.1
Commit: 1807c19b1fd581beff20acb6a9fc4ba3aca8dd02
Parents: 7ea100f
Author: Xiaoyu Yao <x...@apache.org>
Authored: Wed Jan 31 23:10:54 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Mon Feb 5 17:43:32 2018 -0800

--
 .../hadoop/hdfs/qjournal/server/JNStorage.java  |   9 +-
 .../hadoop/hdfs/server/common/Storage.java  |  75 ++--
 .../server/datanode/BlockPoolSliceStorage.java  |  88 +++---
 .../hdfs/server/datanode/DataStorage.java   | 115 +--
 .../hadoop/hdfs/server/namenode/NNStorage.java  |  62 +-
 .../datanode/TestBlockPoolSliceStorage.java |  14 ++-
 6 files changed, 180 insertions(+), 183 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1807c19b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
index 7226cae..6bf4903 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
@@ -193,10 +193,9 @@ class JNStorage extends Storage {
   // /\d+/ in the regex itself.
   long txid = Long.parseLong(matcher.group(1));
   if (txid < minTxIdToKeep) {
-LOG.info("Purging no-longer needed file " + txid);
+LOG.info("Purging no-longer needed file {}", txid);
 if (!f.delete()) {
-  LOG.warn("Unable to delete no-longer-needed data " +
-  f);
+  LOG.warn("Unable to delete no-longer-needed data {}", f);
 }
 break;
   }
@@ -214,7 +213,7 @@ class JNStorage extends Storage {
 }
 setStorageInfo(nsInfo);
 
-LOG.info("Formatting journal " + sd + " with nsid: " + getNamespaceID());
+LOG.info("Formatting journal {} with nsid: {}", sd, getNamespaceID());
 // Unlock the directory before formatting, because we will
 // re-analyze it after format(). The analyzeStorage() call
 // below is responsible for re-locking it. This is a no-op
@@ -278,7 +277,7 @@ class JNStorage extends Storage {
   }
 
   public void close() throws IOException {
-LOG.info("Closing journal storage for " + sd);
+LOG.info("Closing journal storage for {}", sd);
 unlockAll();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1807c19b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 414d3a7..cb7f979 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -35,8 +35,6 @@ import java.util.Properties;
 import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -52,7 +50,8 @@ import org.apache.hadoop.util.VersionInfo;
 
 import com.google.common.base.Charsets;
 import com.google.common.base.Preconditions;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 /**
@@ -75,7 +74,9 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 public abstract class Storage extends StorageInfo {
-  public static final Log LOG = LogFactory.getLog(Storage.class.getName());
+
+  public static final Logger LOG 

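The migration above is mechanical throughout the patch: commons-logging Log/LogFactory becomes slf4j Logger/LoggerFactory, and string concatenation becomes {} placeholders, which defers message formatting until the log level is known to be enabled. A condensed before/after sketch (the purge() helper is illustrative; the log lines are taken from the JNStorage hunk above):

    import java.io.File;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class LoggingMigrationSketch {
      // Before: private static final Log LOG = LogFactory.getLog(...);
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingMigrationSketch.class);

      static void purge(long txid, File f) {
        // Before: LOG.info("Purging no-longer needed file " + txid);
        // After: no concatenation happens unless INFO is actually enabled.
        LOG.info("Purging no-longer needed file {}", txid);
        if (!f.delete()) {
          LOG.warn("Unable to delete no-longer-needed data {}", f);
        }
      }
    }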
[hadoop] Git Push Summary

2018-02-01 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.1 [created] 7491aaeb8




hadoop git commit: HDFS-13039. StripedBlockReader.createBlockReader leaks socket on IOException. (Lei (Eddy) Xu)

2018-01-19 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk c191538ed -> 2ed9d61aa


HDFS-13039. StripedBlockReader.createBlockReader leaks socket on IOException. 
(Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ed9d61a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ed9d61a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ed9d61a

Branch: refs/heads/trunk
Commit: 2ed9d61aadaffbf56ae0ed124520edab97646b49
Parents: c191538
Author: Lei Xu <l...@apache.org>
Authored: Fri Jan 19 16:40:58 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Fri Jan 19 16:40:58 2018 -0800

--
 .../hdfs/server/datanode/erasurecode/StripedBlockReader.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ed9d61a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
index 39ef67e..5e77de5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
@@ -105,6 +105,7 @@ class StripedBlockReader {
 if (offsetInBlock >= block.getNumBytes()) {
   return null;
 }
+Peer peer = null;
 try {
   InetSocketAddress dnAddr =
   stripedReader.getSocketAddress4Transfer(source);
@@ -120,7 +121,7 @@ class StripedBlockReader {
  *
  * TODO: add proper tracer
  */
-  Peer peer = newConnectedPeer(block, dnAddr, blockToken, source);
+  peer = newConnectedPeer(block, dnAddr, blockToken, source);
   if (peer.isLocal()) {
 this.isLocal = true;
   }
@@ -131,6 +132,7 @@ class StripedBlockReader {
 } catch (IOException e) {
   LOG.info("Exception while creating remote block reader, datanode {}",
   source, e);
+  IOUtils.closeStream(peer);
   return null;
 }
   }


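The shape of the fix generalizes: when a resource is acquired inside a try block and a later statement in the same block can still throw, declare the resource before the try so the catch handler can release it. A self-contained sketch of the pattern, assuming only Hadoop's IOUtils (openPeer() and buildReader() are hypothetical stand-ins for newConnectedPeer() and the reader construction):

    import java.io.Closeable;
    import java.io.IOException;
    import org.apache.hadoop.io.IOUtils;

    final class LeakFixSketch {
      static Object createReader() {
        Closeable peer = null;   // declared outside try, visible to catch
        try {
          peer = openPeer();             // may succeed...
          return buildReader(peer);      // ...while this still throws
        } catch (IOException e) {
          // Without this close, a peer opened before the failure leaks its
          // socket. closeStream() is null-safe.
          IOUtils.closeStream(peer);
          return null;
        }
      }

      private static Closeable openPeer() throws IOException {
        return () -> { };   // stand-in for a connected Peer
      }

      private static Object buildReader(Closeable peer) throws IOException {
        throw new IOException("simulated failure after the peer was opened");
      }
    }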



hadoop git commit: HDFS-13039. StripedBlockReader.createBlockReader leaks socket on IOException. (Lei (Eddy) Xu)

2018-01-19 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 fc669778b -> d33050b8c


HDFS-13039. StripedBlockReader.createBlockReader leaks socket on IOException. 
(Lei (Eddy) Xu)

(cherry picked from commit 2ed9d61aadaffbf56ae0ed124520edab97646b49)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d33050b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d33050b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d33050b8

Branch: refs/heads/branch-3.0
Commit: d33050b8cd30bae1afa3513d3dd2025abf361945
Parents: fc66977
Author: Lei Xu <l...@apache.org>
Authored: Fri Jan 19 16:40:58 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Fri Jan 19 16:42:22 2018 -0800

--
 .../hdfs/server/datanode/erasurecode/StripedBlockReader.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d33050b8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
index 39ef67e..5e77de5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
@@ -105,6 +105,7 @@ class StripedBlockReader {
 if (offsetInBlock >= block.getNumBytes()) {
   return null;
 }
+Peer peer = null;
 try {
   InetSocketAddress dnAddr =
   stripedReader.getSocketAddress4Transfer(source);
@@ -120,7 +121,7 @@ class StripedBlockReader {
  *
  * TODO: add proper tracer
  */
-  Peer peer = newConnectedPeer(block, dnAddr, blockToken, source);
+  peer = newConnectedPeer(block, dnAddr, blockToken, source);
   if (peer.isLocal()) {
 this.isLocal = true;
   }
@@ -131,6 +132,7 @@ class StripedBlockReader {
 } catch (IOException e) {
   LOG.info("Exception while creating remote block reader, datanode {}",
   source, e);
+  IOUtils.closeStream(peer);
   return null;
 }
   }





hadoop git commit: HDFS-13039. StripedBlockReader.createBlockReader leaks socket on IOException. (Lei (Eddy) Xu)

2018-01-19 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3 702d280c8 -> a2718c955


HDFS-13039. StripedBlockReader.createBlockReader leaks socket on IOException. 
(Lei (Eddy) Xu)

(cherry picked from commit 2ed9d61aadaffbf56ae0ed124520edab97646b49)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2718c95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2718c95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2718c95

Branch: refs/heads/branch-3
Commit: a2718c9557aee61edff8f68b373a2f746f00880a
Parents: 702d280
Author: Lei Xu <l...@apache.org>
Authored: Fri Jan 19 16:40:58 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Fri Jan 19 16:41:50 2018 -0800

--
 .../hdfs/server/datanode/erasurecode/StripedBlockReader.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2718c95/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
index 39ef67e..5e77de5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReader.java
@@ -105,6 +105,7 @@ class StripedBlockReader {
 if (offsetInBlock >= block.getNumBytes()) {
   return null;
 }
+Peer peer = null;
 try {
   InetSocketAddress dnAddr =
   stripedReader.getSocketAddress4Transfer(source);
@@ -120,7 +121,7 @@ class StripedBlockReader {
  *
  * TODO: add proper tracer
  */
-  Peer peer = newConnectedPeer(block, dnAddr, blockToken, source);
+  peer = newConnectedPeer(block, dnAddr, blockToken, source);
   if (peer.isLocal()) {
 this.isLocal = true;
   }
@@ -131,6 +132,7 @@ class StripedBlockReader {
 } catch (IOException e) {
   LOG.info("Exception while creating remote block reader, datanode {}",
   source, e);
+  IOUtils.closeStream(peer);
   return null;
 }
   }





hadoop git commit: HDFS-13004. TestLeaseRecoveryStriped.testLeaseRecovery is failing when safeLength is 0MB or larger than the test file. (Zsolt Venczel via lei)

2018-01-16 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 370f1c628 -> 3bd9ea63d


HDFS-13004. TestLeaseRecoveryStriped.testLeaseRecovery is failing when 
safeLength is 0MB or larger than the test file. (Zsolt Venczel via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3bd9ea63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3bd9ea63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3bd9ea63

Branch: refs/heads/trunk
Commit: 3bd9ea63df769345a9d02a404cfb61323a4cd7e3
Parents: 370f1c6
Author: Lei Xu <l...@apache.org>
Authored: Tue Jan 16 15:15:11 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Tue Jan 16 15:15:11 2018 -0800

--
 .../java/org/apache/hadoop/hdfs/StripedFileTestUtil.java |  7 ---
 .../org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java | 11 +++
 2 files changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bd9ea63/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 08bf20a..13ca390 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -363,11 +363,12 @@ public class StripedFileTestUtil {
 List<List<LocatedBlock>> blockGroupList = new ArrayList<>();
 LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(srcPath.toString(), 
0L,
 Long.MAX_VALUE);
-int expectedNumGroup = 0;
+
 if (length > 0) {
-  expectedNumGroup = (length - 1) / blkGroupSize + 1;
+  int expectedNumGroup = (length - 1) / blkGroupSize + 1;
+
+  assertEquals(expectedNumGroup, lbs.getLocatedBlocks().size());
 }
-assertEquals(expectedNumGroup, lbs.getLocatedBlocks().size());
 
 final ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(srcPath);
 final int cellSize = ecPolicy.getCellSize();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bd9ea63/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index 36ac8b3..d74f193 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -85,6 +85,7 @@ public class TestLeaseRecoveryStriped {
   private Configuration conf;
   private final Path dir = new Path("/" + this.getClass().getSimpleName());
   final Path p = new Path(dir, "testfile");
+  private final int testFileLength = (stripesPerBlock - 1) * stripeSize;
 
   @Before
   public void setup() throws IOException {
@@ -191,17 +192,20 @@ public class TestLeaseRecoveryStriped {
 
   private void runTest(int[] blockLengths, long safeLength) throws Exception {
 writePartialBlocks(blockLengths);
+
+int checkDataLength = Math.min(testFileLength, (int)safeLength);
+
 recoverLease();
 
 List<Long> oldGS = new ArrayList<>();
 oldGS.add(1001L);
-StripedFileTestUtil.checkData(dfs, p, (int)safeLength,
+StripedFileTestUtil.checkData(dfs, p, checkDataLength,
 new ArrayList(), oldGS, blockGroupSize);
 // After recovery, storages are reported by primary DN. we should verify
 // storages reported by blockReport.
 cluster.restartNameNode(true);
 cluster.waitFirstBRCompleted(0, 1);
-StripedFileTestUtil.checkData(dfs, p, (int)safeLength,
+StripedFileTestUtil.checkData(dfs, p, checkDataLength,
 new ArrayList(), oldGS, blockGroupSize);
   }
 
@@ -219,12 +223,11 @@ public class TestLeaseRecoveryStriped {
 final FSDataOutputStream out = dfs.create(p);
 final DFSStripedOutputStream stripedOut = (DFSStripedOutputStream) out
 .getWrappedStream();
-int length = (stripesPerBlock - 1) * stripeSize;
 int[] posToKill = getPosToKill(blockLengths);
 int checkingPos = nextCheckingPos(posToKill, 0);
 Set<Integer> stoppedStreamerIndexes = new HashSet<>();
 try {
-  for (int pos = 0; pos < length; pos++) {
+  for (int pos = 0; pos < testFileLength; pos++) {
 out.write(StripedFileTestUtil.getByte

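The heart of the fix is a clamp: lease recovery can report a safe length of zero or a value past the end of what the test wrote, so verification reads only the intersection of the two ranges. As a sketch (assuming, as the test does, that safeLength fits in an int):

    final class SafeLengthSketch {
      static int checkDataLength(int testFileLength, long safeLength) {
        // Verify no further than the bytes the test actually wrote, and no
        // further than the length lease recovery declared safe.
        return Math.min(testFileLength, (int) safeLength);
      }
    }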
hadoop git commit: HDFS-13004. TestLeaseRecoveryStriped.testLeaseRecovery is failing when safeLength is 0MB or larger than the test file. (Zsolt Venczel via lei)

2018-01-16 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3 82741091a -> db8345fa9


HDFS-13004. TestLeaseRecoveryStriped.testLeaseRecovery is failing when 
safeLength is 0MB or larger than the test file. (Zsolt Venczel via lei)

(cherry picked from commit 3bd9ea63df769345a9d02a404cfb61323a4cd7e3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db8345fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db8345fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db8345fa

Branch: refs/heads/branch-3
Commit: db8345fa9cd124728d935f725525e2626438b4c1
Parents: 8274109
Author: Lei Xu <l...@apache.org>
Authored: Tue Jan 16 15:15:11 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Tue Jan 16 15:19:09 2018 -0800

--
 .../java/org/apache/hadoop/hdfs/StripedFileTestUtil.java |  7 ---
 .../org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java | 11 +++
 2 files changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db8345fa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 08bf20a..13ca390 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -363,11 +363,12 @@ public class StripedFileTestUtil {
 List<List<LocatedBlock>> blockGroupList = new ArrayList<>();
 LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(srcPath.toString(), 
0L,
 Long.MAX_VALUE);
-int expectedNumGroup = 0;
+
 if (length > 0) {
-  expectedNumGroup = (length - 1) / blkGroupSize + 1;
+  int expectedNumGroup = (length - 1) / blkGroupSize + 1;
+
+  assertEquals(expectedNumGroup, lbs.getLocatedBlocks().size());
 }
-assertEquals(expectedNumGroup, lbs.getLocatedBlocks().size());
 
 final ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(srcPath);
 final int cellSize = ecPolicy.getCellSize();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db8345fa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
index 36ac8b3..d74f193 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
@@ -85,6 +85,7 @@ public class TestLeaseRecoveryStriped {
   private Configuration conf;
   private final Path dir = new Path("/" + this.getClass().getSimpleName());
   final Path p = new Path(dir, "testfile");
+  private final int testFileLength = (stripesPerBlock - 1) * stripeSize;
 
   @Before
   public void setup() throws IOException {
@@ -191,17 +192,20 @@ public class TestLeaseRecoveryStriped {
 
   private void runTest(int[] blockLengths, long safeLength) throws Exception {
 writePartialBlocks(blockLengths);
+
+int checkDataLength = Math.min(testFileLength, (int)safeLength);
+
 recoverLease();
 
 List<Long> oldGS = new ArrayList<>();
 oldGS.add(1001L);
-StripedFileTestUtil.checkData(dfs, p, (int)safeLength,
+StripedFileTestUtil.checkData(dfs, p, checkDataLength,
 new ArrayList(), oldGS, blockGroupSize);
 // After recovery, storages are reported by primary DN. we should verify
 // storages reported by blockReport.
 cluster.restartNameNode(true);
 cluster.waitFirstBRCompleted(0, 1);
-StripedFileTestUtil.checkData(dfs, p, (int)safeLength,
+StripedFileTestUtil.checkData(dfs, p, checkDataLength,
 new ArrayList(), oldGS, blockGroupSize);
   }
 
@@ -219,12 +223,11 @@ public class TestLeaseRecoveryStriped {
 final FSDataOutputStream out = dfs.create(p);
 final DFSStripedOutputStream stripedOut = (DFSStripedOutputStream) out
 .getWrappedStream();
-int length = (stripesPerBlock - 1) * stripeSize;
 int[] posToKill = getPosToKill(blockLengths);
 int checkingPos = nextCheckingPos(posToKill, 0);
 Set<Integer> stoppedStreamerIndexes = new HashSet<>();
 try {
-  for (int pos = 0; pos < length; pos++) {
+  for (int pos = 0; pos < tes

hadoop git commit: HDFS-12994. TestReconstructStripedFile.testNNSendsErasureCodingTasks fails due to socket timeout. (Contributed by Lei (Eddy) Xu)

2018-01-09 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 cf816bc75 -> 92f63d04a


HDFS-12994. TestReconstructStripedFile.testNNSendsErasureCodingTasks fails due 
to socket timeout. (Contributed by Lei (Eddy) Xu)

(cherry picked from commit 47563d86fe6ba1a2de934c9ed740d0aafbf72d4e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92f63d04
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92f63d04
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92f63d04

Branch: refs/heads/branch-3.0
Commit: 92f63d04a1f3e5a85caa712f84a45143b0c9009b
Parents: cf816bc
Author: Lei Xu <l...@apache.org>
Authored: Tue Jan 9 11:53:49 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Tue Jan 9 12:26:30 2018 -0800

--
 .../org/apache/hadoop/hdfs/TestReconstructStripedFile.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92f63d04/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index e3843a0..7201e11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -37,6 +37,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -438,7 +439,7 @@ public class TestReconstructStripedFile {
   }
 
   // HDFS-12044
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
   public void testNNSendsErasureCodingTasks() throws Exception {
 testNNSendsErasureCodingTasks(1);
 testNNSendsErasureCodingTasks(2);
@@ -453,6 +454,9 @@ public class TestReconstructStripedFile {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 20);
 conf.setInt(DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_THREADS_KEY,
 2);
+// Set shorter socket timeout, to allow the recovery task to be rescheduled,
+// if it is connecting to a dead DataNode.
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5 * 1000);
 cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numDataNodes).build();
 cluster.waitActive();


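The one-line configuration change carries the whole fix: with the stock client socket timeout, a reconstruction task that connects to a stopped DataNode blocks far longer than the test allows, so it can never be rescheduled in time. A sketch of the test configuration (only the timeout line comes from the diff; the helper class is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    final class ReconstructionTestConfSketch {
      static Configuration newTestConf() {
        Configuration conf = new Configuration();
        // A 5 second socket timeout makes a connect to a dead DataNode fail
        // fast, letting the EC reconstruction task be rescheduled elsewhere.
        conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5 * 1000);
        return conf;
      }
    }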



hadoop git commit: HDFS-12994. TestReconstructStripedFile.testNNSendsErasureCodingTasks fails due to socket timeout. (Contributed by Lei (Eddy) Xu)

2018-01-09 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk b62a5ece9 -> 47563d86f


HDFS-12994. TestReconstructStripedFile.testNNSendsErasureCodingTasks fails due 
to socket timeout. (Contributed by Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47563d86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47563d86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47563d86

Branch: refs/heads/trunk
Commit: 47563d86fe6ba1a2de934c9ed740d0aafbf72d4e
Parents: b62a5ec
Author: Lei Xu <l...@apache.org>
Authored: Tue Jan 9 11:53:49 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Tue Jan 9 11:53:49 2018 -0800

--
 .../org/apache/hadoop/hdfs/TestReconstructStripedFile.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47563d86/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index e3843a0..7201e11 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -37,6 +37,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -438,7 +439,7 @@ public class TestReconstructStripedFile {
   }
 
   // HDFS-12044
-  @Test(timeout = 60000)
+  @Test(timeout = 120000)
   public void testNNSendsErasureCodingTasks() throws Exception {
 testNNSendsErasureCodingTasks(1);
 testNNSendsErasureCodingTasks(2);
@@ -453,6 +454,9 @@ public class TestReconstructStripedFile {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 20);
 conf.setInt(DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_THREADS_KEY,
 2);
+// Set shorter socket timeout, to allow the recovery task to be rescheduled,
+// if it is connecting to a dead DataNode.
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5 * 1000);
 cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numDataNodes).build();
 cluster.waitActive();





hadoop git commit: HDFS-12860. StripedBlockUtil#getRangesInternalBlocks throws exception for the block group size larger than 2GB. (Contributed by Lei (Eddy) Xu)

2018-01-04 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 739d3c394 -> dc735b286


HDFS-12860. StripedBlockUtil#getRangesInternalBlocks throws exception for the 
block group size larger than 2GB. (Contributed by Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc735b28
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc735b28
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc735b28

Branch: refs/heads/trunk
Commit: dc735b286bb656903df49aee776d22ee0c61f860
Parents: 739d3c3
Author: Lei Xu <l...@apache.org>
Authored: Thu Jan 4 10:16:40 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Thu Jan 4 10:54:56 2018 -0800

--
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 63 +++-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  | 41 -
 2 files changed, 86 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc735b28/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 9e24576..9bad45d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -396,7 +396,9 @@ public class StripedBlockUtil {
   long rangeStartInBlockGroup, long rangeEndInBlockGroup) {
 Preconditions.checkArgument(
 rangeStartInBlockGroup <= rangeEndInBlockGroup &&
-rangeEndInBlockGroup < blockGroup.getBlockSize());
+rangeEndInBlockGroup < blockGroup.getBlockSize(),
+"start=%s end=%s blockSize=%s", rangeStartInBlockGroup,
+rangeEndInBlockGroup, blockGroup.getBlockSize());
 long len = rangeEndInBlockGroup - rangeStartInBlockGroup + 1;
 int firstCellIdxInBG = (int) (rangeStartInBlockGroup / cellSize);
 int lastCellIdxInBG = (int) (rangeEndInBlockGroup / cellSize);
@@ -578,28 +580,39 @@ public class StripedBlockUtil {
   public static class StripingCell {
 final ErasureCodingPolicy ecPolicy;
 /** Logical order in a block group, used when doing I/O to a block group. 
*/
-final int idxInBlkGroup;
-final int idxInInternalBlk;
-final int idxInStripe;
+private final long idxInBlkGroup;
+private final long idxInInternalBlk;
+private final int idxInStripe;
 /**
  * When a logical byte range is mapped to a set of cells, it might
  * partially overlap with the first and last cells. This field and the
  * {@link #size} variable represent the start offset and size of the
  * overlap.
  */
-final int offset;
-final int size;
+private final long offset;
+private final int size;
 
-StripingCell(ErasureCodingPolicy ecPolicy, int cellSize, int idxInBlkGroup,
-int offset) {
+StripingCell(ErasureCodingPolicy ecPolicy, int cellSize, long 
idxInBlkGroup,
+long offset) {
   this.ecPolicy = ecPolicy;
   this.idxInBlkGroup = idxInBlkGroup;
   this.idxInInternalBlk = idxInBlkGroup / ecPolicy.getNumDataUnits();
-  this.idxInStripe = idxInBlkGroup -
-  this.idxInInternalBlk * ecPolicy.getNumDataUnits();
+  this.idxInStripe = (int)(idxInBlkGroup -
+  this.idxInInternalBlk * ecPolicy.getNumDataUnits());
   this.offset = offset;
   this.size = cellSize;
 }
+
+int getIdxInStripe() {
+  return idxInStripe;
+}
+
+@Override
+public String toString() {
+  return String.format("StripingCell(idxInBlkGroup=%d, " +
+  "idxInInternalBlk=%d, idxInStrip=%d, offset=%d, size=%d)",
+  idxInBlkGroup, idxInInternalBlk, idxInStripe, offset, size);
+}
   }
 
   /**
@@ -646,7 +659,9 @@ public class StripedBlockUtil {
 public int missingChunksNum = 0;
 
 public AlignedStripe(long offsetInBlock, long length, int width) {
-  Preconditions.checkArgument(offsetInBlock >= 0 && length >= 0);
+  Preconditions.checkArgument(offsetInBlock >= 0 && length >= 0,
+  "OffsetInBlock(%s) and length(%s) must be non-negative",
+  offsetInBlock, length);
   this.range = new VerticalRange(offsetInBlock, length);
   this.chunks = new StripingChunk[width];
 }
@@ -665,9 +680,9 @@ public class StripedBlockUtil {
 
 @Override
 public String toString() {
-  return "Offset=" 

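The underlying bug is plain int overflow: once a block group exceeds Integer.MAX_VALUE bytes (about 2 GB), computing cell indexes in int arithmetic wraps negative, which is why the StripingCell fields above were widened to long. A self-contained illustration (the 1 MB cell size is just an example value):

    final class CellIndexSketch {
      static long cellIndex(long offsetInBlockGroup, int cellSize) {
        // Long division stays correct for any block group size.
        return offsetInBlockGroup / cellSize;
      }

      public static void main(String[] args) {
        long offset = 3L * 1024 * 1024 * 1024;   // 3 GB into the block group
        int cellSize = 1024 * 1024;              // 1 MB cells
        // Narrowing to int first (the pre-fix pattern) corrupts the index:
        int broken = (int) offset / cellSize;    // prints -1024
        System.out.println("broken=" + broken
            + " correct=" + cellIndex(offset, cellSize));   // prints 3072
      }
    }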
hadoop git commit: HDFS-12860. StripedBlockUtil#getRangesInternalBlocks throws exception for the block group size larger than 2GB. (Contributed by Lei (Eddy) Xu)

2018-01-04 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c155d8e75 -> 47773d68b


HDFS-12860. StripedBlockUtil#getRangesInternalBlocks throws exception for the 
block group size larger than 2GB. (Contributed by Lei (Eddy) Xu)

(cherry picked from commit 85067eaa8e352afeed9c094da3815100873c84e4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47773d68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47773d68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47773d68

Branch: refs/heads/branch-3.0
Commit: 47773d68b2ba7a1724425efff0ded8c8f8a400bc
Parents: c155d8e
Author: Lei Xu <l...@apache.org>
Authored: Thu Jan 4 10:16:40 2018 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Thu Jan 4 10:33:34 2018 -0800

--
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 63 +++-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  | 41 -
 2 files changed, 86 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47773d68/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 896ebc6..c19ab28 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -396,7 +396,9 @@ public class StripedBlockUtil {
   long rangeStartInBlockGroup, long rangeEndInBlockGroup) {
 Preconditions.checkArgument(
 rangeStartInBlockGroup <= rangeEndInBlockGroup &&
-rangeEndInBlockGroup < blockGroup.getBlockSize());
+rangeEndInBlockGroup < blockGroup.getBlockSize(),
+"start=%s end=%s blockSize=%s", rangeStartInBlockGroup,
+rangeEndInBlockGroup, blockGroup.getBlockSize());
 long len = rangeEndInBlockGroup - rangeStartInBlockGroup + 1;
 int firstCellIdxInBG = (int) (rangeStartInBlockGroup / cellSize);
 int lastCellIdxInBG = (int) (rangeEndInBlockGroup / cellSize);
@@ -578,28 +580,39 @@ public class StripedBlockUtil {
   static class StripingCell {
 final ErasureCodingPolicy ecPolicy;
 /** Logical order in a block group, used when doing I/O to a block group. 
*/
-final int idxInBlkGroup;
-final int idxInInternalBlk;
-final int idxInStripe;
+private final long idxInBlkGroup;
+private final long idxInInternalBlk;
+private final int idxInStripe;
 /**
  * When a logical byte range is mapped to a set of cells, it might
  * partially overlap with the first and last cells. This field and the
  * {@link #size} variable represent the start offset and size of the
  * overlap.
  */
-final int offset;
-final int size;
+private final long offset;
+private final int size;
 
-StripingCell(ErasureCodingPolicy ecPolicy, int cellSize, int idxInBlkGroup,
-int offset) {
+StripingCell(ErasureCodingPolicy ecPolicy, int cellSize, long 
idxInBlkGroup,
+long offset) {
   this.ecPolicy = ecPolicy;
   this.idxInBlkGroup = idxInBlkGroup;
   this.idxInInternalBlk = idxInBlkGroup / ecPolicy.getNumDataUnits();
-  this.idxInStripe = idxInBlkGroup -
-  this.idxInInternalBlk * ecPolicy.getNumDataUnits();
+  this.idxInStripe = (int)(idxInBlkGroup -
+  this.idxInInternalBlk * ecPolicy.getNumDataUnits());
   this.offset = offset;
   this.size = cellSize;
 }
+
+int getIdxInStripe() {
+  return idxInStripe;
+}
+
+@Override
+public String toString() {
+  return String.format("StripingCell(idxInBlkGroup=%d, " +
+  "idxInInternalBlk=%d, idxInStrip=%d, offset=%d, size=%d)",
+  idxInBlkGroup, idxInInternalBlk, idxInStripe, offset, size);
+}
   }
 
   /**
@@ -646,7 +659,9 @@ public class StripedBlockUtil {
 public int missingChunksNum = 0;
 
 public AlignedStripe(long offsetInBlock, long length, int width) {
-  Preconditions.checkArgument(offsetInBlock >= 0 && length >= 0);
+  Preconditions.checkArgument(offsetInBlock >= 0 && length >= 0,
+  "OffsetInBlock(%s) and length(%s) must be non-negative",
+  offsetInBlock, length);
   this.range = new VerticalRange(offsetInBlock, length);
   this.chunks = new StripingChunk[width];
 }
@@ -665,9 +680,9 @@ public class StripedBlockUtil {
 
 @Override
 public

hadoop git commit: HDFS-12915. Fix findbugs warning in INodeFile$HeaderFormat.getBlockLayoutRedundancy. (Contributed by Chris Douglas)

2017-12-29 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk a55884c68 -> 6e3e1b8cd


HDFS-12915. Fix findbugs warning in 
INodeFile$HeaderFormat.getBlockLayoutRedundancy. (Contributed by Chris Douglas)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e3e1b8c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e3e1b8c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e3e1b8c

Branch: refs/heads/trunk
Commit: 6e3e1b8cde737e4c03b0f5279cab0239e7069a72
Parents: a55884c
Author: Lei Xu <l...@apache.org>
Authored: Fri Dec 29 12:21:57 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Fri Dec 29 12:21:57 2017 -0800

--
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 71 +---
 1 file changed, 46 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e3e1b8c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 3f2fb33..906a940 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -33,10 +33,11 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -45,7 +46,6 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
@@ -53,11 +53,11 @@ import 
org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.LongBitFormat;
+import org.apache.hadoop.util.StringUtils;
+import static 
org.apache.hadoop.io.erasurecode.ErasureCodeConstants.REPLICATION_POLICY_ID;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
-import org.apache.hadoop.util.StringUtils;
 
 /** I-node for closed file. */
 @InterfaceAudience.Private
@@ -186,28 +186,49 @@ public class INodeFile extends INodeWithAdditionalFields
  * Construct block layout redundancy based on the given BlockType,
  * replication factor and EC PolicyID.
  */
-static long getBlockLayoutRedundancy(final BlockType blockType,
-final Short replication, final Byte erasureCodingPolicyID) {
-  long layoutRedundancy = 0;
-  if (blockType == STRIPED) {
-Preconditions.checkArgument(replication == null &&
-erasureCodingPolicyID != null);
-Preconditions.checkArgument(ErasureCodingPolicyManager.getInstance()
-.getByID(erasureCodingPolicyID) != null,
-"Could not find EC policy with ID 0x" + StringUtils
-.byteToHexString(erasureCodingPolicyID));
+static long getBlockLayoutRedundancy(BlockType blockType,
+Short replication, Byte erasureCodingPolicyID) {
+  if (null == erasureCodingPolicyID) {
+erasureCodingPolicyID = REPLICATION_POLICY_ID;
+  }
+  long layoutRedundancy = 0xFF & erasureCodingPolicyID;
+  switch (blockType) {
+  case STRIPED:
+if (replication != null) {
+  throw new IllegalArgumentException(
+  "Illegal replication for STRIPED block type");
+}
+if (erasureCodingPolicyID == REPLICATION_POLICY_ID) {
+  throw new IllegalArgumentExcept

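The rewritten getBlockLayoutRedundancy normalizes a null policy to REPLICATION_POLICY_ID, rejects inconsistent (blockType, replication, policy) combinations up front, and packs the erasure coding policy ID into the low byte of the layout-redundancy header. A reduced sketch of that packing step (the constant value and the single validation shown are illustrative, not the full INodeFile header layout):

    final class LayoutRedundancySketch {
      // Illustrative stand-in for ErasureCodeConstants.REPLICATION_POLICY_ID.
      static final byte REPLICATION_POLICY_ID = 0;

      static long packPolicyId(Byte ecPolicyId, boolean striped) {
        if (ecPolicyId == null) {
          ecPolicyId = REPLICATION_POLICY_ID;   // normalize before validating
        }
        if (striped && ecPolicyId == REPLICATION_POLICY_ID) {
          throw new IllegalArgumentException(
              "STRIPED block type requires a real erasure coding policy");
        }
        // The 0xFF mask keeps the policy ID unsigned as it widens into the
        // long header word.
        return 0xFF & ecPolicyId;
      }
    }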
hadoop git commit: HDFS-12915. Fix findbugs warning in INodeFile$HeaderFormat.getBlockLayoutRedundancy. (Contributed by Chris Douglas)

2017-12-29 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 bd4dcc777 -> 8f9603cb9


HDFS-12915. Fix findbugs warning in 
INodeFile$HeaderFormat.getBlockLayoutRedundancy. (Contributed by Chris Douglas)

(cherry picked from commit 6e3e1b8cde737e4c03b0f5279cab0239e7069a72)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f9603cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f9603cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f9603cb

Branch: refs/heads/branch-3.0
Commit: 8f9603cb9daf219ef881843f9340130f66a9a715
Parents: bd4dcc7
Author: Lei Xu <l...@apache.org>
Authored: Fri Dec 29 12:21:57 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Fri Dec 29 12:23:33 2017 -0800

--
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 71 +---
 1 file changed, 46 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f9603cb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 3f2fb33..906a940 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -33,10 +33,11 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -45,7 +46,6 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.hadoop.hdfs.protocol.BlockType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
@@ -53,11 +53,11 @@ import 
org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.LongBitFormat;
+import org.apache.hadoop.util.StringUtils;
+import static 
org.apache.hadoop.io.erasurecode.ErasureCodeConstants.REPLICATION_POLICY_ID;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
-import org.apache.hadoop.util.StringUtils;
 
 /** I-node for closed file. */
 @InterfaceAudience.Private
@@ -186,28 +186,49 @@ public class INodeFile extends INodeWithAdditionalFields
  * Construct block layout redundancy based on the given BlockType,
  * replication factor and EC PolicyID.
  */
-static long getBlockLayoutRedundancy(final BlockType blockType,
-final Short replication, final Byte erasureCodingPolicyID) {
-  long layoutRedundancy = 0;
-  if (blockType == STRIPED) {
-Preconditions.checkArgument(replication == null &&
-erasureCodingPolicyID != null);
-Preconditions.checkArgument(ErasureCodingPolicyManager.getInstance()
-.getByID(erasureCodingPolicyID) != null,
-"Could not find EC policy with ID 0x" + StringUtils
-.byteToHexString(erasureCodingPolicyID));
+static long getBlockLayoutRedundancy(BlockType blockType,
+Short replication, Byte erasureCodingPolicyID) {
+  if (null == erasureCodingPolicyID) {
+erasureCodingPolicyID = REPLICATION_POLICY_ID;
+  }
+  long layoutRedundancy = 0xFF & erasureCodingPolicyID;
+  switch (blockType) {
+  case STRIPED:
+if (replication != null) {
+  throw new IllegalArgumentException(
+  "Illegal replication for STRIPED block type");
+}
+if (erasureCodingPolicy

hadoop git commit: HDFS-12938. TestErasureCodingCLI testAll failing consistently. (Contributed by Ajay Kumar)

2017-12-21 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk c8ff0cc30 -> b318bed01


HDFS-12938. TestErasureCodingCLI testAll failing consistently. (Contributed by 
Ajay Kumar)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b318bed0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b318bed0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b318bed0

Branch: refs/heads/trunk
Commit: b318bed01affa150d70661f263efff9a5c9422f6
Parents: c8ff0cc
Author: Lei Xu <l...@apache.org>
Authored: Thu Dec 21 10:28:24 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Thu Dec 21 10:28:24 2017 -0800

--
 .../hadoop-hdfs/src/test/resources/testErasureCodingConf.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b318bed0/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index bd451eb..fc0c060 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -311,7 +311,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Warning: setting erasure coding policy on an non-empty directory will not automatically convert existing data to RS-6-3-1024</expected-output>
+          <expected-output>Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to RS-6-3-1024</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -353,7 +353,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Warning: unsetting erasure coding policy on an non-empty directory will not automatically convert existing data to replicated data</expected-output>
+          <expected-output>Warning: unsetting erasure coding policy on a non-empty directory will not automatically convert existing files to replicated data</expected-output>
         </comparator>
       </comparators>
     </test>





hadoop git commit: HDFS-12938. TestErasureCodigCLI testAll failing consistently. (Contributed by Ajay Kumar)

2017-12-21 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 7d42433f3 -> 2fbfaac6c


HDFS-12938. TestErasureCodigCLI testAll failing consistently. (Contributed by 
Ajay Kumar)

(cherry picked from commit b318bed01affa150d70661f263efff9a5c9422f6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fbfaac6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fbfaac6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fbfaac6

Branch: refs/heads/branch-3.0
Commit: 2fbfaac6c053768a8b8c10f54d63b9a8949ba4bb
Parents: 7d42433
Author: Lei Xu <l...@apache.org>
Authored: Thu Dec 21 10:28:24 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Thu Dec 21 10:30:40 2017 -0800

--
 .../hadoop-hdfs/src/test/resources/testErasureCodingConf.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fbfaac6/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index e667213..265e628 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -311,7 +311,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Warning: setting erasure coding policy on an non-empty directory will not automatically convert existing data to RS-6-3-1024</expected-output>
+          <expected-output>Warning: setting erasure coding policy on a non-empty directory will not automatically convert existing files to RS-6-3-1024</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -353,7 +353,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Warning: unsetting erasure coding policy on an non-empty directory will not automatically convert existing data to replicated data</expected-output>
+          <expected-output>Warning: unsetting erasure coding policy on a non-empty directory will not automatically convert existing files to replicated data</expected-output>
         </comparator>
       </comparators>
     </test>





hadoop git commit: HDFS-12927. Update erasure coding doc to address unsupported APIs. (Contributed by Lei (Eddy) Xu)

2017-12-15 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 2821edf6d -> 0191a862a


HDFS-12927. Update erasure coding doc to address unsupported APIs. (Contributed 
by Lei (Eddy) Xu)

(cherry picked from commit 949be14b0881186d76c3b60ee2f39ce67dc1654c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0191a862
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0191a862
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0191a862

Branch: refs/heads/branch-3.0
Commit: 0191a862a36453170358726da114b6bf7ec3e169
Parents: 2821edf
Author: Lei Xu <l...@apache.org>
Authored: Fri Dec 15 10:20:29 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Fri Dec 15 10:29:08 2017 -0800

--
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md| 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0191a862/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index a884ed8..4459c94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -220,11 +220,14 @@ Below are the details about each command.
 Limitations
 ---
 
-Certain HDFS file write operations, i.e., `hflush`, `hsync` and `append`,
+Certain HDFS operations, i.e., `hflush`, `hsync`, `concat`, `setReplication`, `truncate` and `append`,
 are not supported on erasure coded files due to substantial technical
 challenges.
 
-* `append()` on an erasure coded file will throw `IOException`.
+* `append()` and `truncate()` on an erasure coded file will throw `IOException`.
+* `concat()` will throw `IOException` if files are mixed with different erasure
+coding policies or with replicated files.
+* `setReplication()` is no-op on erasure coded files.
 * `hflush()` and `hsync()` on `DFSStripedOutputStream` are no-op. Thus calling
 `hflush()` or `hsync()` on an erasure coded file can not guarantee data
 being persistent.





hadoop git commit: HDFS-12927. Update erasure coding doc to address unsupported APIs. (Contributed by Lei (Eddy) Xu)

2017-12-15 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1c15b1751 -> 949be14b0


HDFS-12927. Update erasure coding doc to address unsupported APIs. (Contributed 
by Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/949be14b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/949be14b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/949be14b

Branch: refs/heads/trunk
Commit: 949be14b0881186d76c3b60ee2f39ce67dc1654c
Parents: 1c15b17
Author: Lei Xu <l...@apache.org>
Authored: Fri Dec 15 10:20:29 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Fri Dec 15 10:20:29 2017 -0800

--
 .../hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md| 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/949be14b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index a884ed8..4459c94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -220,11 +220,14 @@ Below are the details about each command.
 Limitations
 ---
 
-Certain HDFS file write operations, i.e., `hflush`, `hsync` and `append`,
+Certain HDFS operations, i.e., `hflush`, `hsync`, `concat`, `setReplication`, `truncate` and `append`,
 are not supported on erasure coded files due to substantial technical
 challenges.
 
-* `append()` on an erasure coded file will throw `IOException`.
+* `append()` and `truncate()` on an erasure coded file will throw `IOException`.
+* `concat()` will throw `IOException` if files are mixed with different erasure
+coding policies or with replicated files.
+* `setReplication()` is no-op on erasure coded files.
 * `hflush()` and `hsync()` on `DFSStripedOutputStream` are no-op. Thus calling
 `hflush()` or `hsync()` on an erasure coded file can not guarantee data
 being persistent.
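As a quick client-side illustration of the first two bullets (a hedged sketch, not part of this commit; the path and cluster configuration are assumptions):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class EcLimitationsProbe {
      public static void main(String[] args) throws IOException {
        // Assumes fs.defaultFS points at an HDFS cluster and that /ecdir
        // already has an erasure coding policy (e.g. RS-6-3-1024k) set.
        try (FileSystem fs = FileSystem.get(new Configuration())) {
          Path file = new Path("/ecdir/file1");  // hypothetical EC file
          try {
            fs.append(file).close();
          } catch (IOException e) {
            // Expected: append is unsupported on erasure coded files.
            System.err.println("append rejected: " + e.getMessage());
          }
          // Documented above as a no-op on erasure coded files; the call
          // returns normally but the file stays erasure coded.
          fs.setReplication(file, (short) 2);
        }
      }
    }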





hadoop git commit: HDFS-12819. Setting/Unsetting EC policy shows warning if the directory is not empty. (Contributed by Lei (Eddy) Xu)

2017-12-15 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6681dd100 -> 1c15b1751


HDFS-12819. Setting/Unsetting EC policy shows warning if the directory is not 
empty. (Contributed by Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c15b175
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c15b175
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c15b175

Branch: refs/heads/trunk
Commit: 1c15b1751c0698bd3063d5c25f556d4821b161d2
Parents: 6681dd1
Author: Lei Xu <l...@apache.org>
Authored: Fri Dec 15 10:04:43 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Fri Dec 15 10:04:43 2017 -0800

--
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   | 14 
 .../test/resources/testErasureCodingConf.xml| 37 
 2 files changed, 51 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c15b175/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index a28f227..e30b083 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -19,7 +19,9 @@ package org.apache.hadoop.hdfs.tools;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -362,6 +364,12 @@ public class ECAdmin extends Configured implements Tool {
           System.out.println("Set erasure coding policy " + ecPolicyName +
               " on " + path);
         }
+        RemoteIterator<FileStatus> dirIt = dfs.listStatusIterator(p);
+        if (dirIt.hasNext()) {
+          System.out.println("Warning: setting erasure coding policy on a " +
+              "non-empty directory will not automatically convert existing" +
+              " files to " + ecPolicyName);
+        }
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
         return 3;
@@ -412,6 +420,12 @@ public class ECAdmin extends Configured implements Tool {
       try {
         dfs.unsetErasureCodingPolicy(p);
         System.out.println("Unset erasure coding policy from " + path);
+        RemoteIterator<FileStatus> dirIt = dfs.listStatusIterator(p);
+        if (dirIt.hasNext()) {
+          System.out.println("Warning: unsetting erasure coding policy on a " +
+              "non-empty directory will not automatically convert existing" +
+              " files to replicated data.");
+        }
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
         return 2;
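For readers scripting this against the client API rather than the hdfs ec CLI, the programmatic counterpart of the new warning path looks roughly like the fragment below (a hedged sketch; it assumes an already-initialized DistributedFileSystem named dfs, and the policy name mirrors the test cases):

    // Set the policy, then warn if the directory already holds entries,
    // mirroring the ECAdmin change above.
    Path dir = new Path("/ecdir");
    dfs.setErasureCodingPolicy(dir, "RS-6-3-1024k");
    RemoteIterator<FileStatus> dirIt = dfs.listStatusIterator(dir);
    if (dirIt.hasNext()) {
      // Pre-existing files keep their old layout; only new files use the policy.
      System.out.println("Warning: existing files under " + dir
          + " were not converted to RS-6-3-1024k");
    }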

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c15b175/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 9988ff3..e667213 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -299,6 +299,24 @@
     </test>
 
     <test>
+      <description>setPolicy : set policy on non-empty directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <command>-fs NAMENODE -touchz /ecdir/file1</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -R /ecdir</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Warning: setting erasure coding policy on an non-empty directory will not automatically convert existing data to RS-6-3-1024</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
       <description>unsetPolicy : unset inherited EC policy, has no effect</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
@@ -322,6 +340,25 @@
     </test>
 
     <test>
+      <description>unsetPolicy : unset policy on non-empty directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
+        <command>-fs NAMENODE -touchz /ecdir/f

hadoop git commit: HDFS-12819. Setting/Unsetting EC policy shows warning if the directory is not empty. (Contributed by Lei (Eddy) Xu)

2017-12-15 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 53033c69f -> 2821edf6d


HDFS-12819. Setting/Unsetting EC policy shows warning if the directory is not 
empty. (Contributed by Lei (Eddy) Xu)

(cherry picked from commit 1c15b1751c0698bd3063d5c25f556d4821b161d2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2821edf6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2821edf6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2821edf6

Branch: refs/heads/branch-3.0
Commit: 2821edf6d0ae78a939af86b824f253397e5af024
Parents: 53033c6
Author: Lei Xu <l...@apache.org>
Authored: Fri Dec 15 10:04:43 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Fri Dec 15 10:06:00 2017 -0800

--
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   | 14 
 .../test/resources/testErasureCodingConf.xml| 37 
 2 files changed, 51 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2821edf6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index a28f227..e30b083 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -19,7 +19,9 @@ package org.apache.hadoop.hdfs.tools;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -362,6 +364,12 @@ public class ECAdmin extends Configured implements Tool {
           System.out.println("Set erasure coding policy " + ecPolicyName +
               " on " + path);
         }
+        RemoteIterator<FileStatus> dirIt = dfs.listStatusIterator(p);
+        if (dirIt.hasNext()) {
+          System.out.println("Warning: setting erasure coding policy on a " +
+              "non-empty directory will not automatically convert existing" +
+              " files to " + ecPolicyName);
+        }
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
         return 3;
@@ -412,6 +420,12 @@ public class ECAdmin extends Configured implements Tool {
       try {
         dfs.unsetErasureCodingPolicy(p);
         System.out.println("Unset erasure coding policy from " + path);
+        RemoteIterator<FileStatus> dirIt = dfs.listStatusIterator(p);
+        if (dirIt.hasNext()) {
+          System.out.println("Warning: unsetting erasure coding policy on a " +
+              "non-empty directory will not automatically convert existing" +
+              " files to replicated data.");
+        }
       } catch (Exception e) {
         System.err.println(AdminHelper.prettifyException(e));
         return 2;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2821edf6/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 9988ff3..e667213 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -299,6 +299,24 @@
     </test>
 
     <test>
+      <description>setPolicy : set policy on non-empty directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <command>-fs NAMENODE -touchz /ecdir/file1</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k -path /ecdir</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -R /ecdir</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Warning: setting erasure coding policy on an non-empty directory will not automatically convert existing data to RS-6-3-1024</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
       <description>unsetPolicy : unset inherited EC policy, has no effect</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
@@ -322,6 +340,25 @@
     </test>
 
     <test>
+      <description>unsetPolicy : unset policy on non-empty directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /ecdir</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-1024k 

hadoop git commit: HDFS-12840. Creating a file with non-default EC policy in a EC zone is not correctly serialized in the editlog. Contributed by Lei (Eddy) Xu.

2017-12-07 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0 cb307d5b8 -> e309d25d2


HDFS-12840. Creating a file with non-default EC policy in a EC zone is not 
correctly serialized in the editlog. Contributed by Lei (Eddy) Xu.

(cherry picked from commit 67662e2ac9e68f32b725c8118cf2be79a662fca5)
(cherry picked from commit f3143d225afec0ad95d9e4b81b91d760b5b77c52)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e309d25d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e309d25d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e309d25d

Branch: refs/heads/branch-3.0.0
Commit: e309d25d2bc8367758a0af5b8f081485e728327f
Parents: cb307d5
Author: Lei Xu <l...@apache.org>
Authored: Thu Dec 7 11:15:40 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Thu Dec 7 11:24:24 2017 -0800

--
 .../io/erasurecode/ErasureCodeConstants.java|   2 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  21 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |   3 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   2 +-
 .../hdfs/server/namenode/FSEditLogOp.java   |  26 +
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  14 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  23 +
 .../TestDistributedFileSystemWithECFile.java|  55 ++
 .../namenode/OfflineEditsViewerHelper.java  |   4 +-
 .../server/namenode/TestNamenodeRetryCache.java |  14 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |  13 +-
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 6753 -> 7909 bytes
 .../src/test/resources/editsStored.xml  | 536 +--
 13 files changed, 531 insertions(+), 182 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e309d25d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index 73b8f56..2eac016 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -52,6 +52,6 @@ public final class ErasureCodeConstants {
 
   public static final byte MAX_POLICY_ID = Byte.MAX_VALUE;
   public static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
-  public static final byte REPLICATION_POLICY_ID = (byte) 63;
+  public static final byte REPLICATION_POLICY_ID = (byte) 0;
   public static final String REPLICATION_POLICY_NAME = REPLICATION_CODEC_NAME;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e309d25d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index b202212..c4041a3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.ChunkedArrayList;
@@ -415,22 +416,28 @@ class FSDirWriteFileOp {
       PermissionStatus permissions, List<AclEntry> aclEntries,
       List<XAttr> xAttrs, short replication, long modificationTime, long atime,
       long preferredBlockSize, boolean underConstruction, String clientName,
-      String clientMachine, byte storagePolicyId) {
+      String clientMachine, byte storagePolicyId, byte ecPolicyID) {
     final INodeFile newNode;
     Preconditions.checkNotNull(existing);
     assert fsd.hasWriteLock();
     try {
       // check if the file has an EC policy
-      boolean isStriped = false;
-      ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
-          unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), existing);
-      if (ecPolicy != null) {
-        isStriped = tr

hadoop git commit: HDFS-12840. Creating a file with non-default EC policy in a EC zone is not correctly serialized in the editlog. Contributed by Lei (Eddy) Xu.

2017-12-07 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 67b2661e3 -> 67662e2ac


HDFS-12840. Creating a file with non-default EC policy in a EC zone is not 
correctly serialized in the editlog. Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67662e2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67662e2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67662e2a

Branch: refs/heads/trunk
Commit: 67662e2ac9e68f32b725c8118cf2be79a662fca5
Parents: 67b2661
Author: Lei Xu <l...@apache.org>
Authored: Thu Dec 7 11:15:40 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Thu Dec 7 11:15:40 2017 -0800

--
 .../io/erasurecode/ErasureCodeConstants.java|   2 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  21 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |   3 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   2 +-
 .../hdfs/server/namenode/FSEditLogOp.java   |  26 +
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  14 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  23 +
 .../TestDistributedFileSystemWithECFile.java|  55 ++
 .../namenode/OfflineEditsViewerHelper.java  |   4 +-
 .../server/namenode/TestNamenodeRetryCache.java |  14 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |  13 +-
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 6753 -> 7909 bytes
 .../src/test/resources/editsStored.xml  | 536 +--
 13 files changed, 531 insertions(+), 182 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/67662e2a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index 73b8f56..2eac016 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -52,6 +52,6 @@ public final class ErasureCodeConstants {
 
   public static final byte MAX_POLICY_ID = Byte.MAX_VALUE;
   public static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
-  public static final byte REPLICATION_POLICY_ID = (byte) 63;
+  public static final byte REPLICATION_POLICY_ID = (byte) 0;
   public static final String REPLICATION_POLICY_NAME = REPLICATION_CODEC_NAME;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/67662e2a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index b202212..c4041a3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.ChunkedArrayList;
@@ -415,22 +416,28 @@ class FSDirWriteFileOp {
       PermissionStatus permissions, List<AclEntry> aclEntries,
       List<XAttr> xAttrs, short replication, long modificationTime, long atime,
       long preferredBlockSize, boolean underConstruction, String clientName,
-      String clientMachine, byte storagePolicyId) {
+      String clientMachine, byte storagePolicyId, byte ecPolicyID) {
     final INodeFile newNode;
     Preconditions.checkNotNull(existing);
     assert fsd.hasWriteLock();
     try {
       // check if the file has an EC policy
-      boolean isStriped = false;
-      ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
-          unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), existing);
-      if (ecPolicy != null) {
-        isStriped = true;
+      boolean isStriped =
+          ecPolicyID != ErasureCodeConstants.REPLICATION_POLICY_ID;
+      ErasureCodingPolicy ecPolicy = null;
+
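The behavioral crux of the change, restated as a hedged sketch (names follow the hunks above): with REPLICATION_POLICY_ID redefined as 0, the EC policy ID recorded alongside the file can double as the striped/contiguous discriminator, so the code no longer needs to re-resolve the directory's policy to decide the block type.

    // A byte value of 0 (the new REPLICATION_POLICY_ID) unambiguously means
    // "replicated"; any non-zero ID identifies a striped file.
    static boolean isStriped(byte ecPolicyID) {
      return ecPolicyID != ErasureCodeConstants.REPLICATION_POLICY_ID;
    }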

hadoop git commit: HDFS-12840. Creating a file with non-default EC policy in a EC zone is not correctly serialized in the editlog. Contributed by Lei (Eddy) Xu.

2017-12-07 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 afcbfbf7f -> f3143d225


HDFS-12840. Creating a file with non-default EC policy in a EC zone is not 
correctly serialized in the editlog. Contributed by Lei (Eddy) Xu.

(cherry picked from commit 67662e2ac9e68f32b725c8118cf2be79a662fca5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3143d22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3143d22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3143d22

Branch: refs/heads/branch-3.0
Commit: f3143d225afec0ad95d9e4b81b91d760b5b77c52
Parents: afcbfbf
Author: Lei Xu <l...@apache.org>
Authored: Thu Dec 7 11:15:40 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Thu Dec 7 11:20:19 2017 -0800

--
 .../io/erasurecode/ErasureCodeConstants.java|   2 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  21 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |   3 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   2 +-
 .../hdfs/server/namenode/FSEditLogOp.java   |  26 +
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  14 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  23 +
 .../TestDistributedFileSystemWithECFile.java|  55 ++
 .../namenode/OfflineEditsViewerHelper.java  |   4 +-
 .../server/namenode/TestNamenodeRetryCache.java |  14 +-
 .../namenode/ha/TestRetryCacheWithHA.java   |  13 +-
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 6753 -> 7909 bytes
 .../src/test/resources/editsStored.xml  | 536 +--
 13 files changed, 531 insertions(+), 182 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3143d22/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index 73b8f56..2eac016 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -52,6 +52,6 @@ public final class ErasureCodeConstants {
 
   public static final byte MAX_POLICY_ID = Byte.MAX_VALUE;
   public static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
-  public static final byte REPLICATION_POLICY_ID = (byte) 63;
+  public static final byte REPLICATION_POLICY_ID = (byte) 0;
   public static final String REPLICATION_POLICY_NAME = REPLICATION_CODEC_NAME;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3143d22/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index b202212..c4041a3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.ChunkedArrayList;
@@ -415,22 +416,28 @@ class FSDirWriteFileOp {
       PermissionStatus permissions, List<AclEntry> aclEntries,
       List<XAttr> xAttrs, short replication, long modificationTime, long atime,
       long preferredBlockSize, boolean underConstruction, String clientName,
-      String clientMachine, byte storagePolicyId) {
+      String clientMachine, byte storagePolicyId, byte ecPolicyID) {
     final INodeFile newNode;
     Preconditions.checkNotNull(existing);
     assert fsd.hasWriteLock();
     try {
       // check if the file has an EC policy
-      boolean isStriped = false;
-      ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
-          unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), existing);
-      if (ecPolicy != null) {
-        isStriped = true;
+      boolean isStriped =
+          

hadoop git commit: HDFS-12847. Regenerate editsStored and editsStored.xml in HDFS tests. Contributed by Lei (Eddy) Xu.

2017-11-22 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk d42a336cf -> 785732c13


HDFS-12847. Regenerate editsStored and editsStored.xml in HDFS tests. 
Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/785732c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/785732c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/785732c1

Branch: refs/heads/trunk
Commit: 785732c13e2ebe9f27350b6be82eb2fb782d7dc4
Parents: d42a336
Author: Lei Xu <l...@apache.org>
Authored: Wed Nov 22 10:19:58 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Wed Nov 22 10:22:32 2017 -0800

--
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 6293 -> 6753 bytes
 .../src/test/resources/editsStored.xml  | 750 +++
 2 files changed, 423 insertions(+), 327 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/785732c1/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
index 8029575..3f2817a 100644
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored 
and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/785732c1/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
index 0a1c25e..2a57c73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
@@ -13,8 +13,8 @@
   2
   
 1
-1423097579620
-ef3f2032e2797e8e
+1512000829976
+e7457bcc6ab95a84
   
 
   
@@ -24,8 +24,8 @@
   3
   
 2
-1423097579622
-b978ed731a0b4a65
+1512000829980
+07cc38caf6c47bb4
   
 
   
@@ -37,19 +37,19 @@
   16386
   /file_create
   1
-  1422406380345
-  1422406380345
+  1511309632199
+  1511309632199
   512
-  DFSClient_NONMAPREDUCE_-156773767_1
+  DFSClient_NONMAPREDUCE_2134933941_1
   127.0.0.1
   true
   
-    xyao
+lei
 supergroup
 420
   
-  7334ec24-dd6b-4efd-807d-ed0d18625534
-  6
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  5
 
   
   
@@ -60,14 +60,14 @@
   0
   /file_create
   1
-  1422406380369
-  1422406380345
+  1511309632248
+  1511309632199
   512
   
   
   false
   
-    xyao
+lei
 supergroup
 420
   
@@ -78,11 +78,11 @@
 
   6
   /file_create
-  DFSClient_NONMAPREDUCE_-156773767_1
+  DFSClient_NONMAPREDUCE_2134933941_1
   127.0.0.1
   false
-  7334ec24-dd6b-4efd-807d-ed0d18625534
-  8
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  7
 
   
   
@@ -93,23 +93,118 @@
   0
   /file_create
   1
-  1422406380376
-  1422406380345
+  1511309632263
+  1511309632199
   512
   
   
   false
   
-    xyao
+lei
 supergroup
 420
   
 
   
   
-OP_SET_STORAGE_POLICY
+OP_ADD
 
   8
+  0
+  16387
+  /update_blocks
+  1
+  1511309632266
+  1511309632266
+  4096
+  DFSClient_NONMAPREDUCE_2134933941_1
+  127.0.0.1
+  true
+  
+lei
+supergroup
+420
+  
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  9
+
+  
+  
+OP_ALLOCATE_BLOCK_ID
+
+  9
+  1073741825
+
+  
+  
+OP_SET_GENSTAMP_V2
+
+  10
+  1001
+
+  
+  
+OP_ADD_BLOCK
+
+  11
+  /update_blocks
+  
+1073741825
+0
+1001
+  
+  
+  -2
+
+  
+  
+OP_UPDATE_BLOCKS
+
+  12
+  /update_blocks
+  
+1073741825
+1
+1001
+  
+  
+  -2
+
+  
+  
+OP_UPDATE_BLOCKS
+
+  13
+  /update_blocks
+  
+  -2
+
+  
+  
+OP_CLOSE
+
+  14
+  0
+  0
+  /update_blocks
+  1
+  1511309632454
+  1511309632266
+  4096
+  
+  
+  false
+  
+lei
+supergroup
+420
+  
+
+  
+  
+OP_SET_STORAGE_POLICY
+
+  15
   /file_create
   7
 
@@ -117,36 +212,36 @@
   
 OP_RENAME_OLD
 
-  9

hadoop git commit: HDFS-12847. Regenerate editsStored and editsStored.xml in HDFS tests. Contributed by Lei (Eddy) Xu.

2017-11-22 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 03f0b8b3f -> a5a86680a


HDFS-12847. Regenerate editsStored and editsStored.xml in HDFS tests. 
Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5a86680
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5a86680
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5a86680

Branch: refs/heads/branch-3.0
Commit: a5a86680a40021ef075a00049abc4f0090f78625
Parents: 03f0b8b
Author: Lei Xu <l...@apache.org>
Authored: Wed Nov 22 10:19:58 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Wed Nov 22 10:23:10 2017 -0800

--
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 6293 -> 6753 bytes
 .../src/test/resources/editsStored.xml  | 750 +++
 2 files changed, 423 insertions(+), 327 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5a86680/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
index 8029575..3f2817a 100644
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored 
and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5a86680/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
index 0a1c25e..2a57c73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
@@ -13,8 +13,8 @@
   2
   
 1
-1423097579620
-ef3f2032e2797e8e
+1512000829976
+e7457bcc6ab95a84
   
 
   
@@ -24,8 +24,8 @@
   3
   
 2
-1423097579622
-b978ed731a0b4a65
+1512000829980
+07cc38caf6c47bb4
   
 
   
@@ -37,19 +37,19 @@
   16386
   /file_create
   1
-  1422406380345
-  1422406380345
+  1511309632199
+  1511309632199
   512
-  DFSClient_NONMAPREDUCE_-156773767_1
+  DFSClient_NONMAPREDUCE_2134933941_1
   127.0.0.1
   true
   
-    xyao
+lei
 supergroup
 420
   
-  7334ec24-dd6b-4efd-807d-ed0d18625534
-  6
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  5
 
   
   
@@ -60,14 +60,14 @@
   0
   /file_create
   1
-  1422406380369
-  1422406380345
+  1511309632248
+  1511309632199
   512
   
   
   false
   
-    xyao
+lei
 supergroup
 420
   
@@ -78,11 +78,11 @@
 
   6
   /file_create
-  DFSClient_NONMAPREDUCE_-156773767_1
+  DFSClient_NONMAPREDUCE_2134933941_1
   127.0.0.1
   false
-  7334ec24-dd6b-4efd-807d-ed0d18625534
-  8
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  7
 
   
   
@@ -93,23 +93,118 @@
   0
   /file_create
   1
-  1422406380376
-  1422406380345
+  1511309632263
+  1511309632199
   512
   
   
   false
   
-    xyao
+lei
 supergroup
 420
   
 
   
   
-OP_SET_STORAGE_POLICY
+OP_ADD
 
   8
+  0
+  16387
+  /update_blocks
+  1
+  1511309632266
+  1511309632266
+  4096
+  DFSClient_NONMAPREDUCE_2134933941_1
+  127.0.0.1
+  true
+  
+lei
+supergroup
+420
+  
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  9
+
+  
+  
+OP_ALLOCATE_BLOCK_ID
+
+  9
+  1073741825
+
+  
+  
+OP_SET_GENSTAMP_V2
+
+  10
+  1001
+
+  
+  
+OP_ADD_BLOCK
+
+  11
+  /update_blocks
+  
+1073741825
+0
+1001
+  
+  
+  -2
+
+  
+  
+OP_UPDATE_BLOCKS
+
+  12
+  /update_blocks
+  
+1073741825
+1
+1001
+  
+  
+  -2
+
+  
+  
+OP_UPDATE_BLOCKS
+
+  13
+  /update_blocks
+  
+  -2
+
+  
+  
+OP_CLOSE
+
+  14
+  0
+  0
+  /update_blocks
+  1
+  1511309632454
+  1511309632266
+  4096
+  
+  
+  false
+  
+lei
+supergroup
+420
+  
+
+  
+  
+OP_SET_STORAGE_POLICY
+
+  15
   /file_create
   7
 
@@ -117,36 +212,36 @@
   
 OP_RENAME_OLD
 
-  9

hadoop git commit: HDFS-12847. Regenerate editsStored and editsStored.xml in HDFS tests. Contributed by Lei (Eddy) Xu.

2017-11-22 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0.0 098eb777c -> 781c01aa2


HDFS-12847. Regenerate editsStored and editsStored.xml in HDFS tests. 
Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/781c01aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/781c01aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/781c01aa

Branch: refs/heads/branch-3.0.0
Commit: 781c01aa2cd982f82b3e76c560518ddb55968647
Parents: 098eb77
Author: Lei Xu <l...@apache.org>
Authored: Wed Nov 22 10:19:58 2017 -0800
Committer: Lei Xu <l...@apache.org>
Committed: Wed Nov 22 10:23:25 2017 -0800

--
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 6293 -> 6753 bytes
 .../src/test/resources/editsStored.xml  | 750 +++
 2 files changed, 423 insertions(+), 327 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/781c01aa/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
index 8029575..3f2817a 100644
Binary files a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored 
and b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/781c01aa/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
index 0a1c25e..2a57c73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
@@ -13,8 +13,8 @@
   2
   
 1
-1423097579620
-ef3f2032e2797e8e
+1512000829976
+e7457bcc6ab95a84
   
 
   
@@ -24,8 +24,8 @@
   3
   
 2
-1423097579622
-b978ed731a0b4a65
+1512000829980
+07cc38caf6c47bb4
   
 
   
@@ -37,19 +37,19 @@
   16386
   /file_create
   1
-  1422406380345
-  1422406380345
+  1511309632199
+  1511309632199
   512
-  DFSClient_NONMAPREDUCE_-156773767_1
+  DFSClient_NONMAPREDUCE_2134933941_1
   127.0.0.1
   true
   
-    xyao
+lei
 supergroup
 420
   
-  7334ec24-dd6b-4efd-807d-ed0d18625534
-  6
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  5
 
   
   
@@ -60,14 +60,14 @@
   0
   /file_create
   1
-  1422406380369
-  1422406380345
+  1511309632248
+  1511309632199
   512
   
   
   false
   
-    xyao
+lei
 supergroup
 420
   
@@ -78,11 +78,11 @@
 
   6
   /file_create
-  DFSClient_NONMAPREDUCE_-156773767_1
+  DFSClient_NONMAPREDUCE_2134933941_1
   127.0.0.1
   false
-  7334ec24-dd6b-4efd-807d-ed0d18625534
-  8
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  7
 
   
   
@@ -93,23 +93,118 @@
   0
   /file_create
   1
-  1422406380376
-  1422406380345
+  1511309632263
+  1511309632199
   512
   
   
   false
   
-    xyao
+lei
 supergroup
 420
   
 
   
   
-OP_SET_STORAGE_POLICY
+OP_ADD
 
   8
+  0
+  16387
+  /update_blocks
+  1
+  1511309632266
+  1511309632266
+  4096
+  DFSClient_NONMAPREDUCE_2134933941_1
+  127.0.0.1
+  true
+  
+lei
+supergroup
+420
+  
+  a4dc081c-6d6f-42d6-af5b-d260228f1aad
+  9
+
+  
+  
+OP_ALLOCATE_BLOCK_ID
+
+  9
+  1073741825
+
+  
+  
+OP_SET_GENSTAMP_V2
+
+  10
+  1001
+
+  
+  
+OP_ADD_BLOCK
+
+  11
+  /update_blocks
+  
+1073741825
+0
+1001
+  
+  
+  -2
+
+  
+  
+OP_UPDATE_BLOCKS
+
+  12
+  /update_blocks
+  
+1073741825
+1
+1001
+  
+  
+  -2
+
+  
+  
+OP_UPDATE_BLOCKS
+
+  13
+  /update_blocks
+  
+  -2
+
+  
+  
+OP_CLOSE
+
+  14
+  0
+  0
+  /update_blocks
+  1
+  1511309632454
+  1511309632266
+  4096
+  
+  
+  false
+  
+lei
+supergroup
+420
+  
+
+  
+  
+OP_SET_STORAGE_POLICY
+
+  15
   /file_create
   7
 
@@ -117,36 +212,36 @@
   
 OP_RENAME_OLD
 

hadoop git commit: HDFS-12482. Provide a configuration to adjust the weight of EC recovery tasks to adjust the speed of recovery. (lei)

2017-10-31 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk ed24da3dd -> 9367c25db


HDFS-12482. Provide a configuration to adjust the weight of EC recovery tasks 
to adjust the speed of recovery. (lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9367c25d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9367c25d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9367c25d

Branch: refs/heads/trunk
Commit: 9367c25dbdfedf60cdbd65611281cf9c667829e6
Parents: ed24da3
Author: Lei Xu <l...@apache.org>
Authored: Tue Oct 31 21:58:14 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Tue Oct 31 21:58:14 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../server/datanode/DataNodeFaultInjector.java  |  6 ++
 .../erasurecode/ErasureCodingWorker.java| 12 +++-
 .../erasurecode/StripedBlockReconstructor.java  |  2 +
 .../src/main/resources/hdfs-default.xml | 13 
 .../src/site/markdown/HDFSErasureCoding.md  |  6 ++
 .../hadoop/hdfs/TestReconstructStripedFile.java | 64 
 7 files changed, 106 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9367c25d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3d1f0b6..37071b6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -596,6 +596,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_DEFAULT = 5000; //5s
   public static final String  DFS_DN_EC_RECONSTRUCTION_THREADS_KEY = "dfs.datanode.ec.reconstruction.threads";
   public static final int     DFS_DN_EC_RECONSTRUCTION_THREADS_DEFAULT = 8;
+  public static final String  DFS_DN_EC_RECONSTRUCTION_XMITS_WEIGHT_KEY =
+      "dfs.datanode.ec.reconstruction.xmits.weight";
+  public static final float   DFS_DN_EC_RECONSTRUCTION_XMITS_WEIGHT_DEFAULT =
+      0.5f;
 
   public static final String
       DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_KEY =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9367c25d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 0a2a60b..1dd779e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -89,4 +89,10 @@ public class DataNodeFaultInjector {
 
   public void throwTooManyOpenFiles() throws FileNotFoundException {
   }
+
+  /**
+   * Used as a hook to inject failure in erasure coding reconstruction
+   * process.
+   */
+  public void stripedBlockReconstruction() throws IOException {}
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9367c25d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 63498bc..45e29ff 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.erasurecode;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -47,6 +48,7 @@ public final class ErasureCodingWorker {
 
   private final DataNode datanode;
   private final Configuration conf;
+  private f
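A hedged example of tuning the new knob from Java (a fragment, not from the patch); equivalently, set dfs.datanode.ec.reconstruction.xmits.weight in hdfs-site.xml. Per the DFSConfigKeys hunk above, 0.5f is the shipped default, so each EC reconstruction task counts as half a transmission unless overridden:

    Configuration conf = new Configuration();
    // Count each EC reconstruction task as a full transmission instead.
    conf.setFloat(
        DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_XMITS_WEIGHT_KEY, 1.0f);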

hadoop git commit: HDFS-12482. Provide a configuration to adjust the weight of EC recovery tasks to adjust the speed of recovery. (lei)

2017-10-31 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c025c9ac6 -> 9ee04006c


HDFS-12482. Provide a configuration to adjust the weight of EC recovery tasks 
to adjust the speed of recovery. (lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ee04006
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ee04006
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ee04006

Branch: refs/heads/branch-3.0
Commit: 9ee04006cb56deff6956da1f9495f122bc361746
Parents: c025c9a
Author: Lei Xu <l...@apache.org>
Authored: Tue Oct 31 21:58:14 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Tue Oct 31 22:03:44 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../server/datanode/DataNodeFaultInjector.java  |  6 ++
 .../erasurecode/ErasureCodingWorker.java| 12 +++-
 .../erasurecode/StripedBlockReconstructor.java  |  2 +
 .../src/main/resources/hdfs-default.xml | 13 
 .../src/site/markdown/HDFSErasureCoding.md  |  6 ++
 .../hadoop/hdfs/TestReconstructStripedFile.java | 64 
 7 files changed, 106 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ee04006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0fc75f1..2c2ebe2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -596,6 +596,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_DEFAULT = 5000; //5s
   public static final String  DFS_DN_EC_RECONSTRUCTION_THREADS_KEY = "dfs.datanode.ec.reconstruction.threads";
   public static final int     DFS_DN_EC_RECONSTRUCTION_THREADS_DEFAULT = 8;
+  public static final String  DFS_DN_EC_RECONSTRUCTION_XMITS_WEIGHT_KEY =
+      "dfs.datanode.ec.reconstruction.xmits.weight";
+  public static final float   DFS_DN_EC_RECONSTRUCTION_XMITS_WEIGHT_DEFAULT =
+      0.5f;
 
   public static final String
       DFS_DATANODE_DIRECTORYSCAN_THROTTLE_LIMIT_MS_PER_SEC_KEY =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ee04006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index 0a2a60b..1dd779e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -89,4 +89,10 @@ public class DataNodeFaultInjector {
 
   public void throwTooManyOpenFiles() throws FileNotFoundException {
   }
+
+  /**
+   * Used as a hook to inject failure in erasure coding reconstruction
+   * process.
+   */
+  public void stripedBlockReconstruction() throws IOException {}
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ee04006/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 63498bc..45e29ff 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.erasurecode;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -47,6 +48,7 @@ public final class ErasureCodingWorker {
 
   private final DataNode datanode;
   private final Configuration conf;
+  private f

hadoop git commit: HDFS-12612. DFSStripedOutputStream.close will throw if called a second time with a failed streamer. (Lei (Eddy) Xu)

2017-10-17 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 75323394f -> f27a4ad03


HDFS-12612. DFSStripedOutputStream.close will throw if called a second time 
with a failed streamer. (Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f27a4ad0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f27a4ad0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f27a4ad0

Branch: refs/heads/trunk
Commit: f27a4ad0324aa0b4080a1c4c6bf4cd560c927e20
Parents: 7532339
Author: Lei Xu <l...@apache.org>
Authored: Tue Oct 17 15:52:09 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Tue Oct 17 15:52:09 2017 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 40 +++
 .../org/apache/hadoop/hdfs/DataStreamer.java| 31 ++--
 .../apache/hadoop/hdfs/ExceptionLastSeen.java   | 75 +++
 .../TestDFSStripedOutputStreamWithFailure.java  | 76 
 4 files changed, 184 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f27a4ad0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 1b83959..39717ef 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -82,6 +82,12 @@ public class DFSStripedOutputStream extends DFSOutputStream
     implements StreamCapabilities {
   private static final ByteBufferPool BUFFER_POOL = new ElasticByteBufferPool();
 
+  /**
+   * OutputStream level last exception, will be used to indicate the fatal
+   * exception of this stream, i.e., being aborted.
+   */
+  private final ExceptionLastSeen exceptionLastSeen = new ExceptionLastSeen();
+
   static class MultipleBlockingQueue<T> {
     private final List<BlockingQueue<T>> queues;
 
@@ -971,12 +977,9 @@ public class DFSStripedOutputStream extends DFSOutputStream
       if (isClosed()) {
         return;
       }
-      for (StripedDataStreamer streamer : streamers) {
-        streamer.getLastException().set(
-            new IOException("Lease timeout of "
-                + (dfsClient.getConf().getHdfsTimeout() / 1000)
-                + " seconds expired."));
-      }
+      exceptionLastSeen.set(new IOException("Lease timeout of "
+          + (dfsClient.getConf().getHdfsTimeout() / 1000)
+          + " seconds expired."));
 
       try {
         closeThreads(true);
@@ -1133,18 +1136,26 @@ public class DFSStripedOutputStream extends DFSOutputStream
   @Override
   protected synchronized void closeImpl() throws IOException {
     if (isClosed()) {
+      exceptionLastSeen.check(true);
+
+      // Writing to at least {dataUnits} replicas can be considered as success,
+      // and the rest of data can be recovered.
+      final int minReplication = ecPolicy.getNumDataUnits();
+      int goodStreamers = 0;
       final MultipleIOException.Builder b = new MultipleIOException.Builder();
-      for(int i = 0; i < streamers.size(); i++) {
-        final StripedDataStreamer si = getStripedDataStreamer(i);
+      for (final StripedDataStreamer si : streamers) {
         try {
           si.getLastException().check(true);
+          goodStreamers++;
         } catch (IOException e) {
           b.add(e);
         }
       }
-      final IOException ioe = b.build();
-      if (ioe != null) {
-        throw ioe;
+      if (goodStreamers < minReplication) {
+        final IOException ioe = b.build();
+        if (ioe != null) {
+          throw ioe;
+        }
       }
       return;
     }
@@ -1183,9 +1194,10 @@ public class DFSStripedOutputStream extends DFSOutputStream
       }
     } finally {
       // Failures may happen when flushing data/parity data out. Exceptions
-      // may be thrown if more than 3 streamers fail, or updatePipeline RPC
-      // fails. Streamers may keep waiting for the new block/GS information.
-      // Thus need to force closing these threads.
+      // may be thrown if the number of failed streamers is more than the
+      // number of parity blocks, or updatePipeline RPC fails. Streamers may
+      // keep waiting for the new block/GS information. Thus need to force
+      // closing these threads.
       closeThreads(true);
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f27a4ad0/hadoop-h
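For readers skimming the patch above: the core pattern is a single stream-level holder for the first fatal exception, plus a close-time rule that writing at least numDataUnits block replicas counts as success because the rest is recoverable by erasure coding. A minimal, self-contained sketch of that pattern follows; the names are illustrative stand-ins, not the HDFS classes.

import java.io.IOException;

// Sketch only: a stream-level "last exception" holder and the close-time
// success check, under the assumption of an RS(k, m) striped layout.
class StreamCloseSketch {

  /** Records the first fatal exception; check(true) throws it at most once. */
  static class LastExceptionHolder {
    private IOException lastException;

    synchronized void set(IOException e) {
      if (lastException == null) {
        lastException = e;      // keep only the first fatal error
      }
    }

    synchronized void check(boolean clear) throws IOException {
      if (lastException != null) {
        final IOException e = lastException;
        if (clear) {
          lastException = null; // a later close() will not throw again
        }
        throw e;
      }
    }
  }

  /**
   * With k data units, k successfully closed streamers are enough; only
   * surface the collected errors when fewer than k streamers succeeded.
   */
  static void checkCloseSuccess(int goodStreamers, int numDataUnits,
      IOException collected) throws IOException {
    if (goodStreamers < numDataUnits && collected != null) {
      throw collected;
    }
  }
}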

hadoop git commit: HDFS-12612. DFSStripedOutputStream.close will throw if called a second time with a failed streamer. (Lei (Eddy) Xu)

2017-10-17 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 81a86860b -> 6959db9c2


HDFS-12612. DFSStripedOutputStream.close will throw if called a second time 
with a failed streamer. (Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6959db9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6959db9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6959db9c

Branch: refs/heads/branch-3.0
Commit: 6959db9c20217f6adb12e9f3140f5db9a26c38c4
Parents: 81a8686
Author: Lei Xu <l...@apache.org>
Authored: Tue Oct 17 15:52:09 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Tue Oct 17 15:53:07 2017 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 40 +++
 .../org/apache/hadoop/hdfs/DataStreamer.java| 31 ++--
 .../apache/hadoop/hdfs/ExceptionLastSeen.java   | 75 +++
 .../TestDFSStripedOutputStreamWithFailure.java  | 76 
 4 files changed, 184 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6959db9c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 1b83959..39717ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -82,6 +82,12 @@ public class DFSStripedOutputStream extends DFSOutputStream
 implements StreamCapabilities {
  private static final ByteBufferPool BUFFER_POOL = new ElasticByteBufferPool();
 
+  /**
+   * OutputStream level last exception, will be used to indicate the fatal
+   * exception of this stream, i.e., being aborted.
+   */
+  private final ExceptionLastSeen exceptionLastSeen = new ExceptionLastSeen();
+
  static class MultipleBlockingQueue<T> {
    private final List<BlockingQueue<T>> queues;
 
@@ -971,12 +977,9 @@ public class DFSStripedOutputStream extends DFSOutputStream
   if (isClosed()) {
 return;
   }
-  for (StripedDataStreamer streamer : streamers) {
-streamer.getLastException().set(
-new IOException("Lease timeout of "
-+ (dfsClient.getConf().getHdfsTimeout() / 1000)
-+ " seconds expired."));
-  }
+  exceptionLastSeen.set(new IOException("Lease timeout of "
+  + (dfsClient.getConf().getHdfsTimeout() / 1000)
+  + " seconds expired."));
 
   try {
 closeThreads(true);
@@ -1133,18 +1136,26 @@ public class DFSStripedOutputStream extends DFSOutputStream
   @Override
   protected synchronized void closeImpl() throws IOException {
 if (isClosed()) {
+  exceptionLastSeen.check(true);
+
+  // Writing to at least {dataUnits} replicas can be considered as success,
+  // and the rest of data can be recovered.
+  final int minReplication = ecPolicy.getNumDataUnits();
+  int goodStreamers = 0;
   final MultipleIOException.Builder b = new MultipleIOException.Builder();
-  for(int i = 0; i < streamers.size(); i++) {
-final StripedDataStreamer si = getStripedDataStreamer(i);
+  for (final StripedDataStreamer si : streamers) {
 try {
   si.getLastException().check(true);
+  goodStreamers++;
 } catch (IOException e) {
   b.add(e);
 }
   }
-  final IOException ioe = b.build();
-  if (ioe != null) {
-throw ioe;
+  if (goodStreamers < minReplication) {
+final IOException ioe = b.build();
+if (ioe != null) {
+  throw ioe;
+}
   }
   return;
 }
@@ -1183,9 +1194,10 @@ public class DFSStripedOutputStream extends DFSOutputStream
 }
   } finally {
 // Failures may happen when flushing data/parity data out. Exceptions
-// may be thrown if more than 3 streamers fail, or updatePipeline RPC
-// fails. Streamers may keep waiting for the new block/GS information.
-// Thus need to force closing these threads.
+// may be thrown if the number of failed streamers is more than the
+// number of parity blocks, or updatePipeline RPC fails. Streamers may
+// keep waiting for the new block/GS information. Thus need to force
+// closing these threads.
 closeThreads(true);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6959d

hadoop git commit: HDFS-12613. Native EC coder should implement release() as idempotent function. (Lei (Eddy) Xu)

2017-10-16 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 a9cbfb039 -> cdfcd384b


HDFS-12613. Native EC coder should implement release() as idempotent function. 
(Lei (Eddy) Xu)

(cherry picked from commit 31ebccc96238136560f4210bdf6766fe18e0650c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cdfcd384
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cdfcd384
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cdfcd384

Branch: refs/heads/branch-3.0
Commit: cdfcd384b24decab21749d68040d3e49d26d9621
Parents: a9cbfb0
Author: Lei Xu <l...@apache.org>
Authored: Mon Oct 16 19:44:30 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Mon Oct 16 19:46:26 2017 -0700

--
 .../io/erasurecode/coder/ErasureCodingStep.java |  5 +-
 .../erasurecode/coder/ErasureDecodingStep.java  |  5 +-
 .../erasurecode/coder/ErasureEncodingStep.java  |  5 +-
 .../coder/HHXORErasureDecodingStep.java | 12 ++--
 .../coder/HHXORErasureEncodingStep.java | 10 +++-
 .../io/erasurecode/coder/util/HHUtil.java   |  4 +-
 .../rawcoder/AbstractNativeRawDecoder.java  | 14 -
 .../rawcoder/AbstractNativeRawEncoder.java  | 14 -
 .../rawcoder/NativeRSRawDecoder.java| 11 ++--
 .../rawcoder/NativeRSRawEncoder.java| 11 ++--
 .../rawcoder/NativeXORRawDecoder.java   | 14 +++--
 .../rawcoder/NativeXORRawEncoder.java   |  9 +--
 .../rawcoder/RSLegacyRawDecoder.java|  6 +-
 .../erasurecode/rawcoder/RawErasureDecoder.java | 17 --
 .../erasurecode/rawcoder/RawErasureEncoder.java | 16 +++--
 .../apache/hadoop/io/erasurecode/jni_common.c   |  5 +-
 .../hadoop/io/erasurecode/jni_rs_decoder.c  |  9 ++-
 .../hadoop/io/erasurecode/jni_rs_encoder.c  |  9 ++-
 .../hadoop/io/erasurecode/jni_xor_decoder.c |  9 ++-
 .../hadoop/io/erasurecode/jni_xor_encoder.c |  9 ++-
 .../erasurecode/coder/TestErasureCoderBase.java | 18 +-
 .../coder/TestHHErasureCoderBase.java   | 10 +++-
 .../rawcoder/RawErasureCoderBenchmark.java  |  9 +--
 .../erasurecode/rawcoder/TestDummyRawCoder.java | 15 -
 .../rawcoder/TestNativeRSRawCoder.java  |  6 ++
 .../rawcoder/TestNativeXORRawCoder.java |  7 +++
 .../erasurecode/rawcoder/TestRawCoderBase.java  | 61 ++--
 .../hadoop/hdfs/DFSStripedOutputStream.java |  2 +-
 .../hadoop/hdfs/PositionStripeReader.java   |  3 +-
 .../hadoop/hdfs/StatefulStripeReader.java   |  3 +-
 .../org/apache/hadoop/hdfs/StripeReader.java|  7 ++-
 .../StripedBlockChecksumReconstructor.java  |  2 +-
 .../erasurecode/StripedBlockReconstructor.java  |  2 +-
 .../apache/hadoop/hdfs/StripedFileTestUtil.java |  6 +-
 34 files changed, 269 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdfcd384/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java
index 9dd0aed..fb89d99 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java
@@ -21,6 +21,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECChunk;
 
+import java.io.IOException;
+
 /**
  * Erasure coding step that's involved in encoding/decoding of a block group.
  */
@@ -47,7 +49,8 @@ public interface ErasureCodingStep {
* @param inputChunks
* @param outputChunks
*/
-  void performCoding(ECChunk[] inputChunks, ECChunk[] outputChunks);
+  void performCoding(ECChunk[] inputChunks, ECChunk[] outputChunks)
+  throws IOException;
 
   /**
* Notify erasure coder that all the chunks of input blocks are processed so

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdfcd384/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureDecodingStep.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureDecodingStep.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureDecodingStep.java
index ae396a2..24f5547 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/er
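The idempotence this patch adds to release() boils down to freeing the native resource at most once and turning later calls into no-ops. A rough sketch under hypothetical names (this is not the Hadoop JNI coder code):

import java.util.concurrent.atomic.AtomicBoolean;

// Sketch of an idempotent release() guarding a (fake) native handle.
class NativeCoderSketch {
  private long nativeHandle;                   // hypothetical JNI pointer
  private final AtomicBoolean released = new AtomicBoolean(false);

  NativeCoderSketch(long handle) {
    this.nativeHandle = handle;
  }

  void release() {
    // compareAndSet ensures the native free runs exactly once, even if
    // release() is called repeatedly or from several threads.
    if (released.compareAndSet(false, true)) {
      destroyNative(nativeHandle);
      nativeHandle = 0;
    }
  }

  private static void destroyNative(long handle) {
    // stand-in for the real JNI destroy call
  }
}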

hadoop git commit: HDFS-12613. Native EC coder should implement release() as idempotent function. (Lei (Eddy) Xu)

2017-10-16 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk b406d8e37 -> 31ebccc96


HDFS-12613. Native EC coder should implement release() as idempotent function. 
(Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31ebccc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31ebccc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31ebccc9

Branch: refs/heads/trunk
Commit: 31ebccc96238136560f4210bdf6766fe18e0650c
Parents: b406d8e
Author: Lei Xu <l...@apache.org>
Authored: Mon Oct 16 19:44:30 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Mon Oct 16 19:44:30 2017 -0700

--
 .../io/erasurecode/coder/ErasureCodingStep.java |  5 +-
 .../erasurecode/coder/ErasureDecodingStep.java  |  5 +-
 .../erasurecode/coder/ErasureEncodingStep.java  |  5 +-
 .../coder/HHXORErasureDecodingStep.java | 12 ++--
 .../coder/HHXORErasureEncodingStep.java | 10 +++-
 .../io/erasurecode/coder/util/HHUtil.java   |  4 +-
 .../rawcoder/AbstractNativeRawDecoder.java  | 14 -
 .../rawcoder/AbstractNativeRawEncoder.java  | 14 -
 .../rawcoder/NativeRSRawDecoder.java| 11 ++--
 .../rawcoder/NativeRSRawEncoder.java| 11 ++--
 .../rawcoder/NativeXORRawDecoder.java   | 14 +++--
 .../rawcoder/NativeXORRawEncoder.java   |  9 +--
 .../rawcoder/RSLegacyRawDecoder.java|  6 +-
 .../erasurecode/rawcoder/RawErasureDecoder.java | 17 --
 .../erasurecode/rawcoder/RawErasureEncoder.java | 16 +++--
 .../apache/hadoop/io/erasurecode/jni_common.c   |  5 +-
 .../hadoop/io/erasurecode/jni_rs_decoder.c  |  9 ++-
 .../hadoop/io/erasurecode/jni_rs_encoder.c  |  9 ++-
 .../hadoop/io/erasurecode/jni_xor_decoder.c |  9 ++-
 .../hadoop/io/erasurecode/jni_xor_encoder.c |  9 ++-
 .../erasurecode/coder/TestErasureCoderBase.java | 18 +-
 .../coder/TestHHErasureCoderBase.java   | 10 +++-
 .../rawcoder/RawErasureCoderBenchmark.java  |  9 +--
 .../erasurecode/rawcoder/TestDummyRawCoder.java | 15 -
 .../rawcoder/TestNativeRSRawCoder.java  |  6 ++
 .../rawcoder/TestNativeXORRawCoder.java |  7 +++
 .../erasurecode/rawcoder/TestRawCoderBase.java  | 61 ++--
 .../hadoop/hdfs/DFSStripedOutputStream.java |  2 +-
 .../hadoop/hdfs/PositionStripeReader.java   |  3 +-
 .../hadoop/hdfs/StatefulStripeReader.java   |  3 +-
 .../org/apache/hadoop/hdfs/StripeReader.java|  7 ++-
 .../StripedBlockChecksumReconstructor.java  |  2 +-
 .../erasurecode/StripedBlockReconstructor.java  |  2 +-
 .../apache/hadoop/hdfs/StripedFileTestUtil.java |  6 +-
 34 files changed, 269 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ebccc9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java
index 9dd0aed..fb89d99 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCodingStep.java
@@ -21,6 +21,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECChunk;
 
+import java.io.IOException;
+
 /**
  * Erasure coding step that's involved in encoding/decoding of a block group.
  */
@@ -47,7 +49,8 @@ public interface ErasureCodingStep {
* @param inputChunks
* @param outputChunks
*/
-  void performCoding(ECChunk[] inputChunks, ECChunk[] outputChunks);
+  void performCoding(ECChunk[] inputChunks, ECChunk[] outputChunks)
+  throws IOException;
 
   /**
* Notify erasure coder that all the chunks of input blocks are processed so

http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ebccc9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureDecodingStep.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureDecodingStep.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureDecodingStep.java
index ae396a2..24f5547 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureDecodingStep.java
+++ b/hadoop-common-project/hadoop-

hadoop git commit: HDFS-12606. When using native decoder, DFSStripedStream.close crashes JVM after being called multiple times. (Lei (Eddy) Xu)

2017-10-09 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6d6ca4c92 -> 46644319e


HDFS-12606. When using native decoder, DFSStripedStream.close crashes JVM after 
being called multiple times. (Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46644319
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46644319
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46644319

Branch: refs/heads/trunk
Commit: 46644319e1b3295ddbc7597c060956bf46487d11
Parents: 6d6ca4c
Author: Lei Xu <l...@apache.org>
Authored: Mon Oct 9 10:08:30 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Mon Oct 9 10:08:30 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSStripedInputStream.java  |  7 +--
 .../apache/hadoop/hdfs/TestDFSStripedInputStream.java  | 13 +
 2 files changed, 18 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46644319/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index d4d0646..e7d90ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -68,7 +68,7 @@ public class DFSStripedInputStream extends DFSInputStream {
   private ByteBuffer curStripeBuf;
   private ByteBuffer parityBuf;
   private final ErasureCodingPolicy ecPolicy;
-  private final RawErasureDecoder decoder;
+  private RawErasureDecoder decoder;
 
   /**
* Indicate the start/end offset of the current buffered stripe in the
@@ -188,7 +188,10 @@ public class DFSStripedInputStream extends DFSInputStream {
 BUFFER_POOL.putBuffer(parityBuf);
 parityBuf = null;
   }
-  decoder.release();
+  if (decoder != null) {
+decoder.release();
+decoder = null;
+  }
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46644319/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index f94b7ab..de276a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -491,4 +491,17 @@ public class TestDFSStripedInputStream {
 assertEquals(readSize, done);
 assertArrayEquals(expected, readBuffer);
   }
+
+  @Test
+  public void testIdempotentClose() throws Exception {
+final int numBlocks = 2;
+DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
+stripesPerBlock, false, ecPolicy);
+
+try (DFSInputStream in = fs.getClient().open(filePath.toString())) {
+  assertTrue(in instanceof DFSStripedInputStream);
+  // Close twice
+  in.close();
+}
+  }
 }
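The new test closes the stream explicitly inside a try-with-resources block, so close() runs twice on the same object. The contract it pins down is the general java.io.Closeable one; a tiny standalone illustration:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class DoubleCloseExample {
  public static void main(String[] args) throws IOException {
    try (InputStream in = new ByteArrayInputStream(new byte[] {1, 2, 3})) {
      in.close();   // first, explicit close
    }               // try-with-resources closes again; must be a no-op
    System.out.println("close() called twice without error");
  }
}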





hadoop git commit: HDFS-12606. When using native decoder, DFSStripedStream.close crashes JVM after being called multiple times. (Lei (Eddy) Xu)

2017-10-09 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 bec03f706 -> 015eb628b


HDFS-12606. When using native decoder, DFSStripedStream.close crashes JVM after 
being called multiple times. (Lei (Eddy) Xu)

(cherry picked from commit 46644319e1b3295ddbc7597c060956bf46487d11)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/015eb628
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/015eb628
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/015eb628

Branch: refs/heads/branch-3.0
Commit: 015eb628b8a1cebb1247a585fac7f70eabcd6540
Parents: bec03f7
Author: Lei Xu <l...@apache.org>
Authored: Mon Oct 9 10:08:30 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Mon Oct 9 10:09:49 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSStripedInputStream.java  |  7 +--
 .../apache/hadoop/hdfs/TestDFSStripedInputStream.java  | 13 +
 2 files changed, 18 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/015eb628/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index d4d0646..e7d90ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -68,7 +68,7 @@ public class DFSStripedInputStream extends DFSInputStream {
   private ByteBuffer curStripeBuf;
   private ByteBuffer parityBuf;
   private final ErasureCodingPolicy ecPolicy;
-  private final RawErasureDecoder decoder;
+  private RawErasureDecoder decoder;
 
   /**
* Indicate the start/end offset of the current buffered stripe in the
@@ -188,7 +188,10 @@ public class DFSStripedInputStream extends DFSInputStream {
 BUFFER_POOL.putBuffer(parityBuf);
 parityBuf = null;
   }
-  decoder.release();
+  if (decoder != null) {
+decoder.release();
+decoder = null;
+  }
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/015eb628/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index f94b7ab..de276a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -491,4 +491,17 @@ public class TestDFSStripedInputStream {
 assertEquals(readSize, done);
 assertArrayEquals(expected, readBuffer);
   }
+
+  @Test
+  public void testIdempotentClose() throws Exception {
+final int numBlocks = 2;
+DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
+stripesPerBlock, false, ecPolicy);
+
+try (DFSInputStream in = fs.getClient().open(filePath.toString())) {
+  assertTrue(in instanceof DFSStripedInputStream);
+  // Close twice
+  in.close();
+}
+  }
 }





hadoop git commit: HDFS-12569. Unset EC policy logs empty payload in edit log. (Lei (Eddy) Xu)

2017-10-02 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 280080fad -> 27ffd43b6


HDFS-12569. Unset EC policy logs empty payload in edit log. (Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27ffd43b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27ffd43b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27ffd43b

Branch: refs/heads/trunk
Commit: 27ffd43b6419c9ebe697536bcb6abb858ce791d2
Parents: 280080f
Author: Lei Xu <l...@apache.org>
Authored: Mon Oct 2 15:31:20 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Mon Oct 2 15:35:49 2017 -0700

--
 .../hdfs/server/namenode/FSDirErasureCodingOp.java |  3 +--
 .../hadoop/hdfs/server/namenode/FSDirXAttrOp.java  |  4 
 .../hdfs/TestUnsetAndChangeDirectoryEcPolicy.java  | 13 +
 3 files changed, 18 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27ffd43b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 181b147..391e392 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -298,8 +298,7 @@ final class FSDirErasureCodingOp {
 
    final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
 xattrs.add(ecXAttr);
-FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, srcIIP, xattrs);
-return xattrs;
+return FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, srcIIP, xattrs);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27ffd43b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index acdade7..3223467 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -184,6 +184,10 @@ class FSDirXAttrOp {
 return fsd.getAuditFileInfo(iip);
   }
 
+  /**
+   * Remove xattrs from the inode, and return the removed xattrs.
+   * @return the removed xattrs.
+   */
  static List<XAttr> unprotectedRemoveXAttrs(
      FSDirectory fsd, final INodesInPath iip, final List<XAttr> toRemove)
      throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27ffd43b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
index 529a110..52cf163 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
@@ -362,4 +362,17 @@ public class TestUnsetAndChangeDirectoryEcPolicy {
   + ecFilePath, e);
 }
   }
+
+  /**
+   * Test unsetEcPolicy is persisted correctly in edit log.
+   */
+  @Test
+  public void testUnsetEcPolicyInEditLog() throws IOException {
+fs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());
+Assert.assertEquals(ecPolicy, fs.getErasureCodingPolicy(new Path("/")));
+fs.getClient().unsetErasureCodingPolicy("/");
+
+cluster.restartNameNode(true);
+Assert.assertNull(fs.getErasureCodingPolicy(new Path("/")));
+  }
 }
\ No newline at end of file
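The one-line change above matters because the returned list becomes the edit-log payload: it must be the xattrs the operation actually removed, not the caller's request list. A simplified sketch of that principle, with plain strings standing in for XAttr objects:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

class RemoveAndReturnSketch {
  // Removes the requested entries and returns exactly what was removed, so
  // the caller can persist a faithful record (in HDFS: the edit-log entry).
  static List<String> removeMatching(List<String> stored, List<String> toRemove) {
    List<String> removed = new ArrayList<>();
    Iterator<String> it = stored.iterator();
    while (it.hasNext()) {
      String attr = it.next();
      if (toRemove.contains(attr)) {
        it.remove();
        removed.add(attr);
      }
    }
    return removed;  // log this, not toRemove
  }
}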





hadoop git commit: HDFS-12569. Unset EC policy logs empty payload in edit log. (Lei (Eddy) Xu)

2017-10-02 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 3dd3f924f -> eb76d3dbe


HDFS-12569. Unset EC policy logs empty payload in edit log. (Lei (Eddy) Xu)

(cherry picked from commit 0eab2fcac615228447e7f8bdf4b3ee742ea796ae)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb76d3db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb76d3db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb76d3db

Branch: refs/heads/branch-3.0
Commit: eb76d3dbecdd7363d782726615b4ce1ca6ec0feb
Parents: 3dd3f92
Author: Lei Xu <l...@apache.org>
Authored: Mon Oct 2 15:31:20 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Mon Oct 2 15:33:35 2017 -0700

--
 .../hdfs/server/namenode/FSDirErasureCodingOp.java |  3 +--
 .../hadoop/hdfs/server/namenode/FSDirXAttrOp.java  |  4 
 .../hdfs/TestUnsetAndChangeDirectoryEcPolicy.java  | 13 +
 3 files changed, 18 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb76d3db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 181b147..391e392 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -298,8 +298,7 @@ final class FSDirErasureCodingOp {
 
    final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
 xattrs.add(ecXAttr);
-FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, srcIIP, xattrs);
-return xattrs;
+return FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, srcIIP, xattrs);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb76d3db/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index acdade7..3223467 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -184,6 +184,10 @@ class FSDirXAttrOp {
 return fsd.getAuditFileInfo(iip);
   }
 
+  /**
+   * Remove xattrs from the inode, and return the removed xattrs.
+   * @return the removed xattrs.
+   */
  static List<XAttr> unprotectedRemoveXAttrs(
      FSDirectory fsd, final INodesInPath iip, final List<XAttr> toRemove)
      throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb76d3db/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
index 529a110..52cf163 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
@@ -362,4 +362,17 @@ public class TestUnsetAndChangeDirectoryEcPolicy {
   + ecFilePath, e);
 }
   }
+
+  /**
+   * Test unsetEcPolicy is persisted correctly in edit log.
+   */
+  @Test
+  public void testUnsetEcPolicyInEditLog() throws IOException {
+fs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());
+Assert.assertEquals(ecPolicy, fs.getErasureCodingPolicy(new Path("/")));
+fs.getClient().unsetErasureCodingPolicy("/");
+
+cluster.restartNameNode(true);
+Assert.assertNull(fs.getErasureCodingPolicy(new Path("/")));
+  }
 }
\ No newline at end of file





hadoop git commit: HDFS-12453. TestDataNodeHotSwapVolumes fails in trunk Jenkins runs. (Lei (Eddy) Xu)

2017-09-29 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7f6118f91 -> f40dbc170


HDFS-12453. TestDataNodeHotSwapVolumes fails in trunk Jenkins runs. (Lei (Eddy) 
Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f40dbc17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f40dbc17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f40dbc17

Branch: refs/heads/trunk
Commit: f40dbc170e7f63b947d99b7945e42a14a588bea6
Parents: 7f6118f
Author: Lei Xu <l...@apache.org>
Authored: Fri Sep 29 10:46:17 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Sep 29 10:46:17 2017 -0700

--
 .../datanode/TestDataNodeHotSwapVolumes.java| 129 ---
 1 file changed, 83 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f40dbc17/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 5d4ac1e..df5e297 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.BlockMissingException;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -36,6 +37,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -46,6 +49,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -68,6 +72,7 @@ import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -83,6 +88,7 @@ import static org.hamcrest.CoreMatchers.not;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -775,12 +781,11 @@ public class TestDataNodeHotSwapVolumes {
   private void testRemoveVolumeBeingWrittenForDatanode(int dataNodeIdx)
   throws IOException, ReconfigurationException, TimeoutException,
   InterruptedException, BrokenBarrierException {
-// Starts DFS cluster with 3 DataNodes to form a pipeline.
-startDFSCluster(1, 3);
+startDFSCluster(1, 4);
 
 final short REPLICATION = 3;
-final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
-final FileSystem fs = cluster.getFileSystem();
+final DistributedFileSystem fs = cluster.getFileSystem();
+final DFSClient client = fs.getClient();
 final Path testFile = new Path("/test");
 FSDataOutputStream out = fs.create(testFile, REPLICATION);
 
@@ -790,54 +795,93 @@ public class TestDataNodeHotSwapVolumes {
 out.write(writeBuf);
 out.hflush();
 
-// Make FsDatasetSpi#finalizeBlock a time-consuming operation. So if the
-// BlockReceiver releases volume reference before finalizeBlock(), the blocks
-// on the volume will be removed, and finalizeBlock() throws IOE.
-final FsDatasetSpi data = dn.data;
-dn.data = Mockito.spy(data);
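The test technique visible at the end of this hunk, wrapping the dataset in a Mockito spy so one method becomes slow, is a general way to widen a race window deterministically. A sketch with a hypothetical Service interface (not the FsDatasetSpi API):

import static org.mockito.ArgumentMatchers.anyString;

import org.mockito.Mockito;

class SlowSpySketch {
  interface Service {
    void finalizeWork(String id);
  }

  // Returns a spy whose finalizeWork() sleeps first and then delegates to
  // the real object, giving a concurrent actor time to interfere.
  static Service withSlowFinalize(Service real, long delayMillis) {
    Service spy = Mockito.spy(real);
    Mockito.doAnswer(invocation -> {
      Thread.sleep(delayMillis);
      return invocation.callRealMethod();
    }).when(spy).finalizeWork(anyString());
    return spy;
  }
}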

hadoop git commit: HDFS-12453. TestDataNodeHotSwapVolumes fails in trunk Jenkins runs. (Lei (Eddy) Xu)

2017-09-29 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 7a9479597 -> 337506190


HDFS-12453. TestDataNodeHotSwapVolumes fails in trunk Jenkins runs. (Lei (Eddy) 
Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33750619
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33750619
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33750619

Branch: refs/heads/branch-3.0
Commit: 337506190b559bc712b18a606b68a18677e90f7f
Parents: 7a94795
Author: Lei Xu <l...@apache.org>
Authored: Fri Sep 29 10:46:17 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Sep 29 10:47:37 2017 -0700

--
 .../datanode/TestDataNodeHotSwapVolumes.java| 129 ---
 1 file changed, 83 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33750619/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 5d4ac1e..df5e297 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.BlockMissingException;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -36,6 +37,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -46,6 +49,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -68,6 +72,7 @@ import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -83,6 +88,7 @@ import static org.hamcrest.CoreMatchers.not;
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -775,12 +781,11 @@ public class TestDataNodeHotSwapVolumes {
   private void testRemoveVolumeBeingWrittenForDatanode(int dataNodeIdx)
   throws IOException, ReconfigurationException, TimeoutException,
   InterruptedException, BrokenBarrierException {
-// Starts DFS cluster with 3 DataNodes to form a pipeline.
-startDFSCluster(1, 3);
+startDFSCluster(1, 4);
 
 final short REPLICATION = 3;
-final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
-final FileSystem fs = cluster.getFileSystem();
+final DistributedFileSystem fs = cluster.getFileSystem();
+final DFSClient client = fs.getClient();
 final Path testFile = new Path("/test");
 FSDataOutputStream out = fs.create(testFile, REPLICATION);
 
@@ -790,54 +795,93 @@ public class TestDataNodeHotSwapVolumes {
 out.write(writeBuf);
 out.hflush();
 
-// Make FsDatasetSpi#finalizeBlock a time-consuming operation. So if the
-// BlockReceiver releases volume reference before finalizeBlock(), the blocks
-// on the volume will be removed, and finalizeBlock() throws IOE.
-final FsDatasetSpi data = dn.data;
-dn.data = Mockito.spy(data);

hadoop git commit: HDFS-12523. Thread pools in ErasureCodingWorker do not shutdown. (Huafeng Wang via Lei)

2017-09-26 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9d3e4cccf -> 1267ff22c


HDFS-12523. Thread pools in ErasureCodingWorker do not shutdown. (Huafeng Wang 
via Lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1267ff22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1267ff22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1267ff22

Branch: refs/heads/trunk
Commit: 1267ff22ce9226b6dd52e3f33cbe3b3094fb0e35
Parents: 9d3e4cc
Author: Lei Xu <l...@apache.org>
Authored: Thu Sep 21 16:10:32 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Tue Sep 26 10:08:16 2017 -0700

--
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java |  6 +-
 .../server/datanode/erasurecode/ErasureCodingWorker.java | 11 +--
 .../datanode/erasurecode/StripedReconstructor.java   |  8 +++-
 3 files changed, 17 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1267ff22/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 042a627..6163d93 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1115,7 +1115,7 @@ public class DataNode extends ReconfigurableBase
   /**
* Shutdown disk balancer.
*/
-  private  void shutdownDiskBalancer() {
+  private void shutdownDiskBalancer() {
 if (this.diskBalancer != null) {
   this.diskBalancer.shutdown();
   this.diskBalancer = null;
@@ -2077,6 +2077,10 @@ public class DataNode extends ReconfigurableBase
   ipcServer.stop();
 }
 
+if (ecWorker != null) {
+  ecWorker.shutDown();
+}
+
 if(blockPoolManager != null) {
   try {
 this.blockPoolManager.shutDownAll(bposArray);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1267ff22/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 07d213c..63498bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
 
 import java.util.Collection;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -149,7 +151,12 @@ public final class ErasureCodingWorker {
 return conf;
   }
 
-  ThreadPoolExecutor getStripedReadPool() {
-return stripedReadPool;
+  CompletionService createReadService() {
+return new ExecutorCompletionService<>(stripedReadPool);
+  }
+
+  public void shutDown() {
+stripedReconstructionPool.shutdown();
+stripedReadPool.shutdown();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1267ff22/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
index 3202121..bbffcf5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
@@ -39,8 +39,6 @@ import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.util.BitSet;
 import java.util.concurrent.CompletionService;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.Thread
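Isolated from the DataNode wiring, the patch's two moves look like this: hand callers a fresh CompletionService view of a shared pool instead of the pool itself, and shut both pools down once from the owner's stop path. A minimal sketch in plain java.util.concurrent (pool sizes here are arbitrary):

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class WorkerPoolsSketch {
  private final ExecutorService stripedReadPool = Executors.newFixedThreadPool(8);
  private final ExecutorService stripedReconstructionPool =
      Executors.newFixedThreadPool(2);

  // Each caller gets its own completion queue over the shared read pool,
  // rather than a reference to the executor itself.
  <T> CompletionService<T> createReadService() {
    return new ExecutorCompletionService<>(stripedReadPool);
  }

  // Invoked once from the owning component's shutdown sequence; otherwise
  // these (non-daemon) pool threads linger after the component stops.
  void shutDown() {
    stripedReconstructionPool.shutdown();
    stripedReadPool.shutdown();
  }
}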

hadoop git commit: HDFS-12523. Thread pools in ErasureCodingWorker do not shutdown. (Huafeng Wang via Lei)

2017-09-21 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 a294bfd80 -> 98612bb2d


HDFS-12523. Thread pools in ErasureCodingWorker do not shutdown. (Huafeng Wang 
via Lei)

(cherry picked from commit 8680131e4b38f0b876173cbee838fcdb91cb52b6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98612bb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98612bb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98612bb2

Branch: refs/heads/branch-3.0
Commit: 98612bb2d047cadce8b018c5f751a9291b0c2e43
Parents: a294bfd
Author: Lei Xu <l...@apache.org>
Authored: Thu Sep 21 16:10:32 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Thu Sep 21 16:12:10 2017 -0700

--
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java |  6 +-
 .../server/datanode/erasurecode/ErasureCodingWorker.java | 11 +--
 .../datanode/erasurecode/StripedReconstructor.java   |  8 +++-
 3 files changed, 17 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98612bb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 042a627..6163d93 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1115,7 +1115,7 @@ public class DataNode extends ReconfigurableBase
   /**
* Shutdown disk balancer.
*/
-  private  void shutdownDiskBalancer() {
+  private void shutdownDiskBalancer() {
 if (this.diskBalancer != null) {
   this.diskBalancer.shutdown();
   this.diskBalancer = null;
@@ -2077,6 +2077,10 @@ public class DataNode extends ReconfigurableBase
   ipcServer.stop();
 }
 
+if (ecWorker != null) {
+  ecWorker.shutDown();
+}
+
 if(blockPoolManager != null) {
   try {
 this.blockPoolManager.shutDownAll(bposArray);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98612bb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 0e00e11..70c5378 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
 
 import java.util.Collection;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -149,7 +151,12 @@ public final class ErasureCodingWorker {
 return conf;
   }
 
-  ThreadPoolExecutor getStripedReadPool() {
-return stripedReadPool;
+  CompletionService createReadService() {
+return new ExecutorCompletionService<>(stripedReadPool);
+  }
+
+  public void shutDown() {
+stripedReconstructionPool.shutdown();
+stripedReadPool.shutdown();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/98612bb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
index 3202121..bbffcf5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReconstructor.java
@@ -39,8 +39,6 @@ import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.util.BitSet;
 import java.util.concurrent.Compl

hadoop git commit: HDFS-12449. TestReconstructStripedFile.testNNSendsErasureCodingTasks randomly cannot finish in 60s. (SammiChen via lei)

2017-09-19 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1ce365e91 -> 7d9ca2e21


HDFS-12449. TestReconstructStripedFile.testNNSendsErasureCodingTasks randomly 
cannot finish in 60s. (SammiChen via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d9ca2e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d9ca2e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d9ca2e2

Branch: refs/heads/branch-3.0
Commit: 7d9ca2e21bebba22a2c2f47967280ded8c641082
Parents: 1ce365e
Author: Lei Xu <l...@apache.org>
Authored: Tue Sep 19 11:50:01 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Tue Sep 19 11:51:44 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d9ca2e2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 72b1412..713a10b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -456,8 +456,8 @@ public class TestReconstructStripedFile {
 ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy();
 fs.getClient().setErasureCodingPolicy("/", policy.getName());
 
-final int fileLen = cellSize * ecPolicy.getNumDataUnits() * 2;
-for (int i = 0; i < 100; i++) {
+final int fileLen = cellSize * ecPolicy.getNumDataUnits();
+for (int i = 0; i < 50; i++) {
   writeFile(fs, "/ec-file-" + i, fileLen);
 }
 





hadoop git commit: HDFS-12449. TestReconstructStripedFile.testNNSendsErasureCodingTasks randomly cannot finish in 60s. (SammiChen via lei)

2017-09-19 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk fda1221c5 -> 7bbeacb75


HDFS-12449. TestReconstructStripedFile.testNNSendsErasureCodingTasks randomly 
cannot finish in 60s. (SammiChen via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7bbeacb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7bbeacb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7bbeacb7

Branch: refs/heads/trunk
Commit: 7bbeacb75e93261dbda0e8efcde510e5fcf83efb
Parents: fda1221
Author: Lei Xu <l...@apache.org>
Authored: Tue Sep 19 11:50:01 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Tue Sep 19 11:50:01 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7bbeacb7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 72b1412..713a10b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -456,8 +456,8 @@ public class TestReconstructStripedFile {
 ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy();
 fs.getClient().setErasureCodingPolicy("/", policy.getName());
 
-final int fileLen = cellSize * ecPolicy.getNumDataUnits() * 2;
-for (int i = 0; i < 100; i++) {
+final int fileLen = cellSize * ecPolicy.getNumDataUnits();
+for (int i = 0; i < 50; i++) {
   writeFile(fs, "/ec-file-" + i, fileLen);
 }
 





hadoop git commit: HDFS-12349. Improve log message when it could not alloc enough blocks for EC. (Lei (Eddy) Xu)

2017-09-15 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 293ab432e -> 2787766b4


HDFS-12349. Improve log message when it could not alloc enough blocks for EC. 
(Lei (Eddy) Xu)

(cherry picked from commit fbe06b58805aac4861fb27dfa273914b69e8bdc6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2787766b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2787766b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2787766b

Branch: refs/heads/branch-3.0
Commit: 2787766b41ec8c33b0557c8ac31a1f1b68e3d37c
Parents: 293ab43
Author: Lei Xu <l...@apache.org>
Authored: Fri Sep 15 12:12:42 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Sep 15 12:15:49 2017 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 17 ++--
 .../server/blockmanagement/BlockManager.java| 26 --
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 24 ++--
 .../TestDFSStripedOutputStreamWithFailure.java  | 29 ++--
 .../datatransfer/sasl/TestSaslDataTransfer.java |  4 +--
 .../blockmanagement/TestBlockManager.java   |  3 +-
 .../blockmanagement/TestBlockStatsMXBean.java   |  2 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |  4 +--
 8 files changed, 63 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2787766b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 7f05338..44db3a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -260,6 +260,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
 
   private final Coordinator coordinator;
   private final CellBuffers cellBuffers;
+  private final ErasureCodingPolicy ecPolicy;
   private final RawErasureEncoder encoder;
   private final List streamers;
   private final DFSPacket[] currentPackets; // current Packet of each streamer
@@ -286,7 +287,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
   LOG.debug("Creating DFSStripedOutputStream for " + src);
 }
 
-final ErasureCodingPolicy ecPolicy = stat.getErasureCodingPolicy();
+ecPolicy = stat.getErasureCodingPolicy();
 final int numParityBlocks = ecPolicy.getNumParityUnits();
 cellSize = ecPolicy.getCellSize();
 numDataBlocks = ecPolicy.getNumDataUnits();
@@ -478,11 +479,6 @@ public class DFSStripedOutputStream extends DFSOutputStream
 final LocatedBlock lb = addBlock(excludedNodes, dfsClient, src,
 currentBlockGroup, fileId, favoredNodes, getAddBlockFlags());
 assert lb.isStriped();
-if (lb.getLocations().length < numDataBlocks) {
-  throw new IOException("Failed to get " + numDataBlocks
-  + " nodes from namenode: blockGroupSize= " + numAllBlocks
-  + ", blocks.length= " + lb.getLocations().length);
-}
 // assign the new block to the current block group
 currentBlockGroup = lb.getBlock();
 blockGroupIndex++;
@@ -494,11 +490,16 @@ public class DFSStripedOutputStream extends DFSOutputStream
   StripedDataStreamer si = getStripedDataStreamer(i);
   assert si.isHealthy();
   if (blocks[i] == null) {
+// allocBlock() should guarantee that all data blocks are successfully
+// allocated.
+assert i >= numDataBlocks;
 // Set exception and close streamer as there is no block locations
 // found for the parity block.
-LOG.warn("Failed to get block location for parity block, index=" + i);
+LOG.warn("Cannot allocate parity block(index={}, policy={}). " +
+"Not enough datanodes? Exclude nodes={}", i,  ecPolicy.getName(),
+excludedNodes);
 si.getLastException().set(
-new IOException("Failed to get following block, i=" + i));
+new IOException("Failed to get parity block, index=" + i));
 si.getErrorState().setInternalError();
 si.close(true);
   } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2787766b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hd
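
To make the behavioral shift in the hunks above concrete: the removed client-side check threw when the NameNode returned fewer than numDataBlocks locations; after this change allocBlock() is expected to guarantee every data block, so only parity indices may come back null, and those are logged rather than thrown. A minimal, self-contained sketch of that contract (hypothetical class and method names, not the committed code):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class ParityAllocationCheck {
  private static final Logger LOG =
      LoggerFactory.getLogger(ParityAllocationCheck.class);

  // blocks[0..numDataBlocks-1] are data units; the remainder are parity.
  public static void check(Object[] blocks, int numDataBlocks) {
    for (int i = 0; i < blocks.length; i++) {
      if (blocks[i] == null) {
        // The NameNode guarantees data blocks, so a missing block is only
        // acceptable at a parity index.
        assert i >= numDataBlocks : "data block " + i + " not allocated";
        // SLF4J {} placeholders, as in the new warn() call, defer string
        // construction until the message is actually emitted.
        LOG.warn("Cannot allocate parity block(index={})", i);
      }
    }
  }
}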

hadoop git commit: HDFS-12349. Improve log message when it could not alloc enough blocks for EC. (Lei (Eddy) Xu)

2017-09-15 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3a8d57a0a -> fbe06b588


HDFS-12349. Improve log message when it could not alloc enough blocks for EC. 
(Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbe06b58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbe06b58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbe06b58

Branch: refs/heads/trunk
Commit: fbe06b58805aac4861fb27dfa273914b69e8bdc6
Parents: 3a8d57a
Author: Lei Xu <l...@apache.org>
Authored: Fri Sep 15 12:12:42 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Sep 15 12:12:42 2017 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 17 ++--
 .../server/blockmanagement/BlockManager.java| 26 --
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 24 ++--
 .../TestDFSStripedOutputStreamWithFailure.java  | 29 ++--
 .../datatransfer/sasl/TestSaslDataTransfer.java |  4 +--
 .../blockmanagement/TestBlockManager.java   |  3 +-
 .../blockmanagement/TestBlockStatsMXBean.java   |  2 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |  4 +--
 8 files changed, 63 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbe06b58/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 7f05338..44db3a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -260,6 +260,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
 
   private final Coordinator coordinator;
   private final CellBuffers cellBuffers;
+  private final ErasureCodingPolicy ecPolicy;
   private final RawErasureEncoder encoder;
  private final List<StripedDataStreamer> streamers;
   private final DFSPacket[] currentPackets; // current Packet of each streamer
@@ -286,7 +287,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
   LOG.debug("Creating DFSStripedOutputStream for " + src);
 }
 
-final ErasureCodingPolicy ecPolicy = stat.getErasureCodingPolicy();
+ecPolicy = stat.getErasureCodingPolicy();
 final int numParityBlocks = ecPolicy.getNumParityUnits();
 cellSize = ecPolicy.getCellSize();
 numDataBlocks = ecPolicy.getNumDataUnits();
@@ -478,11 +479,6 @@ public class DFSStripedOutputStream extends DFSOutputStream
 final LocatedBlock lb = addBlock(excludedNodes, dfsClient, src,
 currentBlockGroup, fileId, favoredNodes, getAddBlockFlags());
 assert lb.isStriped();
-if (lb.getLocations().length < numDataBlocks) {
-  throw new IOException("Failed to get " + numDataBlocks
-  + " nodes from namenode: blockGroupSize= " + numAllBlocks
-  + ", blocks.length= " + lb.getLocations().length);
-}
 // assign the new block to the current block group
 currentBlockGroup = lb.getBlock();
 blockGroupIndex++;
@@ -494,11 +490,16 @@ public class DFSStripedOutputStream extends DFSOutputStream
   StripedDataStreamer si = getStripedDataStreamer(i);
   assert si.isHealthy();
   if (blocks[i] == null) {
+// allocBlock() should guarantee that all data blocks are successfully
+// allocated.
+assert i >= numDataBlocks;
 // Set exception and close streamer as there is no block locations
 // found for the parity block.
-LOG.warn("Failed to get block location for parity block, index=" + i);
+LOG.warn("Cannot allocate parity block(index={}, policy={}). " +
+"Not enough datanodes? Exclude nodes={}", i,  ecPolicy.getName(),
+excludedNodes);
 si.getLastException().set(
-new IOException("Failed to get following block, i=" + i));
+new IOException("Failed to get parity block, index=" + i));
 si.getErrorState().setInternalError();
 si.close(true);
   } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbe06b58/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

hadoop git commit: HDFS-12378. TestClientProtocolForPipelineRecovery#testZeroByteBlockRecovery fails on trunk. (Lei (Eddy) Xu)

2017-09-14 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 62e8a5cee -> e14048914


HDFS-12378.  TestClientProtocolForPipelineRecovery#testZeroByteBlockRecovery 
fails on trunk. (Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1404891
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1404891
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1404891

Branch: refs/heads/branch-3.0
Commit: e140489147487fa55976e62a9eb7a3140316
Parents: 62e8a5c
Author: Lei Xu <l...@apache.org>
Authored: Thu Sep 14 17:02:48 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Thu Sep 14 17:04:29 2017 -0700

--
 .../apache/hadoop/hdfs/server/datanode/DataNode.java| 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1404891/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 837ac07..042a627 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3000,8 +3000,16 @@ public class DataNode extends ReconfigurableBase
 b.setNumBytes(visible);
 
 if (targets.length > 0) {
-  new Daemon(new DataTransfer(targets, targetStorageTypes,
-  targetStorageIds, b, stage, client)).start();
+  Daemon daemon = new Daemon(threadGroup,
+  new DataTransfer(targets, targetStorageTypes, targetStorageIds, b,
+  stage, client));
+  daemon.start();
+  try {
+daemon.join();
+  } catch (InterruptedException e) {
+throw new IOException(
+"Pipeline recovery for " + b + " is interrupted.", e);
+  }
 }
   }
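
The fix turns a fire-and-forget transfer into a synchronous one: the daemon is created inside the DataNode's thread group and joined before the recovery call returns. A minimal sketch of the same pattern, with plain java.lang.Thread standing in for Hadoop's Daemon and illustrative names throughout:

import java.io.IOException;

public final class JoinedTransfer {
  public static void recover(ThreadGroup xceiverGroup, Runnable transfer)
      throws IOException {
    Thread t = new Thread(xceiverGroup, transfer, "dataTransfer");
    t.setDaemon(true);
    t.start();
    try {
      t.join(); // block until the copy finishes; no half-recovered pipeline
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // preserve the interrupt status
      throw new IOException("Pipeline recovery interrupted", e);
    }
  }
}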
 





hadoop git commit: HDFS-12378. TestClientProtocolForPipelineRecovery#testZeroByteBlockRecovery fails on trunk. (Lei (Eddy) Xu)

2017-09-14 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 390c2b5df -> 61cee3a0b


HDFS-12378.  TestClientProtocolForPipelineRecovery#testZeroByteBlockRecovery 
fails on trunk. (Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61cee3a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61cee3a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61cee3a0

Branch: refs/heads/trunk
Commit: 61cee3a0b9a8ea2e4f6257c17c2d90c7c930cc34
Parents: 390c2b5
Author: Lei Xu <l...@apache.org>
Authored: Thu Sep 14 17:02:48 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Thu Sep 14 17:03:25 2017 -0700

--
 .../apache/hadoop/hdfs/server/datanode/DataNode.java| 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61cee3a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 837ac07..042a627 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3000,8 +3000,16 @@ public class DataNode extends ReconfigurableBase
 b.setNumBytes(visible);
 
 if (targets.length > 0) {
-  new Daemon(new DataTransfer(targets, targetStorageTypes,
-  targetStorageIds, b, stage, client)).start();
+  Daemon daemon = new Daemon(threadGroup,
+  new DataTransfer(targets, targetStorageTypes, targetStorageIds, b,
+  stage, client));
+  daemon.start();
+  try {
+daemon.join();
+  } catch (InterruptedException e) {
+throw new IOException(
+"Pipeline recovery for " + b + " is interrupted.", e);
+  }
 }
   }
 





hadoop git commit: HDFS-12409. Add metrics of execution time of different stages in EC recovery task. (Lei (Eddy) Xu)

2017-09-13 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk c3f35c422 -> 73aed34df


HDFS-12409. Add metrics of execution time of different stages in EC recovery 
task. (Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73aed34d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73aed34d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73aed34d

Branch: refs/heads/trunk
Commit: 73aed34dffa5e79f6f819137b69054c1dee2d4dd
Parents: c3f35c4
Author: Lei Xu <l...@apache.org>
Authored: Wed Sep 13 17:10:16 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Wed Sep 13 17:14:13 2017 -0700

--
 .../erasurecode/StripedBlockReconstructor.java| 11 +++
 .../server/datanode/metrics/DataNodeMetrics.java  | 18 ++
 .../TestDataNodeErasureCodingMetrics.java |  7 +++
 3 files changed, 36 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73aed34d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
index bac013a..34e58ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedBlockReconstructor.java
@@ -22,6 +22,7 @@ import java.nio.ByteBuffer;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
+import org.apache.hadoop.util.Time;
 
 /**
  * StripedBlockReconstructor reconstruct one or more missed striped block in
@@ -83,18 +84,28 @@ class StripedBlockReconstructor extends StripedReconstructor
   final int toReconstructLen =
   (int) Math.min(getStripedReader().getBufferSize(), remaining);
 
+  long start = Time.monotonicNow();
   // step1: read from minimum source DNs required for reconstruction.
   // The returned success list is the source DNs we do real read from
   getStripedReader().readMinimumSources(toReconstructLen);
+  long readEnd = Time.monotonicNow();
 
   // step2: decode to reconstruct targets
   reconstructTargets(toReconstructLen);
+  long decodeEnd = Time.monotonicNow();
 
   // step3: transfer data
   if (stripedWriter.transferData2Targets() == 0) {
 String error = "Transfer failed for all targets.";
 throw new IOException(error);
   }
+  long writeEnd = Time.monotonicNow();
+
+  // Only the successful reconstructions are recorded.
+  final DataNodeMetrics metrics = getDatanode().getMetrics();
+  metrics.incrECReconstructionReadTime(readEnd - start);
+  metrics.incrECReconstructionDecodingTime(decodeEnd - readEnd);
+  metrics.incrECReconstructionWriteTime(writeEnd - decodeEnd);
 
   updatePositionInBlock(toReconstructLen);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73aed34d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index a8a6919..58a2f65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -151,6 +151,12 @@ public class DataNodeMetrics {
   MutableCounterLong ecReconstructionBytesWritten;
   @Metric("Bytes remote read by erasure coding worker")
   MutableCounterLong ecReconstructionRemoteBytesRead;
+  @Metric("Milliseconds spent on read by erasure coding worker")
+  private MutableCounterLong ecReconstructionReadTimeMillis;
+  @Metric("Milliseconds spent on decoding by erasure coding worker")
+  private MutableCounterLong ecReconstructionDecodingTimeMillis;
+  @Metric("Milliseconds spent on write by erasure coding worker")
+  private MutableCounterLong ecReconstructionWriteTimeMillis;
 
   final MetricsRegistry registry = new MetricsRegistry("datanode");
   final Stri
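
The timing pattern added above is simple but worth spelling out: each stage is bracketed by a monotonic clock read, and the three deltas feed per-stage counters. A minimal sketch, with System.nanoTime() standing in for Hadoop's Time.monotonicNow() and printf standing in for the new DataNodeMetrics counters (stage bodies are placeholders):

public final class StagedTimer {
  public static void main(String[] args) {
    long start = System.nanoTime();
    read();    // step 1: read from the minimum set of source DataNodes
    long readEnd = System.nanoTime();
    decode();  // step 2: decode the missing striped units
    long decodeEnd = System.nanoTime();
    write();   // step 3: transfer the reconstructed data to the targets
    long writeEnd = System.nanoTime();
    // Report per-stage elapsed time in milliseconds.
    System.out.printf("read=%dms decode=%dms write=%dms%n",
        (readEnd - start) / 1_000_000,
        (decodeEnd - readEnd) / 1_000_000,
        (writeEnd - decodeEnd) / 1_000_000);
  }

  private static void read() { }
  private static void decode() { }
  private static void write() { }
}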

hadoop git commit: HDFS-12412. Change ErasureCodingWorker.stripedReadPool to cached thread pool. (Lei (Eddy) Xu)

2017-09-12 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk f4b626746 -> 123342cd0


HDFS-12412. Change ErasureCodingWorker.stripedReadPool to cached thread pool. 
(Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/123342cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/123342cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/123342cd

Branch: refs/heads/trunk
Commit: 123342cd0759ff88801d4f5ab10987f6e3f344b0
Parents: f4b6267
Author: Lei Xu <l...@apache.org>
Authored: Tue Sep 12 18:12:07 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Tue Sep 12 18:12:07 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java|  2 --
 .../datanode/erasurecode/ErasureCodingWorker.java | 14 +++---
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml   |  9 -
 .../src/site/markdown/HDFSErasureCoding.md|  1 -
 4 files changed, 7 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/123342cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d06e378..322cae4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -571,8 +571,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   "dfs.namenode.ec.system.default.policy";
   public static final String  DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT =
   "RS-6-3-1024k";
-  public static final String  DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = "dfs.datanode.ec.reconstruction.stripedread.threads";
-  public static final int DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20;
   public static final String  DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = "dfs.datanode.ec.reconstruction.stripedread.buffer.size";
   public static final int DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_DEFAULT = 64 * 1024;
   public static final String  DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY = "dfs.datanode.ec.reconstruction.stripedread.timeout.millis";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/123342cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
index 72c224f..d3de82e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java
@@ -53,19 +53,19 @@ public final class ErasureCodingWorker {
 this.datanode = datanode;
 this.conf = conf;
 
-initializeStripedReadThreadPool(conf.getInt(
-DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY,
-DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT));
+initializeStripedReadThreadPool();
 initializeStripedBlkReconstructionThreadPool(conf.getInt(
 DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_BLK_THREADS_KEY,
 DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_BLK_THREADS_DEFAULT));
   }
 
-  private void initializeStripedReadThreadPool(int num) {
-LOG.debug("Using striped reads; pool threads={}", num);
+  private void initializeStripedReadThreadPool() {
+LOG.debug("Using striped reads");
 
-stripedReadPool = new ThreadPoolExecutor(1, num, 60, TimeUnit.SECONDS,
-new SynchronousQueue(),
+// Essentially, this is a cachedThreadPool.
+stripedReadPool = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
+60, TimeUnit.SECONDS,
+new SynchronousQueue<>(),
 new Daemon.DaemonFactory() {
   private final AtomicInteger threadIndex = new AtomicInteger(0);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/123342cd/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src
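
The "essentially, this is a cachedThreadPool" comment can be made precise: a ThreadPoolExecutor with core size 0, an unbounded maximum, a 60-second idle timeout and a SynchronousQueue is exactly what Executors.newCachedThreadPool(ThreadFactory) constructs. A minimal sketch (the thread naming is illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public final class CachedPoolSketch {
  public static void main(String[] args) {
    AtomicInteger idx = new AtomicInteger(0);
    ThreadFactory daemonFactory = r -> {
      Thread t = new Thread(r, "stripedRead-" + idx.getAndIncrement());
      t.setDaemon(true);
      return t;
    };
    // Hand-rolled form, mirroring the diff above:
    ExecutorService pool = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
        60, TimeUnit.SECONDS, new SynchronousQueue<>(), daemonFactory);
    // Equivalent JDK convenience form:
    ExecutorService cached = Executors.newCachedThreadPool(daemonFactory);
    pool.shutdown();
    cached.shutdown();
  }
}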

hadoop git commit: Revert "HDFS-12349. Improve log message when it could not alloc enough blocks for EC. (lei)"

2017-09-11 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5ab21dfe9 -> de9994bd8


Revert "HDFS-12349. Improve log message when it could not alloc enough blocks 
for EC. (lei)"

This reverts commit 3e6d0ca2b2f79bfa87faa7bbd46d814a48334fbd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de9994bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de9994bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de9994bd

Branch: refs/heads/trunk
Commit: de9994bd893af70fffdd68af6252fc45020e0e69
Parents: 5ab21df
Author: Lei Xu <l...@apache.org>
Authored: Mon Sep 11 10:06:05 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Mon Sep 11 10:06:05 2017 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 17 ++--
 .../server/blockmanagement/BlockManager.java| 26 ++
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 14 ++
 .../TestDFSStripedOutputStreamWithFailure.java  | 29 ++--
 .../hdfs/server/namenode/TestDeadDatanode.java  |  4 +--
 5 files changed, 36 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de9994bd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 09dc181..7f05338 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -260,7 +260,6 @@ public class DFSStripedOutputStream extends DFSOutputStream
 
   private final Coordinator coordinator;
   private final CellBuffers cellBuffers;
-  private final ErasureCodingPolicy ecPolicy;
   private final RawErasureEncoder encoder;
  private final List<StripedDataStreamer> streamers;
   private final DFSPacket[] currentPackets; // current Packet of each streamer
@@ -287,7 +286,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
   LOG.debug("Creating DFSStripedOutputStream for " + src);
 }
 
-ecPolicy = stat.getErasureCodingPolicy();
+final ErasureCodingPolicy ecPolicy = stat.getErasureCodingPolicy();
 final int numParityBlocks = ecPolicy.getNumParityUnits();
 cellSize = ecPolicy.getCellSize();
 numDataBlocks = ecPolicy.getNumDataUnits();
@@ -479,6 +478,11 @@ public class DFSStripedOutputStream extends DFSOutputStream
 final LocatedBlock lb = addBlock(excludedNodes, dfsClient, src,
 currentBlockGroup, fileId, favoredNodes, getAddBlockFlags());
 assert lb.isStriped();
+if (lb.getLocations().length < numDataBlocks) {
+  throw new IOException("Failed to get " + numDataBlocks
+  + " nodes from namenode: blockGroupSize= " + numAllBlocks
+  + ", blocks.length= " + lb.getLocations().length);
+}
 // assign the new block to the current block group
 currentBlockGroup = lb.getBlock();
 blockGroupIndex++;
@@ -490,16 +494,11 @@ public class DFSStripedOutputStream extends DFSOutputStream
   StripedDataStreamer si = getStripedDataStreamer(i);
   assert si.isHealthy();
   if (blocks[i] == null) {
-// allocBlock() should guarantee that all data blocks are successfully
-// allocated.
-assert i >= numDataBlocks;
 // Set exception and close streamer as there is no block locations
 // found for the parity block.
-LOG.warn("Cannot allocate parity block(index={}, policy={}). " +
-"Not enough datanodes? Excluded nodes={}", i,  ecPolicy.getName(),
-excludedNodes);
+LOG.warn("Failed to get block location for parity block, index=" + i);
 si.getLastException().set(
-new IOException("Failed to get parity block, index=" + i));
+new IOException("Failed to get following block, i=" + i));
 si.getErrorState().setInternalError();
 si.close(true);
   } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de9994bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockMana

hadoop git commit: HDFS-12349. Improve log message when it could not alloc enough blocks for EC. (lei)

2017-09-07 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3b3be355b -> 3e6d0ca2b


HDFS-12349. Improve log message when it could not alloc enough blocks for EC. 
(lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e6d0ca2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e6d0ca2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e6d0ca2

Branch: refs/heads/trunk
Commit: 3e6d0ca2b2f79bfa87faa7bbd46d814a48334fbd
Parents: 3b3be35
Author: Lei Xu <l...@apache.org>
Authored: Thu Sep 7 18:01:37 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Thu Sep 7 18:01:37 2017 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 17 ++--
 .../server/blockmanagement/BlockManager.java| 26 --
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 14 --
 .../TestDFSStripedOutputStreamWithFailure.java  | 29 ++--
 .../hdfs/server/namenode/TestDeadDatanode.java  |  4 +--
 5 files changed, 54 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6d0ca2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 7f05338..09dc181 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -260,6 +260,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
 
   private final Coordinator coordinator;
   private final CellBuffers cellBuffers;
+  private final ErasureCodingPolicy ecPolicy;
   private final RawErasureEncoder encoder;
  private final List<StripedDataStreamer> streamers;
   private final DFSPacket[] currentPackets; // current Packet of each streamer
@@ -286,7 +287,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
   LOG.debug("Creating DFSStripedOutputStream for " + src);
 }
 
-final ErasureCodingPolicy ecPolicy = stat.getErasureCodingPolicy();
+ecPolicy = stat.getErasureCodingPolicy();
 final int numParityBlocks = ecPolicy.getNumParityUnits();
 cellSize = ecPolicy.getCellSize();
 numDataBlocks = ecPolicy.getNumDataUnits();
@@ -478,11 +479,6 @@ public class DFSStripedOutputStream extends DFSOutputStream
 final LocatedBlock lb = addBlock(excludedNodes, dfsClient, src,
 currentBlockGroup, fileId, favoredNodes, getAddBlockFlags());
 assert lb.isStriped();
-if (lb.getLocations().length < numDataBlocks) {
-  throw new IOException("Failed to get " + numDataBlocks
-  + " nodes from namenode: blockGroupSize= " + numAllBlocks
-  + ", blocks.length= " + lb.getLocations().length);
-}
 // assign the new block to the current block group
 currentBlockGroup = lb.getBlock();
 blockGroupIndex++;
@@ -494,11 +490,16 @@ public class DFSStripedOutputStream extends DFSOutputStream
   StripedDataStreamer si = getStripedDataStreamer(i);
   assert si.isHealthy();
   if (blocks[i] == null) {
+// allocBlock() should guarantee that all data blocks are successfully
+// allocated.
+assert i >= numDataBlocks;
 // Set exception and close streamer as there is no block locations
 // found for the parity block.
-LOG.warn("Failed to get block location for parity block, index=" + i);
+LOG.warn("Cannot allocate parity block(index={}, policy={}). " +
+"Not enough datanodes? Excluded nodes={}", i,  ecPolicy.getName(),
+excludedNodes);
 si.getLastException().set(
-new IOException("Failed to get following block, i=" + i));
+new IOException("Failed to get parity block, index=" + i));
 si.getErrorState().setInternalError();
 si.close(true);
   } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e6d0ca2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f4e5cb4..40c249d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/

hadoop git commit: HDFS-12215. DataNode.transferBlock does not create its daemon in the xceiver thread group. (Lei Xu)

2017-08-25 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk f29a0fc28 -> 36bada303


HDFS-12215. DataNode.transferBlock does not create its daemon in the xceiver 
thread group. (Lei Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36bada30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36bada30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36bada30

Branch: refs/heads/trunk
Commit: 36bada3032e438099ada9d865c3945d42c3e7c2a
Parents: f29a0fc
Author: Lei Xu <l...@apache.org>
Authored: Fri Aug 25 16:01:14 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Aug 25 16:01:14 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36bada30/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 6069487..1a85b46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2999,8 +2999,8 @@ public class DataNode extends ReconfigurableBase
 b.setNumBytes(visible);
 
 if (targets.length > 0) {
-  new DataTransfer(targets, targetStorageTypes, targetStorageIds, b, stage,
-  client).run();
+  new Daemon(new DataTransfer(targets, targetStorageTypes,
+  targetStorageIds, b, stage, client)).start();
 }
   }
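
Why the thread group matters: the DataNode counts live threads in its xceiver ThreadGroup to account for concurrent transfers, and a transfer started on a detached thread escapes that accounting. A minimal plain-Java illustration of group-scoped counting (names are illustrative):

public final class GroupAccounting {
  public static void main(String[] args) throws InterruptedException {
    ThreadGroup xceivers = new ThreadGroup("dataXceiver");
    Thread transfer = new Thread(xceivers, () -> {
      try {
        Thread.sleep(100); // stand-in for a block transfer
      } catch (InterruptedException ignored) {
      }
    }, "transfer-1");
    transfer.setDaemon(true);
    transfer.start();
    // The running transfer is now visible to group-based accounting.
    System.out.println("active xceivers: " + xceivers.activeCount());
    transfer.join();
  }
}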
 





hadoop git commit: HDFS-12215. DataNode.transferBlock does not create its daemon in the xceiver thread group. (Lei Xu)

2017-08-25 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 161774480 -> c22b5108d


HDFS-12215. DataNode.transferBlock does not create its daemon in the xceiver 
thread group. (Lei Xu)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c22b5108
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c22b5108
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c22b5108

Branch: refs/heads/branch-2
Commit: c22b5108df9328c3f1cefc3f9c99d8e1ca6e56be
Parents: 1617744
Author: Lei Xu <l...@apache.org>
Authored: Fri Aug 25 16:01:14 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Aug 25 16:06:34 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c22b5108/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 40accd6..c1a96cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2919,7 +2919,8 @@ public class DataNode extends ReconfigurableBase
 b.setNumBytes(visible);
 
 if (targets.length > 0) {
-  new DataTransfer(targets, targetStorageTypes, b, stage, client).run();
+  new Daemon(new DataTransfer(targets, targetStorageTypes,
+  b, stage, client)).start();
 }
   }
 





hadoop git commit: HADOOP-14398. Modify documents for the FileSystem Builder API. (Lei (Eddy) Xu)

2017-08-17 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4230872dd -> 99e558b13


HADOOP-14398. Modify documents for the FileSystem Builder API. (Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99e558b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99e558b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99e558b1

Branch: refs/heads/trunk
Commit: 99e558b13ba4d5832aea97374e1d07b4e78e5e39
Parents: 4230872
Author: Lei Xu <l...@apache.org>
Authored: Thu Aug 17 18:06:23 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Thu Aug 17 18:06:23 2017 -0700

--
 .../hadoop/fs/FSDataOutputStreamBuilder.java|  74 ++--
 .../src/site/markdown/filesystem/filesystem.md  |  33 +++-
 .../filesystem/fsdataoutputstreambuilder.md | 182 +++
 .../src/site/markdown/filesystem/index.md   |   1 +
 4 files changed, 272 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99e558b1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
index 1f668eb..86c284a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
@@ -54,16 +54,29 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
  * options accordingly, for example:
  *
  * 
- * FSDataOutputStreamBuilder builder = fs.createFile(path);
- * builder.permission(perm)
+ *
+ * // Don't
+ * if (fs instanceof FooFileSystem) {
+ *   FooFileSystem fs = (FooFileSystem) fs;
+ *   OutputStream out = dfs.createFile(path)
+ * .optionA()
+ * .optionB("value")
+ * .cache()
+ *   .build()
+ * } else if (fs instanceof BarFileSystem) {
+ *   ...
+ * }
+ *
+ * // Do
+ * OutputStream out = fs.createFile(path)
+ *   .permission(perm)
  *   .bufferSize(bufSize)
- *   .opt("dfs.outputstream.builder.lazy-persist", true)
- *   .opt("dfs.outputstream.builder.ec.policy-name", "rs-3-2-64k")
- *   .opt("fs.local.o-direct", true)
- *   .must("fs.s3a.fast-upload", true)
- *   .must("fs.azure.buffer-size", 256 * 1024 * 1024);
- * FSDataOutputStream out = builder.build();
- * ...
+ *   .opt("foofs:option.a", true)
+ *   .opt("foofs:option.b", "value")
+ *   .opt("barfs:cache", true)
+ *   .must("foofs:cache", true)
+ *   .must("barfs:cache-size", 256 * 1024 * 1024)
+ *   .build();
  * 
  *
  * If the option is not related to the file system, the option will be ignored.
@@ -263,6 +276,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set optional boolean parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, boolean value) {
 mandatoryKeys.remove(key);
@@ -272,6 +287,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set optional int parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, int value) {
 mandatoryKeys.remove(key);
@@ -281,6 +298,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set optional float parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, float value) {
 mandatoryKeys.remove(key);
@@ -290,6 +309,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set optional double parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, double value) {
 mandatoryKeys.remove(key);
@@ -299,6 +320,8 @@ public abstract class FSDataOutputStreamBuilder
 
   /**
* Set an array of string values as optional parameter for the Builder.
+   *
+   * @see #opt(String, String)
*/
   public B opt(@Nonnull final String key, @Nonnull final String... values) {
 mandatoryKeys.remove(key);
@@ -310,8 +333,7 @@ public abstract class FSDataOutputStreamBuilder
* Set mandatory option to the Builder.
*
* If the option is not supported or unavailable on the {@link FileSystem},
-   * the client should expect {@link #build()} throws
-   * {@link IllegalArgumentException}.
+   * the client should expect {@link #build()} throws IllegalArgumentException.
*/
   public B must(@Nonnull final String k
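
A self-contained version of the "Do" example above, runnable against the local filesystem; the foofs: keys are the javadoc's placeholders, not real options:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class BuilderExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataOutputStream out = fs.createFile(new Path("/tmp/builder-example"))
        .bufferSize(4096)
        .opt("foofs:option.a", true)    // unknown optional keys are ignored
        .opt("foofs:option.b", "value")
        .build();                       // a must() key, by contrast, makes
                                        // build() fail when unsupported
    out.writeUTF("hello");
    out.close();
  }
}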

hadoop git commit: HDFS-12221. Replace xcerces in XmlEditsVisitor. (Ajay Kumar via lei)

2017-08-14 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk d8f74c396 -> ce797a170


HDFS-12221. Replace xcerces in XmlEditsVisitor. (Ajay Kumar via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce797a17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce797a17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce797a17

Branch: refs/heads/trunk
Commit: ce797a170669524224cfeaaf70647047e7626816
Parents: d8f74c3
Author: Lei Xu <l...@apache.org>
Authored: Mon Aug 14 10:27:47 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Mon Aug 14 10:27:47 2017 -0700

--
 .../hadoop-client-minicluster/pom.xml   |   6 --
 .../hadoop-client-runtime/pom.xml   |   7 ---
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   5 --
 .../offlineEditsViewer/XmlEditsVisitor.java |  41 
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 5850 -> 5850 bytes
 .../src/test/resources/editsStored.xml  |  62 +--
 .../hadoop-mapreduce-client/pom.xml |  10 +--
 hadoop-project-dist/pom.xml |  10 +--
 hadoop-project/pom.xml  |   8 ---
 hadoop-yarn-project/hadoop-yarn/pom.xml |  10 +--
 10 files changed, 62 insertions(+), 97 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 5255640..5cf1fad 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -629,12 +629,6 @@
   
 
 
-  xerces:xercesImpl
-  
-**/*
-  
-
-
   
org.apache.hadoop:hadoop-mapreduce-client-jobclient:*
   
 testjar/*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-client-modules/hadoop-client-runtime/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 2f64152..24c6b7a 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -174,13 +174,6 @@
 
org/apache/jasper/compiler/Localizer.class
   
 
-
-
-  xerces:xercesImpl
-  
-META-INF/services/*
-  
-
 
 
   com.sun.jersey:*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 1c50d31..fa1044d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -174,11 +174,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   compile
 
 
-  xerces
-  xercesImpl
-  compile
-
-
   org.apache.htrace
   htrace-core4
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
index 7a39ba6..ddf7933 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
@@ -20,17 +20,21 @@ package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 import java.io.IOException;
 import java.io.OutputStream;
 
+import javax.xml.transform.OutputKeys;
+import javax.xml.transform.TransformerConfigurationException;
+import javax.xml.transform.sax.SAXTransformerFactory;
+import javax.xml.transform.sax.TransformerHandler;
+import javax.xml.transform.stream.
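
The replacement relies only on JDK javax.xml.transform classes. A minimal standalone sketch of the same serialization approach (the element name is illustrative, not the offline edits viewer schema):

import javax.xml.transform.OutputKeys;
import javax.xml.transform.sax.SAXTransformerFactory;
import javax.xml.transform.sax.TransformerHandler;
import javax.xml.transform.stream.StreamResult;
import org.xml.sax.helpers.AttributesImpl;

public final class SaxXmlWriter {
  public static void main(String[] args) throws Exception {
    SAXTransformerFactory factory =
        (SAXTransformerFactory) SAXTransformerFactory.newInstance();
    TransformerHandler handler = factory.newTransformerHandler();
    handler.getTransformer().setOutputProperty(OutputKeys.INDENT, "yes");
    handler.setResult(new StreamResult(System.out));
    // Feed SAX events; the handler serializes them as indented XML.
    handler.startDocument();
    handler.startElement("", "", "EDITS", new AttributesImpl());
    handler.endElement("", "", "EDITS");
    handler.endDocument();
  }
}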

hadoop git commit: HADOOP-14397. Pull up the builder pattern to FileSystem and add AbstractContractCreateTest for it. (Lei (Eddy) Xu)

2017-07-31 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk abbf4129a -> 9586b0e24


HADOOP-14397. Pull up the builder pattern to FileSystem and add 
AbstractContractCreateTest for it. (Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9586b0e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9586b0e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9586b0e2

Branch: refs/heads/trunk
Commit: 9586b0e24fce29c278134658e68b8c47cd9d8c51
Parents: abbf412
Author: Lei Xu <l...@cloudera.com>
Authored: Mon Jul 31 20:04:57 2017 -0700
Committer: Lei Xu <l...@cloudera.com>
Committed: Mon Jul 31 20:12:40 2017 -0700

--
 .../hadoop/fs/FSDataOutputStreamBuilder.java|  4 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   | 24 --
 .../apache/hadoop/fs/TestLocalFileSystem.java   |  2 +-
 .../fs/contract/AbstractContractAppendTest.java | 33 ++-
 .../fs/contract/AbstractContractCreateTest.java | 90 ++--
 .../hadoop/fs/contract/ContractTestUtils.java   | 43 --
 .../hadoop/hdfs/DistributedFileSystem.java  |  3 +-
 7 files changed, 154 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9586b0e2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
index 0527202..8608a7b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
@@ -44,8 +44,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
  *
  * To create missing parent directory, use {@link #recursive()}.
  */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public abstract class FSDataOutputStreamBuilder<S extends FSDataOutputStream, B extends FSDataOutputStreamBuilder<S, B>> {
   private final FileSystem fs;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9586b0e2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index d7cd7dd..fc7b9b2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -4153,9 +4153,21 @@ public abstract class FileSystem extends Configured implements Closeable {
 
 @Override
 public FSDataOutputStream build() throws IOException {
-  return getFS().create(getPath(), getPermission(), getFlags(),
-  getBufferSize(), getReplication(), getBlockSize(), getProgress(),
-  getChecksumOpt());
+  if (getFlags().contains(CreateFlag.CREATE) ||
+  getFlags().contains(CreateFlag.OVERWRITE)) {
+if (isRecursive()) {
+  return getFS().create(getPath(), getPermission(), getFlags(),
+  getBufferSize(), getReplication(), getBlockSize(), getProgress(),
+  getChecksumOpt());
+} else {
+  return getFS().createNonRecursive(getPath(), getPermission(),
+  getFlags(), getBufferSize(), getReplication(), getBlockSize(),
+  getProgress());
+}
+  } else if (getFlags().contains(CreateFlag.APPEND)) {
+return getFS().append(getPath(), getBufferSize(), getProgress());
+  }
+  throw new IOException("Must specify either create, overwrite or append");
 }
 
 @Override
@@ -4174,8 +4186,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* HADOOP-14384. Temporarily reduce the visibility of method before the
* builder interface becomes stable.
*/
-  @InterfaceAudience.Private
-  protected FSDataOutputStreamBuilder createFile(Path path) {
+  public FSDataOutputStreamBuilder createFile(Path path) {
 return new FileSystemDataOutputStreamBuilder(this, path)
 .create().overwrite(true);
   }
@@ -4185,8 +4196,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* @param path file path.
* @return a {@link FSDataOutputStreamBuilder} to build file append request.
*/
-  @InterfaceAudience.Private
-  protected FSDataOutputStreamBuilder appen
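
The build() dispatch above means one builder front-end fans out to the three legacy entry points. A minimal usage sketch (run it against a filesystem that supports append, e.g. HDFS; the path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class BuildDispatch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/dispatch-example");
    // CREATE/OVERWRITE flags + recursive() dispatch to create(...).
    FSDataOutputStream out = fs.createFile(p).recursive().build();
    out.writeUTF("first");
    out.close();
    // The APPEND flag dispatches to append(...).
    FSDataOutputStream more = fs.appendFile(p).build();
    more.writeUTF("second");
    more.close();
  }
}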

hadoop git commit: HADOOP-14397. Pull up the builder pattern to FileSystem and add AbstractContractCreateTest for it. (Lei (Eddy) Xu)

2017-07-31 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 60ae10b14 -> f09d20cff


HADOOP-14397. Pull up the builder pattern to FileSystem and add 
AbstractContractCreateTest for it. (Lei (Eddy) Xu)

(cherry picked from commit 667ee003bf47e44beb3fdff8d06a7264a13dd22c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f09d20cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f09d20cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f09d20cf

Branch: refs/heads/branch-2
Commit: f09d20cffbdbf4ce40458b2c52693d0a2e8e98cd
Parents: 60ae10b
Author: Lei Xu <l...@cloudera.com>
Authored: Mon Jul 31 20:04:57 2017 -0700
Committer: Lei Xu <l...@cloudera.com>
Committed: Mon Jul 31 20:07:13 2017 -0700

--
 .../hadoop/fs/FSDataOutputStreamBuilder.java|  4 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   | 24 --
 .../apache/hadoop/fs/TestLocalFileSystem.java   |  2 +-
 .../fs/contract/AbstractContractAppendTest.java | 33 ++-
 .../fs/contract/AbstractContractCreateTest.java | 90 ++--
 .../hadoop/fs/contract/ContractTestUtils.java   | 43 --
 .../hadoop/hdfs/DistributedFileSystem.java  |  3 +-
 7 files changed, 154 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f09d20cf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
index 0527202..8608a7b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java
@@ -44,8 +44,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
  *
  * To create missing parent directory, use {@link #recursive()}.
  */
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public abstract class FSDataOutputStreamBuilder<S extends FSDataOutputStream, B extends FSDataOutputStreamBuilder<S, B>> {
   private final FileSystem fs;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f09d20cf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 38e53a4..e7f3624 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -4137,9 +4137,21 @@ public abstract class FileSystem extends Configured implements Closeable {
 
 @Override
 public FSDataOutputStream build() throws IOException {
-  return getFS().create(getPath(), getPermission(), getFlags(),
-  getBufferSize(), getReplication(), getBlockSize(), getProgress(),
-  getChecksumOpt());
+  if (getFlags().contains(CreateFlag.CREATE) ||
+  getFlags().contains(CreateFlag.OVERWRITE)) {
+if (isRecursive()) {
+  return getFS().create(getPath(), getPermission(), getFlags(),
+  getBufferSize(), getReplication(), getBlockSize(), getProgress(),
+  getChecksumOpt());
+} else {
+  return getFS().createNonRecursive(getPath(), getPermission(),
+  getFlags(), getBufferSize(), getReplication(), getBlockSize(),
+  getProgress());
+}
+  } else if (getFlags().contains(CreateFlag.APPEND)) {
+return getFS().append(getPath(), getBufferSize(), getProgress());
+  }
+  throw new IOException("Must specify either create, overwrite or append");
 }
 
 @Override
@@ -4158,8 +4170,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* HADOOP-14384. Temporarily reduce the visibility of method before the
* builder interface becomes stable.
*/
-  @InterfaceAudience.Private
-  protected FSDataOutputStreamBuilder createFile(Path path) {
+  public FSDataOutputStreamBuilder createFile(Path path) {
 return new FileSystemDataOutputStreamBuilder(this, path)
 .create().overwrite(true);
   }
@@ -4169,8 +4180,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* @param path file path.
* @return a {@link FSDataOutputStreamBuilder} to build file append request.
*/
-

hadoop git commit: HDFS-12044. Mismatch between BlockManager.maxReplicationStreams and ErasureCodingWorker.stripedReconstructionPool pool size causes slow and bursty recovery. (Contributed by Lei (Edd

2017-07-28 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9ea01fd95 -> 77791e4c3


HDFS-12044. Mismatch between BlockManager.maxReplicationStreams and 
ErasureCodingWorker.stripedReconstructionPool pool size causes slow and bursty 
recovery. (Contributed by Lei (Eddy) Xu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77791e4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77791e4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77791e4c

Branch: refs/heads/trunk
Commit: 77791e4c36ddc9305306c83806bf486d4d32575d
Parents: 9ea01fd
Author: Lei Xu <l...@cloudera.com>
Authored: Fri Jul 28 10:49:23 2017 -0700
Committer: Lei Xu <l...@cloudera.com>
Committed: Fri Jul 28 10:50:49 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   | 23 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   | 21 ++
 .../erasurecode/ErasureCodingWorker.java| 15 +++-
 .../erasurecode/StripedBlockReconstructor.java  |  3 +-
 .../datanode/erasurecode/StripedReader.java | 20 ++
 .../erasurecode/StripedReconstructionInfo.java  | 15 
 .../erasurecode/StripedReconstructor.java   |  8 ++-
 .../hadoop/hdfs/TestReconstructStripedFile.java | 74 ++--
 8 files changed, 169 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77791e4c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 2e770cc..e7cd0d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -83,6 +83,7 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
@@ -811,10 +812,30 @@ public class DFSUtilClient {
   public static ThreadPoolExecutor getThreadPoolExecutor(int corePoolSize,
   int maxPoolSize, long keepAliveTimeSecs, String threadNamePrefix,
   boolean runRejectedExec) {
+return getThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTimeSecs,
+new SynchronousQueue<>(), threadNamePrefix, runRejectedExec);
+}
+
+  /**
+   * Utility to create a {@link ThreadPoolExecutor}.
+   *
+   * @param corePoolSize - min threads in the pool, even if idle
+   * @param maxPoolSize - max threads in the pool
+   * @param keepAliveTimeSecs - max seconds beyond which excess idle threads
+   *will be terminated
+   * @param queue - the queue to use for holding tasks before they are executed.
+   * @param threadNamePrefix - name prefix for the pool threads
+   * @param runRejectedExec - when true, rejected tasks from
+   *ThreadPoolExecutor are run in the context of calling thread
+   * @return ThreadPoolExecutor
+   */
+  public static ThreadPoolExecutor getThreadPoolExecutor(int corePoolSize,
+  int maxPoolSize, long keepAliveTimeSecs, BlockingQueue queue,
+  String threadNamePrefix, boolean runRejectedExec) {
 Preconditions.checkArgument(corePoolSize > 0);
 ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(corePoolSize,
 maxPoolSize, keepAliveTimeSecs, TimeUnit.SECONDS,
-new SynchronousQueue(), new Daemon.DaemonFactory() {
+queue, new Daemon.DaemonFactory() {
   private final AtomicInteger threadIndex = new AtomicInteger(0);
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77791e4c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 2730393..6069487 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2204,12 +2204,33 @@ public class DataNode extends ReconfigurableBase
   }
 
   /**
+   * Increments the xmitInProgress count by given value.
+   *
+   * @param delta the amount of xmitsInProgress to increase.
+   * @see #
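
A minimal sketch of calling the new overload from this diff; the pool sizes and name prefix are illustrative, and LinkedBlockingQueue is chosen so that surplus tasks queue up instead of being rejected:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.hadoop.hdfs.DFSUtilClient;

public final class ReconstructionPool {
  public static void main(String[] args) {
    ThreadPoolExecutor pool = DFSUtilClient.getThreadPoolExecutor(
        2,                             // corePoolSize: kept even when idle
        8,                             // maxPoolSize
        60,                            // keepAliveTimeSecs for excess threads
        new LinkedBlockingQueue<>(),   // hold tasks rather than reject them
        "StripedBlockReconstruction-", // thread name prefix
        false);                        // rejected tasks not run in the caller
    pool.shutdown();
  }
}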

hadoop git commit: Add -E option in 'ls' to list erasure coding policy of each file and directory if applicable. Contributed by luhuichun via lei.

2017-06-28 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 25d891a78 -> d6df0fdbb


Add -E option in 'ls' to list erasure coding policy of each file and directory 
if applicable. Contributed by luhuichun via lei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6df0fdb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6df0fdb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6df0fdb

Branch: refs/heads/trunk
Commit: d6df0fdbbda42b4ddab3810b5ac57336c6241ba7
Parents: 25d891a
Author: Lei Xu <l...@apache.org>
Authored: Wed Jun 28 13:47:23 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Wed Jun 28 13:47:23 2017 -0700

--
 .../java/org/apache/hadoop/fs/shell/Ls.java | 63 +++-
 .../src/site/markdown/FileSystemShell.md|  4 +-
 .../src/test/resources/testConf.xml |  6 +-
 .../test/resources/testErasureCodingConf.xml| 34 +++
 4 files changed, 89 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6df0fdb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
index 47e87f5..221b3cb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.ContentSummary;
 
 /**
  * Get a listing of all files in that match the file patterns.
@@ -54,13 +55,14 @@ class Ls extends FsCommand {
   private static final String OPTION_MTIME = "t";
   private static final String OPTION_ATIME = "u";
   private static final String OPTION_SIZE = "S";
+  private static final String OPTION_ECPOLICY = "e";
 
   public static final String NAME = "ls";
   public static final String USAGE = "[-" + OPTION_PATHONLY + "] [-" +
   OPTION_DIRECTORY + "] [-" + OPTION_HUMAN + "] [-" +
   OPTION_HIDENONPRINTABLE + "] [-" + OPTION_RECURSIVE + "] [-" +
   OPTION_MTIME + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE + "] [-" +
  OPTION_ATIME + "] [<path> ...]";
  OPTION_ATIME + "] [-" + OPTION_ECPOLICY +"] [<path> ...]";
 
   public static final String DESCRIPTION =
   "List the contents that match the specified file pattern. If " +
@@ -91,7 +93,9 @@ class Ls extends FsCommand {
   "  Reverse the order of the sort.\n" +
   "  -" + OPTION_ATIME +
   "  Use time of last access instead of modification for\n" +
-  "  display and sorting.";
+  "  display and sorting.\n"+
+  "  -" + OPTION_ECPOLICY +
+  "  Display the erasure coding policy of files and directories.\n";
 
   protected final SimpleDateFormat dateFormat =
 new SimpleDateFormat("yyyy-MM-dd HH:mm");
@@ -104,6 +108,7 @@ class Ls extends FsCommand {
   private boolean orderTime;
   private boolean orderSize;
   private boolean useAtime;
+  private boolean displayECPolicy;
  private Comparator<PathData> orderComparator;
 
   protected boolean humanReadable = false;
@@ -129,7 +134,7 @@ class Ls extends FsCommand {
 CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
 OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN,
 OPTION_HIDENONPRINTABLE, OPTION_RECURSIVE, OPTION_REVERSE,
-OPTION_MTIME, OPTION_SIZE, OPTION_ATIME);
+OPTION_MTIME, OPTION_SIZE, OPTION_ATIME, OPTION_ECPOLICY);
 cf.parse(args);
 pathOnly = cf.getOpt(OPTION_PATHONLY);
 dirRecurse = !cf.getOpt(OPTION_DIRECTORY);
@@ -140,6 +145,7 @@ class Ls extends FsCommand {
 orderTime = cf.getOpt(OPTION_MTIME);
 orderSize = !orderTime && cf.getOpt(OPTION_SIZE);
 useAtime = cf.getOpt(OPTION_ATIME);
+displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
 if (args.isEmpty()) args.add(Path.CUR_DIR);
 
 initialiseOrderComparator();
@@ -245,25 +251,42 @@ class Ls extends FsCommand {
   return;
 }
 FileStatus stat = item.stat;
-String line = String.format(lineFormat,
-(stat.isDirectory() ? "d" : "-"),
-  
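
The processPath hunk is cut off above, but the new ContentSummary import points at how the -e column is likely populated: by asking the filesystem for a content summary and reading its erasure coding policy. A small sketch under that assumption (the path and configuration are illustrative, not from the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class LsEcPolicySketch {
  public static void main(String[] args) throws Exception {
    // Assumes an HDFS filesystem reachable through the default config.
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/user/example");  // illustrative path
    ContentSummary summary = fs.getContentSummary(p);
    // getErasureCodingPolicy() yields the policy name for EC paths
    // (or a replication marker otherwise, per HDFS-11647 below).
    System.out.println(p + "  " + summary.getErasureCodingPolicy());
  }
}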

hadoop git commit: HDFS-12033. DatanodeManager picking EC recovery tasks should also consider the number of regular replication tasks. Contributed by Lei (Eddy) Xu.

2017-06-26 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk a9d3412b4 -> 144753e87


HDFS-12033. DatanodeManager picking EC recovery tasks should also consider the 
number of regular replication tasks. Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/144753e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/144753e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/144753e8

Branch: refs/heads/trunk
Commit: 144753e87f4a9daa51200be05ff2bb760bf38169
Parents: a9d3412
Author: Lei Xu <l...@apache.org>
Authored: Mon Jun 26 15:43:50 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Mon Jun 26 15:43:50 2017 -0700

--
 .../server/blockmanagement/DatanodeManager.java |  1 +
 .../blockmanagement/TestDatanodeManager.java| 51 
 2 files changed, 52 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/144753e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index a786c6a..1d09751 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1661,6 +1661,7 @@ public class DatanodeManager {
 if (pendingList != null) {
   cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
   pendingList));
+  maxTransfers -= pendingList.size();
 }
 // check pending erasure coding tasks
 List<BlockECReconstructionInfo> pendingECList = nodeinfo

http://git-wip-us.apache.org/repos/asf/hadoop/blob/144753e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
index 30e2aaf..de002f4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
@@ -44,13 +44,21 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand;
+import 
org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
+import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.util.Shell;
 import org.junit.Assert;
@@ -491,4 +499,47 @@ public class TestDatanodeManager {
 Assert.assertEquals("Unexpected host or host in unexpected position",
 "127.0.0.1:23456", bothAgain.get(1).getInfoAddr());
   }
+
+  @Test
+  public void testPendingRecoveryTasks() throws IOException {
+FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
+Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+Configuration conf = new Configuration();
+DatanodeManager dm = Mockito.spy(mockDatanodeManager(fsn, conf));
+
+int maxTransfers = 20;
+int numPendingTasks = 7;
+int numECTasks = maxTransfers - numPendingTasks;
+
+DatanodeDescriptor nodeInfo = Mockito.mock(DatanodeD
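
The one-line change above is the whole fix: regular replication transfers now draw down the per-heartbeat budget before EC reconstruction tasks are handed out, so a DataNode is no longer asked to do maxTransfers of each. A tiny standalone sketch of the accounting, using the numbers from testPendingRecoveryTasks (20 transfers, 7 pending replications):

public class TransferBudgetSketch {
  public static void main(String[] args) {
    int maxTransfers = 20;          // per-heartbeat transfer budget
    int pendingReplication = 7;     // regular replication tasks just queued
    maxTransfers -= pendingReplication;  // the HDFS-12033 fix
    // EC reconstruction tasks may only use what is left of the budget.
    int ecTasksToSend = Math.min(maxTransfers, 13);
    System.out.println("EC tasks scheduled: " + ecTasksToSend);  // prints 13
  }
}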

hadoop git commit: HDFS-11647. Add -E option in hdfs "count" command to show erasure policy summarization. Contributed by luhuichun.

2017-06-20 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2b654a493 -> 45ff4d38e


HDFS-11647. Add -E option in hdfs "count" command to show erasure policy 
summarization. Contributed by luhuichun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45ff4d38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45ff4d38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45ff4d38

Branch: refs/heads/trunk
Commit: 45ff4d38e6175bc59b126633fc46927f8af9b641
Parents: 2b654a4
Author: Lei Xu <l...@apache.org>
Authored: Tue Jun 20 11:55:09 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Tue Jun 20 11:55:09 2017 -0700

--
 .../org/apache/hadoop/fs/ContentSummary.java| 16 ++-
 .../java/org/apache/hadoop/fs/shell/Count.java  | 41 +
 .../src/site/markdown/FileSystemShell.md|  9 +++-
 .../java/org/apache/hadoop/cli/TestCLI.java |  2 +-
 .../org/apache/hadoop/fs/shell/TestCount.java   |  5 +-
 .../src/test/resources/testConf.xml |  2 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  6 ++-
 .../src/main/proto/hdfs.proto   |  1 +
 .../ContentSummaryComputationContext.java   | 48 
 .../hadoop/hdfs/server/namenode/INode.java  |  1 +
 .../test/resources/testErasureCodingConf.xml| 41 +
 11 files changed, 155 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45ff4d38/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index 3e75951..cdbd10f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -39,6 +39,7 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
   private long snapshotFileCount;
   private long snapshotDirectoryCount;
   private long snapshotSpaceConsumed;
+  private String erasureCodingPolicy;
 
   /** We don't use generics. Instead override spaceConsumed and other methods
   in order to keep backward compatibility. */
@@ -81,6 +82,11 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
   return this;
 }
 
+public Builder erasureCodingPolicy(String ecPolicy) {
+  this.erasureCodingPolicy = ecPolicy;
+  return this;
+}
+
 @Override
 public Builder quota(long quota){
   super.quota(quota);
@@ -136,6 +142,7 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
 private long snapshotFileCount;
 private long snapshotDirectoryCount;
 private long snapshotSpaceConsumed;
+private String erasureCodingPolicy;
   }
 
   /** Constructor deprecated by ContentSummary.Builder*/
@@ -175,6 +182,7 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
 this.snapshotFileCount = builder.snapshotFileCount;
 this.snapshotDirectoryCount = builder.snapshotDirectoryCount;
 this.snapshotSpaceConsumed = builder.snapshotSpaceConsumed;
+this.erasureCodingPolicy = builder.erasureCodingPolicy;
   }
 
   /** @return the length */
@@ -202,6 +210,10 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
 return snapshotSpaceConsumed;
   }
 
+  public String getErasureCodingPolicy() {
+return erasureCodingPolicy;
+  }
+
   @Override
   @InterfaceAudience.Private
   public void write(DataOutput out) throws IOException {
@@ -237,6 +249,7 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
   getSnapshotFileCount() == right.getSnapshotFileCount() &&
   getSnapshotDirectoryCount() == right.getSnapshotDirectoryCount() &&
   getSnapshotSpaceConsumed() == right.getSnapshotSpaceConsumed() &&
+  getErasureCodingPolicy().equals(right.getErasureCodingPolicy()) &&
   super.equals(to);
 } else {
   return super.equals(to);
@@ -247,7 +260,8 @@ public class ContentSummary extends QuotaUsage implements 
Writable{
   public int hashCode() {
 long result = getLength() ^ getFileCount() ^ getDirectoryCount()
 ^ getSnapshotLength() ^ getSnapshotFileCount()
-^ getSnapshotDirectoryCount() ^ getSnapshotSpaceConsumed();
+^ getSnapshotDirectoryCount() ^ getSnapshotSpaceConsumed()
+^ getErasureCodingPolicy().hashCode();
 return ((int) result) ^ super.hashCode();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/b
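
The new Builder field and getter shown above are the client-side surface of the feature; a minimal sketch of constructing and reading a summary with an EC policy attached (the other builder values and the policy name are illustrative only):

import org.apache.hadoop.fs.ContentSummary;

public class EcContentSummarySketch {
  public static void main(String[] args) {
    ContentSummary summary = new ContentSummary.Builder()
        .length(1024)            // illustrative values
        .fileCount(1)
        .directoryCount(0)
        .erasureCodingPolicy("RS-6-3-64k")  // illustrative policy name
        .build();
    System.out.println("EC policy: " + summary.getErasureCodingPolicy());
  }
}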

hadoop git commit: HDFS-11916. Extend TestErasureCodingPolicies/TestErasureCodingPolicyWithSnapshot with a random EC policy. Contributed by Takanobu Asanuma.

2017-06-19 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk ee89ac84e -> 73fb75017


HDFS-11916. Extend 
TestErasureCodingPolicies/TestErasureCodingPolicyWithSnapshot with a random EC 
policy. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73fb7501
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73fb7501
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73fb7501

Branch: refs/heads/trunk
Commit: 73fb75017e238e72c3162914f0db66e50139e199
Parents: ee89ac8
Author: Lei Xu <l...@apache.org>
Authored: Mon Jun 19 10:25:20 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Mon Jun 19 10:25:20 2017 -0700

--
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 60 ++--
 ...ErasureCodingPoliciesWithRandomECPolicy.java | 48 
 .../TestErasureCodingPolicyWithSnapshot.java| 50 
 ...ingPolicyWithSnapshotWithRandomECPolicy.java | 49 
 4 files changed, 155 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73fb7501/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 4a4bed5..f90a2f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -61,15 +61,19 @@ public class TestErasureCodingPolicies {
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private static final int BLOCK_SIZE = 1024;
-  private static final ErasureCodingPolicy EC_POLICY =
-  StripedFileTestUtil.getDefaultECPolicy();
+  private ErasureCodingPolicy ecPolicy;
   private FSNamesystem namesystem;
 
+  public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Rule
   public Timeout timeout = new Timeout(60 * 1000);
 
   @Before
   public void setupCluster() throws IOException {
+ecPolicy = getEcPolicy();
 conf = new HdfsConfiguration();
 conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 DFSTestUtil.enableAllECPolicies(conf);
@@ -100,8 +104,7 @@ public class TestErasureCodingPolicies {
 DFSTestUtil.createFile(fs, replicatedFile, 0, (short) 3, 0L);
 
 // set ec policy on dir
-fs.setErasureCodingPolicy(dir,
-StripedFileTestUtil.getDefaultECPolicy().getName());
+fs.setErasureCodingPolicy(dir, ecPolicy.getName());
 // create a file which should be using ec
 final Path ecSubDir = new Path(dir, "ecSubDir");
 final Path ecFile = new Path(ecSubDir, "ecFile");
@@ -153,7 +156,7 @@ public class TestErasureCodingPolicies {
 fs.mkdir(testDir, FsPermission.getDirDefault());
 
 /* Normal creation of an erasure coding directory */
-fs.setErasureCodingPolicy(testDir, EC_POLICY.getName());
+fs.setErasureCodingPolicy(testDir, ecPolicy.getName());
 
 /* Verify files under the directory are striped */
 final Path ECFilePath = new Path(testDir, "foo");
@@ -169,7 +172,7 @@ public class TestErasureCodingPolicies {
 fs.mkdir(notEmpty, FsPermission.getDirDefault());
 final Path oldFile = new Path(notEmpty, "old");
 fs.create(oldFile);
-fs.setErasureCodingPolicy(notEmpty, EC_POLICY.getName());
+fs.setErasureCodingPolicy(notEmpty, ecPolicy.getName());
 final Path newFile = new Path(notEmpty, "new");
 fs.create(newFile);
 INode oldInode = namesystem.getFSDirectory().getINode(oldFile.toString());
@@ -181,10 +184,10 @@ public class TestErasureCodingPolicies {
 final Path dir1 = new Path("/dir1");
 final Path dir2 = new Path(dir1, "dir2");
 fs.mkdir(dir1, FsPermission.getDirDefault());
-fs.setErasureCodingPolicy(dir1, EC_POLICY.getName());
+fs.setErasureCodingPolicy(dir1, ecPolicy.getName());
 fs.mkdir(dir2, FsPermission.getDirDefault());
 try {
-  fs.setErasureCodingPolicy(dir2, EC_POLICY.getName());
+  fs.setErasureCodingPolicy(dir2, ecPolicy.getName());
 } catch (IOException e) {
   fail("Nested erasure coding policies are supported");
 }
@@ -193,7 +196,7 @@ public class TestErasureCodingPolicies {
 final Path fPath = new Path("/file");
 fs.create(fPath);
 try {
-  fs.setErasureCodingPolicy(fPath, EC_POLICY.getName());
+  fs.setErasureCodingPol
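
The refactor above replaces the hard-coded EC_POLICY constant with an overridable getEcPolicy() hook, and the new *WithRandomECPolicy classes presumably override it to return a randomly chosen policy. A standalone sketch of that hook pattern with stand-in types, not the actual test classes:

import java.util.Random;

public class RandomEcPolicyHookSketch {
  // Stand-in base class: the real one is TestErasureCodingPolicies
  // with its getEcPolicy() hook.
  static class BaseTests {
    protected String getEcPolicy() { return "RS-6-3-64k"; }
    void runAll() { System.out.println("testing with " + getEcPolicy()); }
  }

  // Stand-in for the *WithRandomECPolicy subclasses: override the hook
  // so every inherited test case runs against a randomly picked policy.
  static class WithRandomPolicy extends BaseTests {
    private static final String[] POLICIES =
        {"RS-3-2-64k", "RS-10-4-64k", "XOR-2-1-64k"};
    @Override
    protected String getEcPolicy() {
      return POLICIES[new Random().nextInt(POLICIES.length)];
    }
  }

  public static void main(String[] args) {
    new WithRandomPolicy().runAll();
  }
}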

hadoop git commit: HADOOP-14395. Provide Builder pattern for DistributedFileSystem.append. Contributed by Lei (Eddy) Xu.

2017-06-16 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9529513f1 -> 33afa1fdc


HADOOP-14395. Provide Builder pattern for DistributedFileSystem.append. 
Contributed by Lei (Eddy) Xu.

(cherry picked from commit 6460df21a09a7fcc29eceb8dc3859d6298da6882)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33afa1fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33afa1fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33afa1fd

Branch: refs/heads/branch-2
Commit: 33afa1fdca8dfafc0214504626650fb25aec0b95
Parents: 9529513
Author: Lei Xu <l...@apache.org>
Authored: Fri Jun 16 17:24:00 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Jun 16 17:32:29 2017 -0700

--
 .../java/org/apache/hadoop/fs/FileSystem.java   | 10 +
 .../hadoop/hdfs/DistributedFileSystem.java  | 39 +-
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 43 +++-
 3 files changed, 81 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33afa1fd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index e1329f4..38e53a4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -4163,4 +4163,14 @@ public abstract class FileSystem extends Configured 
implements Closeable {
 return new FileSystemDataOutputStreamBuilder(this, path)
 .create().overwrite(true);
   }
+
+  /**
+   * Create a Builder to append a file.
+   * @param path file path.
+   * @return a {@link FSDataOutputStreamBuilder} to build file append request.
+   */
+  @InterfaceAudience.Private
+  protected FSDataOutputStreamBuilder appendFile(Path path) {
+return new FileSystemDataOutputStreamBuilder(this, path).append();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33afa1fd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 3e7c899..9bccf77 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -29,6 +29,7 @@ import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -2652,7 +2653,7 @@ public class DistributedFileSystem extends FileSystem {
*/
   public static final class HdfsDataOutputStreamBuilder
   extends FSDataOutputStreamBuilder<
-  HdfsDataOutputStream, HdfsDataOutputStreamBuilder> {
+  FSDataOutputStream, HdfsDataOutputStreamBuilder> {
 private final DistributedFileSystem dfs;
 private InetSocketAddress[] favoredNodes = null;
 
@@ -2739,16 +2740,23 @@ public class DistributedFileSystem extends FileSystem {
  * @throws IOException on I/O errors.
  */
 @Override
-public HdfsDataOutputStream build() throws IOException {
-  if (isRecursive()) {
-return dfs.create(getPath(), getPermission(), getFlags(),
-getBufferSize(), getReplication(), getBlockSize(),
-getProgress(), getChecksumOpt(), getFavoredNodes());
-  } else {
-return dfs.createNonRecursive(getPath(), getPermission(), getFlags(),
-getBufferSize(), getReplication(), getBlockSize(), getProgress(),
-getChecksumOpt(), getFavoredNodes());
+public FSDataOutputStream build() throws IOException {
+  if (getFlags().contains(CreateFlag.CREATE)) {
+if (isRecursive()) {
+  return dfs.create(getPath(), getPermission(), getFlags(),
+  getBufferSize(), getReplication(), getBlockSize(),
+  getProgress(), getChecksumOpt(), getFavoredNodes());
+} else {
+  return dfs.createNonRecursive(getPath(), g
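
Usage-wise, the new append builder mirrors the existing createFile() builder. A short sketch, assuming the DistributedFileSystem override of appendFile is public (the diff stats suggest so) and that /tmp/log already exists:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class AppendBuilderSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // After this change the builder returns an FSDataOutputStream.
    try (FSDataOutputStream out =
        dfs.appendFile(new Path("/tmp/log")).build()) {
      out.writeBytes("appended via builder\n");
    }
  }
}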

hadoop git commit: HADOOP-14395. Provide Builder pattern for DistributedFileSystem.append. Contributed by Lei (Eddy) Xu.

2017-06-16 Thread lei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 82bbcbf37 -> 6460df21a


HADOOP-14395. Provide Builder pattern for DistributedFileSystem.append. 
Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6460df21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6460df21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6460df21

Branch: refs/heads/trunk
Commit: 6460df21a09a7fcc29eceb8dc3859d6298da6882
Parents: 82bbcbf
Author: Lei Xu <l...@apache.org>
Authored: Fri Jun 16 17:24:00 2017 -0700
Committer: Lei Xu <l...@apache.org>
Committed: Fri Jun 16 17:24:00 2017 -0700

--
 .../java/org/apache/hadoop/fs/FileSystem.java   | 10 +
 .../hadoop/hdfs/DistributedFileSystem.java  | 41 ++-
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 43 +++-
 3 files changed, 82 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6460df21/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index cc92f31..d7cd7dd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -4179,4 +4179,14 @@ public abstract class FileSystem extends Configured 
implements Closeable {
 return new FileSystemDataOutputStreamBuilder(this, path)
 .create().overwrite(true);
   }
+
+  /**
+   * Create a Builder to append a file.
+   * @param path file path.
+   * @return a {@link FSDataOutputStreamBuilder} to build file append request.
+   */
+  @InterfaceAudience.Private
+  protected FSDataOutputStreamBuilder appendFile(Path path) {
+return new FileSystemDataOutputStreamBuilder(this, path).append();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6460df21/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 1fd8f79..1a9ae48 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -30,6 +30,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -2734,7 +2735,7 @@ public class DistributedFileSystem extends FileSystem {
*/
   public static final class HdfsDataOutputStreamBuilder
   extends FSDataOutputStreamBuilder<
-  HdfsDataOutputStream, HdfsDataOutputStreamBuilder> {
+  FSDataOutputStream, HdfsDataOutputStreamBuilder> {
 private final DistributedFileSystem dfs;
 private InetSocketAddress[] favoredNodes = null;
 private String ecPolicyName = null;
@@ -2857,17 +2858,24 @@ public class DistributedFileSystem extends FileSystem {
  * @throws IOException on I/O errors.
  */
 @Override
-public HdfsDataOutputStream build() throws IOException {
-  if (isRecursive()) {
-return dfs.create(getPath(), getPermission(), getFlags(),
-getBufferSize(), getReplication(), getBlockSize(),
-getProgress(), getChecksumOpt(), getFavoredNodes(),
-getEcPolicyName());
-  } else {
-return dfs.createNonRecursive(getPath(), getPermission(), getFlags(),
-getBufferSize(), getReplication(), getBlockSize(), getProgress(),
-getChecksumOpt(), getFavoredNodes(), getEcPolicyName());
+public FSDataOutputStream build() throws IOException {
+  if (getFlags().contains(CreateFlag.CREATE)) {
+if (isRecursive()) {
+  return dfs.create(getPath(), getPermission(), getFlags(),
+  getBufferSize(), getReplication(), getBlockSize(),
+  getProgress(), getChecksumOpt(), getFavoredNodes(),
+  getEcPolicyName());
+} else {
+  return dfs.createNonRecursive(getPath(), getPermission(), getFlags(),
+  getBufferSize(), getRep
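
The reworked build() now dispatches on the accumulated CreateFlags instead of always creating, and the HadoopIllegalArgumentException import added at the top of the hunk suggests a guard for the case where neither CREATE nor APPEND is set. A condensed standalone model of that control flow; the real branches return streams from dfs.create, dfs.createNonRecursive, and dfs.append:

import java.util.EnumSet;

public class BuildDispatchSketch {
  enum CreateFlag { CREATE, OVERWRITE, APPEND }

  // Standalone model only; strings stand in for the returned streams.
  static String build(EnumSet<CreateFlag> flags, boolean recursive) {
    if (flags.contains(CreateFlag.CREATE)) {
      return recursive ? "dfs.create(...)" : "dfs.createNonRecursive(...)";
    } else if (flags.contains(CreateFlag.APPEND)) {
      return "dfs.append(...)";
    }
    // Inferred guard, mirroring the HadoopIllegalArgumentException import.
    throw new IllegalArgumentException("must specify CREATE or APPEND");
  }

  public static void main(String[] args) {
    System.out.println(build(EnumSet.of(CreateFlag.APPEND), false));
  }
}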
