[1/2] hadoop git commit: HADOOP-12239. StorageException complaining no lease ID when updating FolderLastModifiedTime in WASB. Contributed by Duo Xu.

2015-07-22 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 930e344d7 -> bb080b32d
  refs/heads/trunk 402532628 -> efa97243e


HADOOP-12239. StorageException complaining  no lease ID when updating 
FolderLastModifiedTime in WASB. Contributed by Duo Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/efa97243
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/efa97243
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/efa97243

Branch: refs/heads/trunk
Commit: efa97243ecb84b3b468e732897cd685e3869f480
Parents: 4025326
Author: cnauroth cnaur...@apache.org
Authored: Wed Jul 22 11:16:49 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Wed Jul 22 11:16:49 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/fs/azure/NativeAzureFileSystem.java| 8 ++--
 2 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/efa97243/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3d101d4..c0e5c92 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -995,6 +995,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12017. Hadoop archives command should use configurable replication
 factor when closing (Bibin A Chundatt via vinayakumarb)
 
+HADOOP-12239. StorageException complaining  no lease ID when updating
+FolderLastModifiedTime in WASB. (Duo Xu via cnauroth)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/efa97243/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index a567b33..bb9941b 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -1360,8 +1360,12 @@ public class NativeAzureFileSystem extends FileSystem {
   String parentKey = pathToKey(parentFolder);
   FileMetadata parentMetadata = store.retrieveMetadata(parentKey);
      if (parentMetadata != null && parentMetadata.isDir() &&
-          parentMetadata.getBlobMaterialization() == BlobMaterialization.Explicit) {
-        store.updateFolderLastModifiedTime(parentKey, parentFolderLease);
+        parentMetadata.getBlobMaterialization() == BlobMaterialization.Explicit) {
+        if (parentFolderLease != null) {
+          store.updateFolderLastModifiedTime(parentKey, parentFolderLease);
+        } else {
+          updateParentFolderLastModifiedTime(key);
+        }
   } else {
 // Make sure that the parent folder exists.
 // Create it using inherited permissions from the first existing 
directory going up the path
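
For readers following the patch: the change only passes a lease to store.updateFolderLastModifiedTime() when the caller actually holds one, and otherwise falls back to the code path that acquires its own lease. A minimal standalone sketch of that pattern follows; FolderStore and acquireLeaseAndUpdate() are illustrative stand-ins, not the real NativeAzureFileSystem/AzureNativeFileSystemStore API.

// Sketch of the null-lease fallback applied by HADOOP-12239 (assumed shape,
// not the actual WASB classes).
interface FolderStore {
  void updateFolderLastModifiedTime(String key, String leaseId);
}

final class FolderTimestampUpdater {
  private final FolderStore store;

  FolderTimestampUpdater(FolderStore store) {
    this.store = store;
  }

  void update(String parentKey, String parentFolderLease) {
    if (parentFolderLease != null) {
      // The caller already holds a lease on the parent folder blob: reuse it.
      store.updateFolderLastModifiedTime(parentKey, parentFolderLease);
    } else {
      // No lease was passed in: take the path that acquires one itself,
      // instead of issuing an update the Azure storage service would reject.
      acquireLeaseAndUpdate(parentKey);
    }
  }

  private void acquireLeaseAndUpdate(String parentKey) {
    // Placeholder for the lease-acquiring path
    // (updateParentFolderLastModifiedTime(key) in the patch).
  }
}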



[2/2] hadoop git commit: HADOOP-12239. StorageException complaining no lease ID when updating FolderLastModifiedTime in WASB. Contributed by Duo Xu.

2015-07-22 Thread cnauroth
HADOOP-12239. StorageException complaining  no lease ID when updating 
FolderLastModifiedTime in WASB. Contributed by Duo Xu.

(cherry picked from commit efa97243ecb84b3b468e732897cd685e3869f480)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb080b32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb080b32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb080b32

Branch: refs/heads/branch-2
Commit: bb080b32d46fe02544d36b91de794860dd7d55f3
Parents: 930e344
Author: cnauroth cnaur...@apache.org
Authored: Wed Jul 22 11:16:49 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Wed Jul 22 11:17:00 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/fs/azure/NativeAzureFileSystem.java| 8 ++--
 2 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb080b32/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9140867..f5d9a84 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -505,6 +505,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12017. Hadoop archives command should use configurable replication
 factor when closing (Bibin A Chundatt via vinayakumarb)
 
+HADOOP-12239. StorageException complaining  no lease ID when updating
+FolderLastModifiedTime in WASB. (Duo Xu via cnauroth)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb080b32/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index a567b33..bb9941b 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -1360,8 +1360,12 @@ public class NativeAzureFileSystem extends FileSystem {
   String parentKey = pathToKey(parentFolder);
   FileMetadata parentMetadata = store.retrieveMetadata(parentKey);
      if (parentMetadata != null && parentMetadata.isDir() &&
-          parentMetadata.getBlobMaterialization() == BlobMaterialization.Explicit) {
-        store.updateFolderLastModifiedTime(parentKey, parentFolderLease);
+        parentMetadata.getBlobMaterialization() == BlobMaterialization.Explicit) {
+        if (parentFolderLease != null) {
+          store.updateFolderLastModifiedTime(parentKey, parentFolderLease);
+        } else {
+          updateParentFolderLastModifiedTime(key);
+        }
   } else {
 // Make sure that the parent folder exists.
 // Create it using inherited permissions from the first existing 
directory going up the path



[2/9] hadoop git commit: HDFS-8721. Add a metric for number of encryption zones. Contributed by Rakesh R.

2015-07-22 Thread aengineer
HDFS-8721. Add a metric for number of encryption zones. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb03768b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb03768b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb03768b

Branch: refs/heads/HDFS-7240
Commit: cb03768b1b2250b9b5a7944cf6ef918e8a974e20
Parents: 5137b38
Author: cnauroth cnaur...@apache.org
Authored: Tue Jul 21 13:55:58 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Tue Jul 21 13:55:58 2015 -0700

--
 .../hadoop-common/src/site/markdown/Metrics.md| 1 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop/hdfs/server/namenode/EncryptionZoneManager.java| 7 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 6 ++
 .../hdfs/server/namenode/metrics/FSNamesystemMBean.java   | 5 +
 .../test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java | 6 ++
 .../hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java| 5 +
 7 files changed, 33 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index ca89745..2b23508 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -216,6 +216,7 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `TotalLoad` | Current number of connections |
 | `SnapshottableDirectories` | Current number of snapshottable directories |
 | `Snapshots` | Current number of snapshots |
+| `NumEncryptionZones` | Current number of encryption zones |
 | `BlocksTotal` | Current number of allocated blocks in the system |
 | `FilesTotal` | Current number of files and directories |
 | `PendingReplicationBlocks` | Current number of blocks pending to be 
replicated |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a29a090..7c771b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -734,6 +734,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7483. Display information per tier on the Namenode UI.
 (Benoy Antony and wheat9 via wheat9)
 
+HDFS-8721. Add a metric for number of encryption zones.
+(Rakesh R via cnauroth)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 3fe748d..7c3c895 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -360,4 +360,11 @@ public class EncryptionZoneManager {
     final boolean hasMore = (numResponses < tailMap.size());
     return new BatchedListEntries<EncryptionZone>(zones, hasMore);
   }
+
+  /**
+   * @return number of encryption zones.
+   */
+  public int getNumEncryptionZones() {
+return encryptionZones.size();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb03768b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7c6d6a1..fd37fbe 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4075,6 +4075,12 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
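
The FSNamesystem hunk is truncated in this digest. For context, the namenode side of a change like this typically exposes the counter as a read-only gauge that the metrics system and FSNamesystemMBean pick up. The sketch below shows that shape only; the field name (ezManager), the @Metric description text and the metrics-system registration are assumptions, not the exact patch.

// Illustrative wiring of a "number of encryption zones" gauge; see the real
// FSNamesystem diff for the authoritative version.
import org.apache.hadoop.metrics2.annotation.Metric;

public class EncryptionZoneMetricsSketch {
  // Stand-in for the EncryptionZoneManager#getNumEncryptionZones() shown above.
  static class EncryptionZoneCounter {
    int getNumEncryptionZones() { return 0; }
  }

  private final EncryptionZoneCounter ezManager = new EncryptionZoneCounter();

  @Metric({"NumEncryptionZones", "Current number of encryption zones"})
  public int getNumEncryptionZones() {
    return ezManager.getNumEncryptionZones();
  }
}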
 

[6/9] hadoop git commit: HADOOP-12017. Hadoop archives command should use configurable replication factor when closing (Contributed by Bibin A Chundatt)

2015-07-22 Thread aengineer
HADOOP-12017. Hadoop archives command should use configurable replication 
factor when closing (Contributed by Bibin A Chundatt)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94c6a4aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94c6a4aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94c6a4aa

Branch: refs/heads/HDFS-7240
Commit: 94c6a4aa85e7d98e9b532b330f30783315f4334b
Parents: 31f1171
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Jul 22 10:25:49 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed Jul 22 10:25:49 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../org/apache/hadoop/tools/HadoopArchives.java | 21 ++--
 .../src/site/markdown/HadoopArchives.md.vm  |  2 +-
 .../apache/hadoop/tools/TestHadoopArchives.java | 26 
 4 files changed, 33 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94c6a4aa/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5b51bce..3d101d4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -992,6 +992,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
 over getMessage() in logging/span events. (Varun Saxena via stevel)
 
+HADOOP-12017. Hadoop archives command should use configurable replication
+factor when closing (Bibin A Chundatt via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94c6a4aa/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
--
diff --git 
a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
 
b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
index 330830b..ee14850 100644
--- 
a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
+++ 
b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
@@ -100,15 +100,17 @@ public class HadoopArchives implements Tool {
   static final String SRC_PARENT_LABEL = NAME + ".parent.path";
   /** the size of the blocks that will be created when archiving **/
   static final String HAR_BLOCKSIZE_LABEL = NAME + ".block.size";
-  /**the size of the part files that will be created when archiving **/
+  /** the replication factor for the file in archiving. **/
+  static final String HAR_REPLICATION_LABEL = NAME + ".replication.factor";
+  /** the size of the part files that will be created when archiving **/
   static final String HAR_PARTSIZE_LABEL = NAME + ".partfile.size";
 
   /** size of each part file size **/
   long partSize = 2 * 1024 * 1024 * 1024l;
   /** size of blocks in hadoop archives **/
   long blockSize = 512 * 1024 * 1024l;
-  /** the desired replication degree; default is 10 **/
-  short repl = 10;
+  /** the desired replication degree; default is 3 **/
+  short repl = 3;
 
   private static final String usage = "archive"
   + " -archiveName <NAME>.har -p <parent path> [-r <replication factor>]" +
@@ -475,6 +477,7 @@ public class HadoopArchives implements Tool {
 conf.setLong(HAR_PARTSIZE_LABEL, partSize);
 conf.set(DST_HAR_LABEL, archiveName);
 conf.set(SRC_PARENT_LABEL, parentPath.makeQualified(fs).toString());
+conf.setInt(HAR_REPLICATION_LABEL, repl);
 Path outputPath = new Path(dest, archiveName);
 FileOutputFormat.setOutputPath(conf, outputPath);
 FileSystem outFs = outputPath.getFileSystem(conf);
@@ -549,8 +552,6 @@ public class HadoopArchives implements Tool {
 } finally {
   srcWriter.close();
 }
-//increase the replication of src files
-jobfs.setReplication(srcFiles, repl);
 conf.setInt(SRC_COUNT_LABEL, numFiles);
 conf.setLong(TOTAL_SIZE_LABEL, totalSize);
 int numMaps = (int)(totalSize/partSize);
@@ -587,6 +588,7 @@ public class HadoopArchives implements Tool {
 FileSystem destFs = null;
 byte[] buffer;
 int buf_size = 128 * 1024;
+private int replication = 3;
 long blockSize = 512 * 1024 * 1024l;
 
 // configure the mapper and create 
@@ -595,7 +597,7 @@ public class HadoopArchives implements Tool {
 // tmp files. 
 public void configure(JobConf conf) {
   this.conf = conf;
-
+  replication = conf.getInt(HAR_REPLICATION_LABEL, 3);
   // this is tightly tied to map reduce
   // since it does not expose an api 
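
With the unconditional jobfs.setReplication() call removed, the replication factor now travels through the job configuration (har.replication.factor, default 3) to the map tasks instead of being forced onto the source-file listing. A hedged sketch of the read-and-apply step is below; the surrounding class is illustrative, and only the key name (assuming NAME is "har" as in HadoopArchives) and the default mirror the diff.

// Illustrative only: read the configured HAR replication factor and apply it
// to a finished output file with FileSystem#setReplication.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HarReplicationSketch {
  // Mirrors NAME + ".replication.factor" from the diff above.
  static final String HAR_REPLICATION_LABEL = "har.replication.factor";

  public static void applyReplication(Configuration conf, Path outputFile)
      throws IOException {
    short replication = (short) conf.getInt(HAR_REPLICATION_LABEL, 3);
    FileSystem fs = outputFile.getFileSystem(conf);
    // Returns false if the underlying file system does not support replication.
    fs.setReplication(outputFile, replication);
  }
}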
   

hadoop git commit: HADOOP-12184. Remove unused Linux-specific constants in NativeIO (Martin Walsh via Colin P. McCabe)

2015-07-22 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk efa97243e -> 1b3bceb58


HADOOP-12184. Remove unused Linux-specific constants in NativeIO (Martin Walsh 
via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b3bceb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b3bceb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b3bceb5

Branch: refs/heads/trunk
Commit: 1b3bceb58c8e536a75fa3f99cc3ceeaba91a07de
Parents: efa9724
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Jul 22 11:11:38 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Wed Jul 22 11:34:10 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java| 4 
 2 files changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b3bceb5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c0e5c92..ff7d2ad 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -713,6 +713,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12214. Parse 'HadoopArchive' commandline using cli Options.
 (vinayakumarb)
 
+HADOOP-12184. Remove unused Linux-specific constants in NativeIO (Martin
+Walsh via Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b3bceb5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index 688b955..77a40ea 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -67,9 +67,6 @@ public class NativeIO {
 public static final int O_APPEND   = 02000;
 public static final int O_NONBLOCK = 04000;
 public static final int O_SYNC   =  01;
-public static final int O_ASYNC  =  02;
-public static final int O_FSYNC = O_SYNC;
-public static final int O_NDELAY = O_NONBLOCK;
 
 // Flags for posix_fadvise() from bits/fcntl.h
 /* No further special treatment.  */
@@ -356,7 +353,6 @@ public class NativeIO {
   public static final int   S_IFREG  = 010;  /* regular */
   public static final int   S_IFLNK  = 012;  /* symbolic link */
   public static final int   S_IFSOCK = 014;  /* socket */
-  public static final int   S_IFWHT  = 016;  /* whiteout */
   public static final int S_ISUID = 0004000;  /* set user id on execution 
*/
   public static final int S_ISGID = 0002000;  /* set group id on execution 
*/
   public static final int S_ISVTX = 0001000;  /* save swapped text even 
after use */
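
Since the removed O_ASYNC/O_FSYNC/O_NDELAY and S_IFWHT constants were unused, callers keep using the remaining flags exactly as before. A hedged sketch of a typical call site is below; it assumes the NativeIO.POSIX.open(path, flags, mode) wrapper and NativeIO.isAvailable() check present in this class, needs libhadoop at runtime, and should be verified against your Hadoop version.

// Illustrative use of the flags that remain in NativeIO.POSIX after this cleanup.
import java.io.FileDescriptor;
import java.io.IOException;
import org.apache.hadoop.io.nativeio.NativeIO;

public class NativeIoOpenSketch {
  public static FileDescriptor createForWrite(String path) throws IOException {
    if (!NativeIO.isAvailable()) {
      throw new IOException("hadoop native code is not loaded");
    }
    int flags = NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT;
    // 0644 = owner read/write, group and other read-only.
    return NativeIO.POSIX.open(path, flags, 0644);
  }
}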



[7/9] hadoop git commit: HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)

2015-07-22 Thread aengineer
HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40253262
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40253262
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40253262

Branch: refs/heads/HDFS-7240
Commit: 4025326288c0167ff300d4f7ecc96f84ed141912
Parents: 94c6a4a
Author: yliu y...@apache.org
Authored: Wed Jul 22 15:16:50 2015 +0800
Committer: yliu y...@apache.org
Committed: Wed Jul 22 15:16:50 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
 .../hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java| 5 +++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40253262/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 50803de..66cb89e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -740,6 +740,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8495. Consolidate append() related implementation into a single class.
 (Rakesh R via wheat9)
 
+HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40253262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
index a465f85..c486095 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
@@ -22,9 +22,9 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.GregorianCalendar;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.TreeMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,6 +36,7 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdfs.DFSUtil;
 
 import com.google.common.annotations.VisibleForTesting;
+
 import org.slf4j.Logger;
 
 /**
@@ -47,7 +48,7 @@ import org.slf4j.Logger;
 class InvalidateBlocks {
   /** Mapping: DatanodeInfo -> Collection of Blocks */
   private final Map<DatanodeInfo, LightWeightHashSet<Block>> node2blocks =
-      new TreeMap<DatanodeInfo, LightWeightHashSet<Block>>();
+      new HashMap<DatanodeInfo, LightWeightHashSet<Block>>();
   /** The total number of blocks in the map. */
   private long numBlocks = 0L;
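
The gain here is that node2blocks is consulted by exact datanode key and, as the switch to HashMap implies, iteration order is not relied on, so the sorted TreeMap was paying for DatanodeInfo comparisons on every access without the ordering being needed. A simplified sketch of that access pattern, with plain String/Long stand-ins for DatanodeInfo and LightWeightHashSet<Block>:

// Why a HashMap is sufficient for node2blocks: O(1) keyed lookups, no reliance
// on iteration order. Types are simplified stand-ins, not the HDFS classes.
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class InvalidateBlocksSketch {
  private final Map<String, Set<Long>> node2blocks = new HashMap<>();

  void add(String datanode, long blockId) {
    Set<Long> set = node2blocks.get(datanode);
    if (set == null) {
      set = new HashSet<>();
      node2blocks.put(datanode, set);
    }
    set.add(blockId);
  }

  int numBlocksFor(String datanode) {
    Set<Long> blocks = node2blocks.get(datanode);  // hash lookup, no key comparisons
    return blocks == null ? 0 : blocks.size();
  }
}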
 



[9/9] hadoop git commit: HDFS-8753. Ozone: Unify StorageContainerConfiguration with ozone-default.xml & ozone-site.xml. Contributed by kanaka kumar avvaru

2015-07-22 Thread aengineer
HDFS-8753. Ozone: Unify StorageContainerConfiguration with ozone-default.xml &
ozone-site.xml. Contributed by kanaka kumar avvaru


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43bed72d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43bed72d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43bed72d

Branch: refs/heads/HDFS-7240
Commit: 43bed72d1355d44f6e910ba7ab9858dfde6edc4f
Parents: 01094cbf
Author: Anu Engineer anu.engin...@gmail.com
Authored: Wed Jul 22 10:31:43 2015 -0700
Committer: Anu Engineer anu.engin...@gmail.com
Committed: Wed Jul 22 10:31:43 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  2 ++
 .../apache/hadoop/ozone/OzoneConfiguration.java | 36 
 .../ozone/StorageContainerConfiguration.java| 35 ---
 .../web/localstorage/OzoneMetadataManager.java  |  4 +--
 .../StorageContainerConfiguration.java  | 32 -
 .../StorageContainerManager.java|  5 +--
 .../src/main/resources/ozone-default.xml| 27 +++
 .../src/test/resources/ozone-site.xml   | 24 +
 8 files changed, 94 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43bed72d/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index db38851..5ec8a24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -307,6 +307,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <configuration>
               <tasks>
                 <copy file="src/main/resources/hdfs-default.xml" todir="src/site/resources/"/>
+                <copy file="src/main/resources/ozone-default.xml" todir="src/site/resources/"/>
                 <copy file="src/main/xsl/configuration.xsl" todir="src/site/resources/"/>
               </tasks>
             </configuration>
@@ -401,6 +402,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
               <includes>
                 <include>configuration.xsl</include>
                 <include>hdfs-default.xml</include>
+                <include>ozone-default.xml</include>
               </includes>
               <followSymlinks>false</followSymlinks>
             </fileset>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43bed72d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfiguration.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfiguration.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfiguration.java
new file mode 100644
index 000..70efa49
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfiguration.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Configuration for ozone.
+ */
+@InterfaceAudience.Private
+public class OzoneConfiguration extends Configuration {
+  static {
+// adds the default resources
+    Configuration.addDefaultResource("hdfs-default.xml");
+    Configuration.addDefaultResource("hdfs-site.xml");
+    Configuration.addDefaultResource("ozone-default.xml");
+    Configuration.addDefaultResource("ozone-site.xml");
+  }
+}
\ No newline at end of file
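
Once the static initializer above has registered ozone-default.xml and ozone-site.xml, callers read keys through the ordinary Configuration API. A small hedged usage sketch follows; the key name and default value are hypothetical, for illustration only.

// Hedged usage sketch for the new OzoneConfiguration class.
import org.apache.hadoop.ozone.OzoneConfiguration;

public class OzoneConfSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // ozone-site.xml values override ozone-default.xml, following the usual
    // *-default/*-site convention registered by the static block above.
    String value = conf.get("ozone.example.key", "default-value");
    System.out.println("ozone.example.key = " + value);
  }
}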

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43bed72d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/StorageContainerConfiguration.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/StorageContainerConfiguration.java
 

[8/9] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2015-07-22 Thread aengineer
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01094cbf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01094cbf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01094cbf

Branch: refs/heads/HDFS-7240
Commit: 01094cbf0a44af89fd0a2e91812c9c0756de6934
Parents: 12bd963 4025326
Author: Anu Engineer anu.engin...@gmail.com
Authored: Wed Jul 22 10:28:28 2015 -0700
Committer: Anu Engineer anu.engin...@gmail.com
Committed: Wed Jul 22 10:28:28 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../hadoop-common/src/site/markdown/Metrics.md  |   6 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  14 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   3 -
 .../BlockInfoUnderConstruction.java |  19 +-
 .../server/blockmanagement/BlockManager.java|  14 +-
 .../blockmanagement/InvalidateBlocks.java   |   5 +-
 .../server/namenode/EncryptionZoneManager.java  |   7 +
 .../hdfs/server/namenode/FSDirAppendOp.java | 261 +++
 .../server/namenode/FSDirStatAndListingOp.java  |   2 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java   |  16 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |   6 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 257 +++---
 .../namenode/metrics/FSNamesystemMBean.java |   5 +
 .../src/main/resources/hdfs-default.xml |   9 -
 .../apache/hadoop/hdfs/TestEncryptionZones.java |   6 +
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   |  78 --
 .../server/namenode/TestFSNamesystemMBean.java  |   5 +
 .../org/apache/hadoop/tools/HadoopArchives.java |  21 +-
 .../src/site/markdown/HadoopArchives.md.vm  |   2 +-
 .../apache/hadoop/tools/TestHadoopArchives.java |  26 +-
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/event/AsyncDispatcher.java  |   8 +
 .../hadoop/yarn/event/DrainDispatcher.java  |  11 +-
 .../hadoop/yarn/event/TestAsyncDispatcher.java  |  62 +
 26 files changed, 470 insertions(+), 383 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01094cbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--



[1/9] hadoop git commit: Revert HDFS-8344. NameNode doesn't recover lease for files with missing blocks (raviprak)

2015-07-22 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 12bd96395 -> 43bed72d1


Revert HDFS-8344. NameNode doesn't recover lease for files with missing blocks 
(raviprak)

This reverts commit e4f756260f16156179ba4adad974ec92279c2fac.

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5137b388
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5137b388
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5137b388

Branch: refs/heads/HDFS-7240
Commit: 5137b388fc9d4d716f780daf6d04292feeb9df96
Parents: 68d1f4b
Author: Ravi Prakash ravip...@altiscale.com
Authored: Tue Jul 21 11:29:35 2015 -0700
Committer: Ravi Prakash ravip...@altiscale.com
Committed: Tue Jul 21 11:29:35 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  3 -
 .../BlockInfoUnderConstruction.java | 19 +
 .../server/blockmanagement/BlockManager.java| 14 +---
 .../hdfs/server/namenode/FSNamesystem.java  | 10 ---
 .../src/main/resources/hdfs-default.xml |  9 ---
 .../apache/hadoop/hdfs/TestLeaseRecovery.java   | 78 
 7 files changed, 4 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5137b388/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 223baaf..a29a090 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1056,9 +1056,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-8778. TestBlockReportRateLimiting#testLeaseExpiration can deadlock.
 (Arpit Agarwal)
 
-HDFS-8344. NameNode doesn't recover lease for files with missing blocks
-(raviprak)
-
 HDFS-7582. Enforce maximum number of ACL entries separately per access
 and default. (vinayakumarb)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5137b388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 210d1e5..0e569f0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -440,9 +440,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final long    DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT = 10 * 1000;
  public static final String  DFS_BLOCK_INVALIDATE_LIMIT_KEY = "dfs.block.invalidate.limit";
  public static final int     DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT = 1000;
-  public static final String  DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS = "dfs.block.uc.max.recovery.attempts";
-  public static final int     DFS_BLOCK_UC_MAX_RECOVERY_ATTEMPTS_DEFAULT = 5;
-
  public static final String  DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY = "dfs.corruptfilesreturned.max";
   public static final int DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED = 500;
   /* Maximum number of blocks to process for initializing replication queues */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5137b388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index 28f1633..9cd3987 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 
@@ -60,11 +61,6 @@ public abstract class BlockInfoUnderConstruction extends 
BlockInfo {
*/
   protected Block truncateBlock;
 
-  /** The number of times all replicas will be used to attempt recovery before
-   * giving up and marking the block under construction missing.
-   */
-  private int recoveryAttemptsBeforeMarkingBlockMissing;
-
   /**
* 

[3/9] hadoop git commit: HDFS-8773. Few FSNamesystem metrics are not documented in the Metrics page. Contributed by Rakesh R.

2015-07-22 Thread aengineer
HDFS-8773. Few FSNamesystem metrics are not documented in the Metrics page. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a26cc66f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a26cc66f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a26cc66f

Branch: refs/heads/HDFS-7240
Commit: a26cc66f38daec2342215a66b599bf59cee1112c
Parents: cb03768
Author: cnauroth cnaur...@apache.org
Authored: Tue Jul 21 14:12:03 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Tue Jul 21 14:12:03 2015 -0700

--
 .../hadoop-common/src/site/markdown/Metrics.md  | 5 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 2 files changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a26cc66f/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 2b23508..646cda5 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -231,6 +231,11 @@ Each metrics record contains tags such as HAState and 
Hostname as additional inf
 | `BlockCapacity` | Current number of block capacity |
 | `StaleDataNodes` | Current number of DataNodes marked stale due to delayed 
heartbeat |
 | `TotalFiles` | Current number of files and directories (same as FilesTotal) |
+| `MissingReplOneBlocks` | Current number of missing blocks with replication 
factor 1 |
+| `NumFilesUnderConstruction` | Current number of files under construction |
+| `NumActiveClients` | Current number of active clients holding lease |
+| `HAState` | (HA-only) Current state of the NameNode: initializing or active 
or standby or stopping state |
+| `FSState` | Current state of the file system: Safemode or Operational |
 
 JournalNode
 ---

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a26cc66f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7c771b0..8122045 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1062,6 +1062,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7582. Enforce maximum number of ACL entries separately per access
 and default. (vinayakumarb)
 
+HDFS-8773. Few FSNamesystem metrics are not documented in the Metrics page.
+(Rakesh R via cnauroth)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
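
The gauges documented above are surfaced through the FSNamesystemState JMX bean, which is what TestFSNamesystemMBean (also touched by this patch) exercises. A hedged sketch of reading a few of them from inside the NameNode process follows; with a remote JMXConnector the same ObjectName and attribute names should apply.

// Reads a few FSNamesystemState attributes from the platform MBeanServer.
// Attribute names follow the table rows added above; run in (or connect to)
// a live NameNode JVM.
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class FsNamesystemStateReader {
  public static void printState() throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name =
        new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
    System.out.println("FSState = " + mbs.getAttribute(name, "FSState"));
    System.out.println("NumActiveClients = "
        + mbs.getAttribute(name, "NumActiveClients"));
    System.out.println("NumFilesUnderConstruction = "
        + mbs.getAttribute(name, "NumFilesUnderConstruction"));
  }
}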



hadoop git commit: HADOOP-12184. Remove unused Linux-specific constants in NativeIO (Martin Walsh via Colin P. McCabe)

2015-07-22 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bb080b32d -> d9876e69e


HADOOP-12184. Remove unused Linux-specific constants in NativeIO (Martin Walsh 
via Colin P. McCabe)

(cherry picked from commit 1b3bceb58c8e536a75fa3f99cc3ceeaba91a07de)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9876e69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9876e69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9876e69

Branch: refs/heads/branch-2
Commit: d9876e69ee4489219cd83b57703193d53a567220
Parents: bb080b3
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Jul 22 11:11:38 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Wed Jul 22 11:35:14 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java| 4 
 2 files changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9876e69/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index f5d9a84..6320dc3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -208,6 +208,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12214. Parse 'HadoopArchive' commandline using cli Options.
 (vinayakumarb)
 
+HADOOP-12184. Remove unused Linux-specific constants in NativeIO (Martin
+Walsh via Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9876e69/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index 688b955..77a40ea 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -67,9 +67,6 @@ public class NativeIO {
 public static final int O_APPEND   = 02000;
 public static final int O_NONBLOCK = 04000;
 public static final int O_SYNC   =  01;
-public static final int O_ASYNC  =  02;
-public static final int O_FSYNC = O_SYNC;
-public static final int O_NDELAY = O_NONBLOCK;
 
 // Flags for posix_fadvise() from bits/fcntl.h
 /* No further special treatment.  */
@@ -356,7 +353,6 @@ public class NativeIO {
   public static final int   S_IFREG  = 010;  /* regular */
   public static final int   S_IFLNK  = 012;  /* symbolic link */
   public static final int   S_IFSOCK = 014;  /* socket */
-  public static final int   S_IFWHT  = 016;  /* whiteout */
   public static final int S_ISUID = 0004000;  /* set user id on execution 
*/
   public static final int S_ISGID = 0002000;  /* set group id on execution 
*/
   public static final int S_ISVTX = 0001000;  /* save swapped text even 
after use */



hadoop git commit: YARN-3956. Fix TestNodeManagerHardwareUtils fails on Mac (Varun Vasudev via wangda)

2015-07-22 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3576b0ac5 -> 3800e25d6


YARN-3956. Fix TestNodeManagerHardwareUtils fails on Mac (Varun Vasudev via 
wangda)

(cherry picked from commit 06e5dd2c84c49460884757b56980b1b9c58af996)

Conflicts:
hadoop-yarn-project/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3800e25d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3800e25d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3800e25d

Branch: refs/heads/branch-2
Commit: 3800e25d6e491a0059054166cb78e0455ecc20f7
Parents: 3576b0a
Author: Wangda Tan wan...@apache.org
Authored: Wed Jul 22 11:59:31 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Jul 22 12:03:14 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 2 ++
 .../server/nodemanager/util/TestNodeManagerHardwareUtils.java   | 5 +
 2 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3800e25d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8b36292..3f093bd 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -604,6 +604,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo 
 should based on total-used-resources. (Bibin A Chundatt via wangda)
 
+YARN-3956. Fix TestNodeManagerHardwareUtils fails on Mac (Varun Vasudev 
via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3800e25d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
index 5bf8cb7..84a045d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
@@ -30,6 +30,11 @@ import org.mockito.Mockito;
 public class TestNodeManagerHardwareUtils {
 
   static class TestResourceCalculatorPlugin extends ResourceCalculatorPlugin {
+
+TestResourceCalculatorPlugin() {
+  super(null);
+}
+
 @Override
 public long getVirtualMemorySize() {
   return 0;



hadoop git commit: YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo should based on total-used-resources. (Bibin A Chundatt via wangda)

2015-07-22 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1b3bceb58 -> 76ec26de8


YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo 
should based on total-used-resources. (Bibin A Chundatt via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76ec26de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76ec26de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76ec26de

Branch: refs/heads/trunk
Commit: 76ec26de8099dc48ce3812c595b7ab857a600442
Parents: 1b3bceb
Author: Wangda Tan wan...@apache.org
Authored: Wed Jul 22 11:54:02 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Jul 22 11:54:02 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../scheduler/SchedulerApplicationAttempt.java  |  2 +-
 .../scheduler/capacity/LeafQueue.java   |  8 ++-
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 64 
 4 files changed, 74 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76ec26de/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5100cdf..f751862 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -653,6 +653,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3885. ProportionalCapacityPreemptionPolicy doesn't preempt if queue 
is 
 more than 2 level. (Ajith S via wangda)
 
+YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo 
+should based on total-used-resources. (Bibin A Chundatt via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76ec26de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index cf543bd..317e61c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -598,7 +598,7 @@ public class SchedulerApplicationAttempt implements 
SchedulableEntity {
 AggregateAppResourceUsage runningResourceUsage =
 getRunningAggregateAppResourceUsage();
 Resource usedResourceClone =
-Resources.clone(attemptResourceUsage.getUsed());
+Resources.clone(attemptResourceUsage.getAllUsed());
 Resource reservedResourceClone =
 Resources.clone(attemptResourceUsage.getReserved());
 return ApplicationResourceUsageReport.newInstance(liveContainers.size(),
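
The one-line change above matters because the attempt's ResourceUsage is tracked per node-label partition: getUsed() with no argument covers only the default partition, while getAllUsed() aggregates across every partition, which is what a per-attempt usage report should show once containers run on labeled nodes (hence "should based on total-used-resources" in the JIRA title). A simplified, hedged sketch of that distinction; this is not the real ResourceUsage class and tracks memory only, for brevity.

// Stand-in for the per-partition accounting behind this fix; only the
// getUsed()/getAllUsed() contrast is meant to mirror the real class.
import java.util.HashMap;
import java.util.Map;

class PartitionedUsageSketch {
  private static final String NO_LABEL = "";
  private final Map<String, Long> usedMbByPartition = new HashMap<>();

  void incUsed(String partition, long memMb) {
    Long current = usedMbByPartition.get(partition);
    usedMbByPartition.put(partition, (current == null ? 0L : current) + memMb);
  }

  // Analogue of getUsed(): the default (no-label) partition only.
  long getUsed() {
    Long v = usedMbByPartition.get(NO_LABEL);
    return v == null ? 0L : v;
  }

  // Analogue of getAllUsed(): the sum over every partition, which is what
  // getResourceUsageReport() needs.
  long getAllUsed() {
    long sum = 0L;
    for (long v : usedMbByPartition.values()) {
      sum += v;
    }
    return sum;
  }
}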

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76ec26de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 0ce4d68..5c283f4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -439,7 +439,7 @@ public class LeafQueue extends AbstractCSQueue {
     for (Map.Entry<String, User> entry : users.entrySet()) {
   User user = entry.getValue();
   usersToReturn.add(new UserInfo(entry.getKey(), Resources.clone(user
-  .getUsed()), user.getActiveApplications(), user
+  

hadoop git commit: YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo should based on total-used-resources. (Bibin A Chundatt via wangda)

2015-07-22 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d9876e69e -> 3576b0ac5


YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo 
should based on total-used-resources. (Bibin A Chundatt via wangda)

(cherry picked from commit 76ec26de8099dc48ce3812c595b7ab857a600442)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3576b0ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3576b0ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3576b0ac

Branch: refs/heads/branch-2
Commit: 3576b0ac5e5aa2edb7a4d0948ac8929a7cab5c26
Parents: d9876e6
Author: Wangda Tan wan...@apache.org
Authored: Wed Jul 22 11:54:02 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Jul 22 11:54:38 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../scheduler/SchedulerApplicationAttempt.java  |  2 +-
 .../scheduler/capacity/LeafQueue.java   |  8 ++-
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 64 
 4 files changed, 74 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3576b0ac/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 571c113..8b36292 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -601,6 +601,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3885. ProportionalCapacityPreemptionPolicy doesn't preempt if queue 
is 
 more than 2 level. (Ajith S via wangda)
 
+YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo 
+should based on total-used-resources. (Bibin A Chundatt via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3576b0ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index cf543bd..317e61c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -598,7 +598,7 @@ public class SchedulerApplicationAttempt implements 
SchedulableEntity {
 AggregateAppResourceUsage runningResourceUsage =
 getRunningAggregateAppResourceUsage();
 Resource usedResourceClone =
-Resources.clone(attemptResourceUsage.getUsed());
+Resources.clone(attemptResourceUsage.getAllUsed());
 Resource reservedResourceClone =
 Resources.clone(attemptResourceUsage.getReserved());
 return ApplicationResourceUsageReport.newInstance(liveContainers.size(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3576b0ac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 0ce4d68..5c283f4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -439,7 +439,7 @@ public class LeafQueue extends AbstractCSQueue {
     for (Map.Entry<String, User> entry : users.entrySet()) {
   User user = entry.getValue();
   usersToReturn.add(new UserInfo(entry.getKey(), Resources.clone(user
- 

hadoop git commit: HADOOP-12207. Add support for pylint (Kengo Seki via aw)

2015-07-22 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-12111 27a2328c5 -> b8750c685


HADOOP-12207. Add support for pylint (Kengo Seki via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8750c68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8750c68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8750c68

Branch: refs/heads/HADOOP-12111
Commit: b8750c6854fae945798eb8530b1804669d863644
Parents: 27a2328
Author: Allen Wittenauer a...@apache.org
Authored: Wed Jul 22 12:55:10 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Wed Jul 22 12:55:10 2015 -0700

--
 dev-support/test-patch.d/pylint.sh | 186 
 1 file changed, 186 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8750c68/dev-support/test-patch.d/pylint.sh
--
diff --git a/dev-support/test-patch.d/pylint.sh 
b/dev-support/test-patch.d/pylint.sh
new file mode 100755
index 000..8542dad
--- /dev/null
+++ b/dev-support/test-patch.d/pylint.sh
@@ -0,0 +1,186 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_plugin pylint
+
+PYLINT_TIMER=0
+
+PYLINT=${PYLINT:-$(which pylint 2>/dev/null)}
+
+function pylint_usage
+{
+  echo "Pylint specific:"
+  echo "--pylint=<path> path to pylint executable"
+}
+
+function pylint_parse_args
+{
+  local i
+
+  for i in "$@"; do
+case ${i} in
+--pylint=*)
+  PYLINT=${i#*=}
+;;
+esac
+  done
+}
+
+function pylint_filefilter
+{
+  local filename=$1
+
+  if [[ ${filename} =~ \.py$ ]]; then
+add_test pylint
+  fi
+}
+
+function pylint_preapply
+{
+  local i
+
+  verify_needed_test pylint
+  if [[ $? == 0 ]]; then
+return 0
+  fi
+
+  big_console_header pylint plugin: prepatch
+
+  if [[ ! -x ${PYLINT} ]]; then
+    yetus_error "${PYLINT} does not exist."
+return 0
+  fi
+
+  start_clock
+
+  echo "Running pylint against modified python scripts."
+  pushd "${BASEDIR}" >/dev/null
+  for i in ${CHANGED_FILES}; do
+    if [[ ${i} =~ \.py$ && -f ${i} ]]; then
+      ${PYLINT} --indent-string="  " --output-format=parseable --reports=n ${i} 2>/dev/null |
+        ${AWK} '1<NR' >> "${PATCH_DIR}/branchpylint-result.txt"
+    fi
+  done
+  popd >/dev/null
+  # keep track of how much as elapsed for us already
+  PYLINT_TIMER=$(stop_clock)
+  return 0
+}
+
+function pylint_calcdiffs
+{
+  local orig=$1
+  local new=$2
+  local diffout=$3
+  local tmp=${PATCH_DIR}/pl.$$.${RANDOM}
+  local count=0
+  local j
+
+  # first, pull out just the errors
+  # shellcheck disable=SC2016
+  ${AWK} -F: '{print $NF}' ${orig} > ${tmp}.branch
+
+  # shellcheck disable=SC2016
+  ${AWK} -F: '{print $NF}' ${new} > ${tmp}.patch
+
+  # compare the errors, generating a string of line
+  # numbers. Sorry portability: GNU diff makes this too easy
+  ${DIFF} --unchanged-line-format="" \
+     --old-line-format="" \
+     --new-line-format="%dn " \
+     ${tmp}.branch \
+     ${tmp}.patch > ${tmp}.lined
+
+  # now, pull out those lines of the raw output
+  # shellcheck disable=SC2013
+  for j in $(cat ${tmp}.lined); do
+# shellcheck disable=SC2086
+    head -${j} ${new} | tail -1 >> ${diffout}
+  done
+
+  if [[ -f ${diffout} ]]; then
+# shellcheck disable=SC2016
+    count=$(${AWK} -F: 'BEGIN {sum=0} 2<NF {sum+=1} END {print sum}' ${diffout})
+  fi
+  rm ${tmp}.branch ${tmp}.patch ${tmp}.lined 2>/dev/null
+  echo ${count}
+}
+
+function pylint_postapply
+{
+  local i
+  local msg
+  local numPrepatch
+  local numPostpatch
+  local diffPostpatch
+
+  verify_needed_test pylint
+  if [[ $? == 0 ]]; then
+return 0
+  fi
+
+  big_console_header pylint plugin: postpatch
+
+  if [[ ! -x ${PYLINT} ]]; then
+    yetus_error "${PYLINT} is not available."
+    add_vote_table 0 pylint "Pylint was not available."
+return 0
+  fi
+
+  start_clock
+
+  # add our previous elapsed to our new timer
+  # by setting the clock back
+  offset_clock ${PYLINT_TIMER}
+
+  echo "Running pylint against modified python scripts."
+  # we re-check this in case one has 

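The warning-accounting idea in pylint_calcdiffs above generalizes beyond shell: strip each result line down to its message, then count patch-run messages that the branch run did not produce. The Java sketch below is a simplified rendering of that idea (a set comparison rather than the positional GNU diff the script uses); the two file arguments are placeholders for the branch and patch result files.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class LintDiffSketch {
      // Keep only the message after the last ':' (mirrors awk -F: '{print $NF}').
      static String messageOf(String line) {
        int idx = line.lastIndexOf(':');
        return idx >= 0 ? line.substring(idx + 1).trim() : line.trim();
      }

      public static void main(String[] args) throws IOException {
        List<String> branch = Files.readAllLines(Paths.get(args[0]));  // branch results
        List<String> patch = Files.readAllLines(Paths.get(args[1]));   // patch results

        Set<String> known = new HashSet<>();
        for (String line : branch) {
          known.add(messageOf(line));
        }

        // Count warnings that appear only in the patched run.
        long newWarnings = patch.stream()
            .map(LintDiffSketch::messageOf)
            .filter(msg -> !known.contains(msg))
            .count();
        System.out.println(newWarnings);
      }
    }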
hadoop git commit: YARN-3956. Fix TestNodeManagerHardwareUtils fails on Mac (Varun Vasudev via wangda)

2015-07-22 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8376ea329 -> 06e5dd2c8


YARN-3956. Fix TestNodeManagerHardwareUtils fails on Mac (Varun Vasudev via 
wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06e5dd2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06e5dd2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06e5dd2c

Branch: refs/heads/trunk
Commit: 06e5dd2c84c49460884757b56980b1b9c58af996
Parents: 8376ea3
Author: Wangda Tan wan...@apache.org
Authored: Wed Jul 22 11:59:31 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Jul 22 12:01:41 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 2 ++
 .../server/nodemanager/util/TestNodeManagerHardwareUtils.java   | 5 +
 2 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06e5dd2c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index eb52745..a5fd4e7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -659,6 +659,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3954. Fix 
TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml.
 (varun saxena via rohithsharmaks)
 
+YARN-3956. Fix TestNodeManagerHardwareUtils fails on Mac (Varun Vasudev 
via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06e5dd2c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
index 5bf8cb7..84a045d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
@@ -30,6 +30,11 @@ import org.mockito.Mockito;
 public class TestNodeManagerHardwareUtils {
 
   static class TestResourceCalculatorPlugin extends ResourceCalculatorPlugin {
+
+TestResourceCalculatorPlugin() {
+  super(null);
+}
+
 @Override
 public long getVirtualMemorySize() {
   return 0;



hadoop git commit: YARN-3954. Fix TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml. (varun saxena via rohithsharmaks)

2015-07-22 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3800e25d6 -> be2334ba3


YARN-3954. Fix 
TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml. (varun 
saxena via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be2334ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be2334ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be2334ba

Branch: refs/heads/branch-2
Commit: be2334ba3ab64298de471930ec2242168574f733
Parents: 3800e25
Author: rohithsharmaks rohithsharm...@apache.org
Authored: Thu Jul 23 02:10:45 2015 +0530
Committer: rohithsharmaks rohithsharm...@apache.org
Committed: Thu Jul 23 02:10:45 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../src/main/resources/yarn-default.xml   | 10 ++
 2 files changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be2334ba/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3f093bd..be74800 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -604,6 +604,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo 
 should based on total-used-resources. (Bibin A Chundatt via wangda)
 
+YARN-3954. Fix 
TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml.
+(varun saxena via rohithsharmaks)
+
 YARN-3956. Fix TestNodeManagerHardwareUtils fails on Mac (Varun Vasudev 
via wangda)
 
 Release 2.7.2 - UNRELEASED

http://git-wip-us.apache.org/repos/asf/hadoop/blob/be2334ba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 2edeef0..d586f51 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2131,4 +2131,14 @@
     <value>false</value>
   </property>
 
+  <property>
+    <description>
+    Defines maximum application priority in a cluster.
+    If an application is submitted with a priority higher than this value, it will be
+    reset to this maximum value.
+    </description>
+    <name>yarn.cluster.max-application-priority</name>
+    <value>0</value>
+  </property>
+
 </configuration>
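To make the reset-to-maximum rule in the description above concrete, here is a small hedged sketch that clamps a requested priority against yarn.cluster.max-application-priority. The clamp helper and class are illustrative only, not the ResourceManager's actual code path; only the property key and its default of 0 come from the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.records.Priority;

    public class PriorityClampSketch {
      // Property key added to yarn-default.xml by this change.
      static final String MAX_APP_PRIORITY = "yarn.cluster.max-application-priority";

      // Hypothetical helper mirroring the documented behaviour: anything above
      // the cluster maximum is reset to the maximum.
      static Priority clamp(Configuration conf, Priority requested) {
        int max = conf.getInt(MAX_APP_PRIORITY, 0);   // default 0, as in the XML above
        return Priority.newInstance(Math.min(requested.getPriority(), max));
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.setInt(MAX_APP_PRIORITY, 10);
        Priority effective = clamp(conf, Priority.newInstance(42));
        System.out.println("effective priority: " + effective.getPriority());   // 10
      }
    }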



hadoop git commit: YARN-3954. Fix TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml. (varun saxena via rohithsharmaks)

2015-07-22 Thread rohithsharmaks
Repository: hadoop
Updated Branches:
  refs/heads/trunk 76ec26de8 -> 8376ea329


YARN-3954. Fix 
TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml. (varun 
saxena via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8376ea32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8376ea32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8376ea32

Branch: refs/heads/trunk
Commit: 8376ea3297a3eab33df27454b18cf215cfb7c6ff
Parents: 76ec26d
Author: rohithsharmaks rohithsharm...@apache.org
Authored: Thu Jul 23 00:28:24 2015 +0530
Committer: rohithsharmaks rohithsharm...@apache.org
Committed: Thu Jul 23 00:28:24 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../src/main/resources/yarn-default.xml   | 10 ++
 2 files changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8376ea32/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f751862..eb52745 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -656,6 +656,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo 
 should based on total-used-resources. (Bibin A Chundatt via wangda)
 
+YARN-3954. Fix 
TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml.
+(varun saxena via rohithsharmaks)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8376ea32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 2edeef0..d586f51 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2131,4 +2131,14 @@
     <value>false</value>
   </property>
 
+  <property>
+    <description>
+    Defines maximum application priority in a cluster.
+    If an application is submitted with a priority higher than this value, it will be
+    reset to this maximum value.
+    </description>
+    <name>yarn.cluster.max-application-priority</name>
+    <value>0</value>
+  </property>
+
 </configuration>



hadoop git commit: HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)

2015-07-22 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 94c6a4aa8 -> 402532628


HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40253262
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40253262
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40253262

Branch: refs/heads/trunk
Commit: 4025326288c0167ff300d4f7ecc96f84ed141912
Parents: 94c6a4a
Author: yliu y...@apache.org
Authored: Wed Jul 22 15:16:50 2015 +0800
Committer: yliu y...@apache.org
Committed: Wed Jul 22 15:16:50 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
 .../hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java| 5 +++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40253262/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 50803de..66cb89e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -740,6 +740,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8495. Consolidate append() related implementation into a single class.
 (Rakesh R via wheat9)
 
+HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40253262/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
index a465f85..c486095 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
@@ -22,9 +22,9 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.GregorianCalendar;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.TreeMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,6 +36,7 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdfs.DFSUtil;
 
 import com.google.common.annotations.VisibleForTesting;
+
 import org.slf4j.Logger;
 
 /**
@@ -47,7 +48,7 @@ import org.slf4j.Logger;
 class InvalidateBlocks {
   /** Mapping: DatanodeInfo -> Collection of Blocks */
   private final Map<DatanodeInfo, LightWeightHashSet<Block>> node2blocks =
-      new TreeMap<DatanodeInfo, LightWeightHashSet<Block>>();
+      new HashMap<DatanodeInfo, LightWeightHashSet<Block>>();
   /** The total number of blocks in the map. */
   private long numBlocks = 0L;
 


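The one-line change above is easy to miss, so a hedged sketch of the trade-off: node2blocks is only ever consulted per datanode key and no caller needs the datanodes in sorted order, so an unordered HashMap gives O(1) expected lookups instead of TreeMap's O(log n) comparisons. The types below are simplified stand-ins, not the actual HDFS classes.

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    class Node2BlocksSketch {
      // Simplified stand-in: datanode id -> block ids queued for invalidation.
      private final Map<String, Set<Long>> node2blocks = new HashMap<>();

      void add(String datanodeId, long blockId) {
        node2blocks.computeIfAbsent(datanodeId, k -> new HashSet<>()).add(blockId);
      }

      // Per-key removal is the hot path; a hash lookup avoids the tree's
      // key comparisons on every call.
      Set<Long> pollBlocksFor(String datanodeId) {
        Set<Long> blocks = node2blocks.remove(datanodeId);
        return blocks != null ? blocks : new HashSet<Long>();
      }
    }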

hadoop git commit: HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)

2015-07-22 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 03d68b557 -> 930e344d7


HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/930e344d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/930e344d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/930e344d

Branch: refs/heads/branch-2
Commit: 930e344d7885cc392eb3ee01e59b748f175fff21
Parents: 03d68b5
Author: yliu y...@apache.org
Authored: Wed Jul 22 15:15:08 2015 +0800
Committer: yliu y...@apache.org
Committed: Wed Jul 22 15:15:08 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
 .../hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java| 5 +++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/930e344d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ea7bdc4..1383ea9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -397,6 +397,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8495. Consolidate append() related implementation into a single class.
 (Rakesh R via wheat9)
 
+HDFS-8795. Improve InvalidateBlocks#node2blocks. (yliu)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/930e344d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
index e357528..c60266e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
@@ -22,9 +22,9 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Calendar;
 import java.util.GregorianCalendar;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.TreeMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -36,6 +36,7 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdfs.DFSUtil;
 
 import com.google.common.annotations.VisibleForTesting;
+
 import org.slf4j.Logger;
 
 /**
@@ -47,7 +48,7 @@ import org.slf4j.Logger;
 class InvalidateBlocks {
   /** Mapping: DatanodeInfo -> Collection of Blocks */
   private final Map<DatanodeInfo, LightWeightHashSet<Block>> node2blocks =
-      new TreeMap<DatanodeInfo, LightWeightHashSet<Block>>();
+      new HashMap<DatanodeInfo, LightWeightHashSet<Block>>();
   /** The total number of blocks in the map. */
   private long numBlocks = 0L;
 



hadoop git commit: HDFS-8797. WebHdfsFileSystem creates too many connections for pread. Contributed by Jing Zhao.

2015-07-22 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 06e5dd2c8 -> e91ccfad0


HDFS-8797. WebHdfsFileSystem creates too many connections for pread. 
Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e91ccfad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e91ccfad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e91ccfad

Branch: refs/heads/trunk
Commit: e91ccfad07ec5b5674a84009772dd31a82b4e4de
Parents: 06e5dd2
Author: Jing Zhao ji...@apache.org
Authored: Wed Jul 22 17:42:31 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Jul 22 17:42:31 2015 -0700

--
 .../hadoop/hdfs/web/ByteRangeInputStream.java   | 57 +---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../hdfs/web/TestByteRangeInputStream.java  | 35 ++--
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 41 ++
 4 files changed, 113 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e91ccfad/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
index 395c9f6..bb581db 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.web;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.HttpURLConnection;
@@ -65,6 +66,16 @@ public abstract class ByteRangeInputStream extends 
FSInputStream {
 final boolean resolved) throws IOException;
   }
 
+  static class InputStreamAndFileLength {
+final Long length;
+final InputStream in;
+
+InputStreamAndFileLength(Long length, InputStream in) {
+  this.length = length;
+  this.in = in;
+}
+  }
+
   enum StreamStatus {
 NORMAL, SEEK, CLOSED
   }
@@ -101,7 +112,9 @@ public abstract class ByteRangeInputStream extends 
FSInputStream {
 if (in != null) {
   in.close();
 }
-in = openInputStream();
+InputStreamAndFileLength fin = openInputStream(startPos);
+in = fin.in;
+fileLength = fin.length;
 status = StreamStatus.NORMAL;
 break;
   case CLOSED:
@@ -111,20 +124,22 @@ public abstract class ByteRangeInputStream extends 
FSInputStream {
   }
 
   @VisibleForTesting
-  protected InputStream openInputStream() throws IOException {
+  protected InputStreamAndFileLength openInputStream(long startOffset)
+  throws IOException {
 // Use the original url if no resolved url exists, eg. if
 // it's the first time a request is made.
 final boolean resolved = resolvedURL.getURL() != null;
 final URLOpener opener = resolved? resolvedURL: originalURL;
 
-final HttpURLConnection connection = opener.connect(startPos, resolved);
+final HttpURLConnection connection = opener.connect(startOffset, resolved);
 resolvedURL.setURL(getResolvedUrl(connection));
 
 InputStream in = connection.getInputStream();
+final Long length;
     final Map<String, List<String>> headers = connection.getHeaderFields();
 if (isChunkedTransferEncoding(headers)) {
   // file length is not known
-  fileLength = null;
+  length = null;
 } else {
   // for non-chunked transfer-encoding, get content-length
   final String cl = connection.getHeaderField(HttpHeaders.CONTENT_LENGTH);
@@ -133,14 +148,14 @@ public abstract class ByteRangeInputStream extends 
FSInputStream {
 + headers);
   }
   final long streamlength = Long.parseLong(cl);
-  fileLength = startPos + streamlength;
+  length = startOffset + streamlength;
 
   // Java has a bug with 2GB request streams.  It won't bounds check
   // the reads so the transfer blocks until the server times out
   in = new BoundedInputStream(in, streamlength);
 }
 
-return in;
+return new InputStreamAndFileLength(length, in);
   }
 
   private static boolean isChunkedTransferEncoding(
@@ -204,6 +219,36 @@ public abstract class ByteRangeInputStream extends 
FSInputStream {
 }
   }
 
+  @Override
+  public int read(long position, byte[] buffer, int offset, int length)
+  throws IOException {
+try (InputStream in = openInputStream(position).in) {
+  return in.read(buffer, offset, length);
+}
+  }
+
+  @Override
+  public 

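A hedged usage sketch for the positioned read added above: with this patch, a pread on a WebHDFS stream opens one short-lived connection at the requested offset instead of repeatedly reopening the shared stream, and the stream's own position is left untouched. The filesystem URI and path below are placeholders.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsPreadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder namenode address and file path.
        FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:50070"), conf);
        byte[] buf = new byte[4096];
        try (FSDataInputStream in = fs.open(new Path("/tmp/example.txt"))) {
          // Positioned read at offset 1024; does not move the stream position.
          int n = in.read(1024L, buf, 0, buf.length);
          System.out.println("read " + n + " bytes at offset 1024");
        }
      }
    }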
hadoop git commit: YARN-2019. Retrospect on decision of making RM crashed if any exception throw in ZKRMStateStore. Contributed by Jian He.

2015-07-22 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/trunk e91ccfad0 -> ee98d6354


YARN-2019. Retrospect on decision of making RM crashed if any exception throw 
in ZKRMStateStore. Contributed by Jian He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee98d635
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee98d635
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee98d635

Branch: refs/heads/trunk
Commit: ee98d6354bbbcd0832d3e539ee097f837e5d0e31
Parents: e91ccfa
Author: Junping Du junping...@apache.org
Authored: Wed Jul 22 17:52:35 2015 -0700
Committer: Junping Du junping...@apache.org
Committed: Wed Jul 22 17:52:35 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../apache/hadoop/yarn/conf/YarnConfiguration.java  | 11 +++
 .../src/main/resources/yarn-default.xml | 16 
 .../resourcemanager/recovery/RMStateStore.java  |  9 +++--
 4 files changed, 37 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee98d635/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a5fd4e7..93962f1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -144,6 +144,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2003. Support for Application priority : Changes in RM and Capacity 
 Scheduler. (Sunil G via wangda)
 
+YARN-2019. Retrospect on decision of making RM crashed if any exception 
throw 
+in ZKRMStateStore. (Jian He via junping_du)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee98d635/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 060635f..9832729 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -401,6 +401,11 @@ public class YarnConfiguration extends Configuration {
   public static final String RECOVERY_ENABLED = RM_PREFIX + "recovery.enabled";
   public static final boolean DEFAULT_RM_RECOVERY_ENABLED = false;
 
+  public static final String YARN_FAIL_FAST = YARN_PREFIX + "fail-fast";
+  public static final boolean DEFAULT_YARN_FAIL_FAST = true;
+
+  public static final String RM_FAIL_FAST = RM_PREFIX + "fail-fast";
+
   @Private
   public static final String RM_WORK_PRESERVING_RECOVERY_ENABLED = RM_PREFIX
       + "work-preserving-recovery.enabled";
@@ -2018,6 +2023,12 @@ public class YarnConfiguration extends Configuration {
 YARN_HTTP_POLICY_DEFAULT));
   }
 
+  public static boolean shouldRMFailFast(Configuration conf) {
+return conf.getBoolean(YarnConfiguration.RM_FAIL_FAST,
+conf.getBoolean(YarnConfiguration.YARN_FAIL_FAST,
+YarnConfiguration.DEFAULT_YARN_FAIL_FAST));
+  }
+
   @Private
   public static String getClusterId(Configuration conf) {
 String clusterId = conf.get(YarnConfiguration.RM_CLUSTER_ID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee98d635/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index d586f51..8b3a3af 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -324,6 +324,22 @@
   </property>
 
   <property>
+    <description>Should RM fail fast if it encounters any errors. By defalt, it
+      points to ${yarn.fail-fast}. Errors include:
+      1) exceptions when state-store write/read operations fails.
+    </description>
+    <name>yarn.resourcemanager.fail-fast</name>
+    <value>${yarn.fail-fast}</value>
+  </property>
+
+  <property>
+    <description>Should YARN fail fast if it encounters any errors.
+    </description>
+    <name>yarn.fail-fast</name>
+    <value>true</value>
+  </property>
+
+  <property>
     <description>Enable RM work preserving recovery. This 

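The shouldRMFailFast helper in the YarnConfiguration hunk above resolves the RM-specific key first and falls back to the YARN-wide key, whose hard default is true. A hedged, self-contained sketch of that lookup order (the key strings mirror the diff; the standalone class is illustrative only):

    import org.apache.hadoop.conf.Configuration;

    public class FailFastLookupSketch {
      static boolean shouldRMFailFast(Configuration conf) {
        // RM-specific key wins; otherwise defer to the YARN-wide key, default true.
        return conf.getBoolean("yarn.resourcemanager.fail-fast",
            conf.getBoolean("yarn.fail-fast", true));
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration(false);   // no default resources
        System.out.println(shouldRMFailFast(conf));      // true  (hard default)
        conf.setBoolean("yarn.fail-fast", false);
        System.out.println(shouldRMFailFast(conf));      // false (YARN-wide override)
        conf.setBoolean("yarn.resourcemanager.fail-fast", true);
        System.out.println(shouldRMFailFast(conf));      // true  (RM-specific wins)
      }
    }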
[3/6] hadoop git commit: YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo should based on total-used-resources. (Bibin A Chundatt via wangda)

2015-07-22 Thread arp
YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo 
should based on total-used-resources. (Bibin A Chundatt via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76ec26de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76ec26de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76ec26de

Branch: refs/heads/HDFS-7240
Commit: 76ec26de8099dc48ce3812c595b7ab857a600442
Parents: 1b3bceb
Author: Wangda Tan wan...@apache.org
Authored: Wed Jul 22 11:54:02 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Jul 22 11:54:02 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../scheduler/SchedulerApplicationAttempt.java  |  2 +-
 .../scheduler/capacity/LeafQueue.java   |  8 ++-
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 64 
 4 files changed, 74 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76ec26de/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5100cdf..f751862 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -653,6 +653,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3885. ProportionalCapacityPreemptionPolicy doesn't preempt if queue 
is 
 more than 2 level. (Ajith S via wangda)
 
+YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo 
+should based on total-used-resources. (Bibin A Chundatt via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76ec26de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index cf543bd..317e61c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -598,7 +598,7 @@ public class SchedulerApplicationAttempt implements 
SchedulableEntity {
 AggregateAppResourceUsage runningResourceUsage =
 getRunningAggregateAppResourceUsage();
 Resource usedResourceClone =
-Resources.clone(attemptResourceUsage.getUsed());
+Resources.clone(attemptResourceUsage.getAllUsed());
 Resource reservedResourceClone =
 Resources.clone(attemptResourceUsage.getReserved());
 return ApplicationResourceUsageReport.newInstance(liveContainers.size(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76ec26de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 0ce4d68..5c283f4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -439,7 +439,7 @@ public class LeafQueue extends AbstractCSQueue {
     for (Map.Entry<String, User> entry : users.entrySet()) {
   User user = entry.getValue();
   usersToReturn.add(new UserInfo(entry.getKey(), Resources.clone(user
-  .getUsed()), user.getActiveApplications(), user
+  .getAllUsed()), user.getActiveApplications(), user
   

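The getUsed-to-getAllUsed switch above matters when node labels are in play: "used" for the default partition only understates what an application or user actually holds. A hedged sketch of the distinction, with a map standing in for the real ResourceUsage accounting and memory reduced to a single long:

    import java.util.HashMap;
    import java.util.Map;

    class ResourceUsageSketch {
      // partition label -> used memory in MB ("" is the default partition)
      private final Map<String, Long> usedByPartition = new HashMap<>();

      void incUsed(String partition, long mb) {
        usedByPartition.merge(partition, mb, Long::sum);
      }

      long getUsed() {        // default (no-label) partition only
        return usedByPartition.getOrDefault("", 0L);
      }

      long getAllUsed() {     // total across all partitions: what the report should show
        return usedByPartition.values().stream().mapToLong(Long::longValue).sum();
      }
    }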
[4/6] hadoop git commit: YARN-3954. Fix TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml. (varun saxena via rohithsharmaks)

2015-07-22 Thread arp
YARN-3954. Fix 
TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml. (varun 
saxena via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8376ea32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8376ea32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8376ea32

Branch: refs/heads/HDFS-7240
Commit: 8376ea3297a3eab33df27454b18cf215cfb7c6ff
Parents: 76ec26d
Author: rohithsharmaks rohithsharm...@apache.org
Authored: Thu Jul 23 00:28:24 2015 +0530
Committer: rohithsharmaks rohithsharm...@apache.org
Committed: Thu Jul 23 00:28:24 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../src/main/resources/yarn-default.xml   | 10 ++
 2 files changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8376ea32/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f751862..eb52745 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -656,6 +656,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3932. SchedulerApplicationAttempt#getResourceUsageReport and UserInfo 
 should based on total-used-resources. (Bibin A Chundatt via wangda)
 
+YARN-3954. Fix 
TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml.
+(varun saxena via rohithsharmaks)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8376ea32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 2edeef0..d586f51 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2131,4 +2131,14 @@
     <value>false</value>
   </property>
 
+  <property>
+    <description>
+    Defines maximum application priority in a cluster.
+    If an application is submitted with a priority higher than this value, it will be
+    reset to this maximum value.
+    </description>
+    <name>yarn.cluster.max-application-priority</name>
+    <value>0</value>
+  </property>
+
 </configuration>



[2/6] hadoop git commit: HADOOP-12184. Remove unused Linux-specific constants in NativeIO (Martin Walsh via Colin P. McCabe)

2015-07-22 Thread arp
HADOOP-12184. Remove unused Linux-specific constants in NativeIO (Martin Walsh 
via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b3bceb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b3bceb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b3bceb5

Branch: refs/heads/HDFS-7240
Commit: 1b3bceb58c8e536a75fa3f99cc3ceeaba91a07de
Parents: efa9724
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Jul 22 11:11:38 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Wed Jul 22 11:34:10 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java| 4 
 2 files changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b3bceb5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c0e5c92..ff7d2ad 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -713,6 +713,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12214. Parse 'HadoopArchive' commandline using cli Options.
 (vinayakumarb)
 
+HADOOP-12184. Remove unused Linux-specific constants in NativeIO (Martin
+Walsh via Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b3bceb5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index 688b955..77a40ea 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -67,9 +67,6 @@ public class NativeIO {
 public static final int O_APPEND   = 02000;
 public static final int O_NONBLOCK = 04000;
 public static final int O_SYNC   =  01;
-public static final int O_ASYNC  =  02;
-public static final int O_FSYNC = O_SYNC;
-public static final int O_NDELAY = O_NONBLOCK;
 
 // Flags for posix_fadvise() from bits/fcntl.h
 /* No further special treatment.  */
@@ -356,7 +353,6 @@ public class NativeIO {
   public static final int   S_IFREG  = 010;  /* regular */
   public static final int   S_IFLNK  = 012;  /* symbolic link */
   public static final int   S_IFSOCK = 014;  /* socket */
-  public static final int   S_IFWHT  = 016;  /* whiteout */
   public static final int S_ISUID = 0004000;  /* set user id on execution 
*/
   public static final int S_ISGID = 0002000;  /* set group id on execution 
*/
   public static final int S_ISVTX = 0001000;  /* save swapped text even 
after use */



[6/6] hadoop git commit: Merge remote-tracking branch 'apache-commit/trunk' into HDFS-7240

2015-07-22 Thread arp
Merge remote-tracking branch 'apache-commit/trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef128ee4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef128ee4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef128ee4

Branch: refs/heads/HDFS-7240
Commit: ef128ee4bc957bd0c7544865b8962c0b1288bccc
Parents: 43bed72 06e5dd2
Author: Arpit Agarwal a...@apache.org
Authored: Wed Jul 22 16:22:48 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Wed Jul 22 16:22:48 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  6 ++
 .../org/apache/hadoop/io/nativeio/NativeIO.java |  4 --
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |  8 ++-
 hadoop-yarn-project/CHANGES.txt |  8 +++
 .../src/main/resources/yarn-default.xml | 10 +++
 .../util/TestNodeManagerHardwareUtils.java  |  5 ++
 .../scheduler/SchedulerApplicationAttempt.java  |  2 +-
 .../scheduler/capacity/LeafQueue.java   |  8 ++-
 .../TestCapacitySchedulerNodeLabelUpdate.java   | 64 
 9 files changed, 106 insertions(+), 9 deletions(-)
--




[5/6] hadoop git commit: YARN-3956. Fix TestNodeManagerHardwareUtils fails on Mac (Varun Vasudev via wangda)

2015-07-22 Thread arp
YARN-3956. Fix TestNodeManagerHardwareUtils fails on Mac (Varun Vasudev via 
wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06e5dd2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06e5dd2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06e5dd2c

Branch: refs/heads/HDFS-7240
Commit: 06e5dd2c84c49460884757b56980b1b9c58af996
Parents: 8376ea3
Author: Wangda Tan wan...@apache.org
Authored: Wed Jul 22 11:59:31 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Jul 22 12:01:41 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 2 ++
 .../server/nodemanager/util/TestNodeManagerHardwareUtils.java   | 5 +
 2 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06e5dd2c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index eb52745..a5fd4e7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -659,6 +659,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3954. Fix 
TestYarnConfigurationFields#testCompareConfigurationClassAgainstXml.
 (varun saxena via rohithsharmaks)
 
+YARN-3956. Fix TestNodeManagerHardwareUtils fails on Mac (Varun Vasudev 
via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06e5dd2c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
index 5bf8cb7..84a045d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/util/TestNodeManagerHardwareUtils.java
@@ -30,6 +30,11 @@ import org.mockito.Mockito;
 public class TestNodeManagerHardwareUtils {
 
   static class TestResourceCalculatorPlugin extends ResourceCalculatorPlugin {
+
+TestResourceCalculatorPlugin() {
+  super(null);
+}
+
 @Override
 public long getVirtualMemorySize() {
   return 0;



hadoop git commit: YARN-2019. Retrospect on decision of making RM crashed if any exception throw in ZKRMStateStore. Contributed by Jian He. (cherry picked from commit ee98d6354bbbcd0832d3e539ee097f837

2015-07-22 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 be2334ba3 -> 6772c3f4d


YARN-2019. Retrospect on decision of making RM crashed if any exception throw 
in ZKRMStateStore. Contributed by Jian He.
(cherry picked from commit ee98d6354bbbcd0832d3e539ee097f837e5d0e31)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6772c3f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6772c3f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6772c3f4

Branch: refs/heads/branch-2
Commit: 6772c3f4ddfc14645f26497d346708e910711366
Parents: be2334b
Author: Junping Du junping...@apache.org
Authored: Wed Jul 22 17:52:35 2015 -0700
Committer: Junping Du junping...@apache.org
Committed: Wed Jul 22 17:57:16 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../apache/hadoop/yarn/conf/YarnConfiguration.java  | 11 +++
 .../src/main/resources/yarn-default.xml | 16 
 .../resourcemanager/recovery/RMStateStore.java  |  9 +++--
 4 files changed, 37 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6772c3f4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index be74800..7905676 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -89,6 +89,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2003. Support for Application priority : Changes in RM and Capacity 
 Scheduler. (Sunil G via wangda)
 
+YARN-2019. Retrospect on decision of making RM crashed if any exception 
throw 
+in ZKRMStateStore. (Jian He via junping_du)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6772c3f4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index cdfb393..7d602a6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -401,6 +401,11 @@ public class YarnConfiguration extends Configuration {
   public static final String RECOVERY_ENABLED = RM_PREFIX + "recovery.enabled";
   public static final boolean DEFAULT_RM_RECOVERY_ENABLED = false;
 
+  public static final String YARN_FAIL_FAST = YARN_PREFIX + "fail-fast";
+  public static final boolean DEFAULT_YARN_FAIL_FAST = true;
+
+  public static final String RM_FAIL_FAST = RM_PREFIX + "fail-fast";
+
   @Private
   public static final String RM_WORK_PRESERVING_RECOVERY_ENABLED = RM_PREFIX
       + "work-preserving-recovery.enabled";
@@ -2018,6 +2023,12 @@ public class YarnConfiguration extends Configuration {
 YARN_HTTP_POLICY_DEFAULT));
   }
 
+  public static boolean shouldRMFailFast(Configuration conf) {
+return conf.getBoolean(YarnConfiguration.RM_FAIL_FAST,
+conf.getBoolean(YarnConfiguration.YARN_FAIL_FAST,
+YarnConfiguration.DEFAULT_YARN_FAIL_FAST));
+  }
+
   @Private
   public static String getClusterId(Configuration conf) {
 String clusterId = conf.get(YarnConfiguration.RM_CLUSTER_ID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6772c3f4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index d586f51..8b3a3af 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -324,6 +324,22 @@
   </property>
 
   <property>
+    <description>Should RM fail fast if it encounters any errors. By defalt, it
+      points to ${yarn.fail-fast}. Errors include:
+      1) exceptions when state-store write/read operations fails.
+    </description>
+    <name>yarn.resourcemanager.fail-fast</name>
+    <value>${yarn.fail-fast}</value>
+  </property>
+
+  <property>
+    <description>Should YARN fail fast if it encounters any errors.
+    </description>
+    <name>yarn.fail-fast</name>
+    <value>true</value>
+  </property>
+

hadoop git commit: HDFS-8797. WebHdfsFileSystem creates too many connections for pread. Contributed by Jing Zhao.

2015-07-22 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6772c3f4d -> 71764a92c


HDFS-8797. WebHdfsFileSystem creates too many connections for pread. 
Contributed by Jing Zhao.

(cherry picked from commit e91ccfad07ec5b5674a84009772dd31a82b4e4de)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71764a92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71764a92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71764a92

Branch: refs/heads/branch-2
Commit: 71764a92c6755ec21223be894c5437b04c6fe3fb
Parents: 6772c3f
Author: Jing Zhao ji...@apache.org
Authored: Wed Jul 22 17:42:31 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Wed Jul 22 18:03:45 2015 -0700

--
 .../hadoop/hdfs/web/ByteRangeInputStream.java   | 57 +---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 .../hdfs/web/TestByteRangeInputStream.java  | 35 ++--
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 41 ++
 4 files changed, 113 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71764a92/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
index 9e3b29a..8e21b77 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.web;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.HttpURLConnection;
@@ -66,6 +67,16 @@ public abstract class ByteRangeInputStream extends 
FSInputStream {
 final boolean resolved) throws IOException;
   }
 
+  static class InputStreamAndFileLength {
+final Long length;
+final InputStream in;
+
+InputStreamAndFileLength(Long length, InputStream in) {
+  this.length = length;
+  this.in = in;
+}
+  }
+
   enum StreamStatus {
 NORMAL, SEEK, CLOSED
   }
@@ -102,7 +113,9 @@ public abstract class ByteRangeInputStream extends 
FSInputStream {
 if (in != null) {
   in.close();
 }
-in = openInputStream();
+InputStreamAndFileLength fin = openInputStream(startPos);
+in = fin.in;
+fileLength = fin.length;
 status = StreamStatus.NORMAL;
 break;
   case CLOSED:
@@ -112,31 +125,33 @@ public abstract class ByteRangeInputStream extends 
FSInputStream {
   }
 
   @VisibleForTesting
-  protected InputStream openInputStream() throws IOException {
+  protected InputStreamAndFileLength openInputStream(long startOffset)
+  throws IOException {
 // Use the original url if no resolved url exists, eg. if
 // it's the first time a request is made.
 final boolean resolved = resolvedURL.getURL() != null;
 final URLOpener opener = resolved? resolvedURL: originalURL;
 
-final HttpURLConnection connection = opener.connect(startPos, resolved);
+final HttpURLConnection connection = opener.connect(startOffset, resolved);
 resolvedURL.setURL(getResolvedUrl(connection));
 
 InputStream in = connection.getInputStream();
+final Long length;
     final Map<String, List<String>> headers = connection.getHeaderFields();
 if (isChunkedTransferEncoding(headers)) {
   // file length is not known
-  fileLength = null;
+  length = null;
 } else {
   // for non-chunked transfer-encoding, get content-length
   long streamlength = getStreamLength(connection, headers);
-  fileLength = startPos + streamlength;
+  length = startOffset + streamlength;
 
   // Java has a bug with 2GB request streams.  It won't bounds check
   // the reads so the transfer blocks until the server times out
   in = new BoundedInputStream(in, streamlength);
 }
 
-return in;
+return new InputStreamAndFileLength(length, in);
   }
 
   private static long getStreamLength(HttpURLConnection connection,
@@ -230,6 +245,36 @@ public abstract class ByteRangeInputStream extends 
FSInputStream {
 }
   }
 
+  @Override
+  public int read(long position, byte[] buffer, int offset, int length)
+  throws IOException {
+try (InputStream in = openInputStream(position).in) {
+  return in.read(buffer, offset, length);
+}
+  }
+
+  @Override
+  public void readFully(long position, byte[] buffer, int offset, int length)
+  throws IOException {
+