hadoop git commit: HDFS-9677. Rename generationStampV1/generationStampV2 to legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.

2016-01-27 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk fb238d7e5 -> 8a91109d1


HDFS-9677. Rename generationStampV1/generationStampV2 to 
legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a91109d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a91109d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a91109d

Branch: refs/heads/trunk
Commit: 8a91109d16394310f2568717f103e6fff7cbddb0
Parents: fb238d7
Author: Jing Zhao 
Authored: Wed Jan 27 15:48:47 2016 -0800
Committer: Jing Zhao 
Committed: Wed Jan 27 15:48:47 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/blockmanagement/BlockIdManager.java  | 83 ++--
 .../server/blockmanagement/BlockManager.java|  8 +-
 .../OutOfLegacyGenerationStampsException.java   | 38 +
 .../OutOfV1GenerationStampsException.java   | 38 -
 .../hdfs/server/common/HdfsServerConstants.java |  3 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  4 +-
 .../hdfs/server/namenode/FSImageFormat.java | 12 +--
 .../server/namenode/FSImageFormatProtobuf.java  | 12 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  8 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java | 13 +--
 .../hadoop-hdfs/src/main/proto/fsimage.proto|  4 +-
 .../blockmanagement/TestSequentialBlockId.java  | 18 ++---
 .../hdfs/server/namenode/TestEditLog.java   |  6 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |  4 +-
 .../hdfs/server/namenode/TestSaveNamespace.java |  2 +-
 17 files changed, 126 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a91109d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 097c051..7e75558 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -959,6 +959,9 @@ Release 2.9.0 - UNRELEASED
 HDFS-9541. Add hdfsStreamBuilder API to libhdfs to support 
defaultBlockSizes
 greater than 2 GB. (cmccabe via zhz)
 
+HDFS-9677. Rename generationStampV1/generationStampV2 to
+legacyGenerationStamp/generationStamp. (Mingliang Liu via jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a91109d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 9c71287..3f21d9b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -36,11 +36,11 @@ public class BlockIdManager {
* The global generation stamp for legacy blocks with randomly
* generated block IDs.
*/
-  private final GenerationStamp generationStampV1 = new GenerationStamp();
+  private final GenerationStamp legacyGenerationStamp = new GenerationStamp();
   /**
* The global generation stamp for this file system.
*/
-  private final GenerationStamp generationStampV2 = new GenerationStamp();
+  private final GenerationStamp generationStamp = new GenerationStamp();
   /**
* The value of the generation stamp when the first switch to sequential
* block IDs was made. Blocks with generation stamps below this value
@@ -49,7 +49,7 @@ public class BlockIdManager {
* (or initialized as an offset from the V1 (legacy) generation stamp on
* upgrade).
*/
-  private long generationStampV1Limit;
+  private long legacyGenerationStampLimit;
   /**
* The global block ID space for this file system.
*/
@@ -57,7 +57,8 @@ public class BlockIdManager {
   private final SequentialBlockGroupIdGenerator blockGroupIdGenerator;
 
   public BlockIdManager(BlockManager blockManager) {
-this.generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
+this.legacyGenerationStampLimit =
+HdfsConstants.GRANDFATHER_GENERATION_STAMP;
 this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager);
 this.blockGroupIdGenerator = new 
SequentialBlockGroupIdGenerator(blockManager);
   }
@@ -68,14 +69,14 @@ public class BlockIdManager {
* Should be invoked only during the first upgrade to
* sequential block IDs.
*/
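
For context on this rename: blocks created before HDFS switched to sequential block IDs keep "legacy" generation stamps, and legacyGenerationStampLimit records the stamp value at the switchover so the two ID spaces can be told apart. A minimal standalone sketch of that distinction, with illustrative names and values rather than the actual BlockIdManager API:

public class GenerationStampSketch {
  // Sentinel meaning "no legacy limit recorded yet"; the real constant is
  // HdfsConstants.GRANDFATHER_GENERATION_STAMP (the value here is illustrative).
  static final long GRANDFATHER_GENERATION_STAMP = 0;

  // Generation stamps below this limit belong to legacy blocks that still
  // carry randomly generated block IDs.
  private long legacyGenerationStampLimit = GRANDFATHER_GENERATION_STAMP;

  void recordSwitchToSequentialIds(long stampAtSwitch) {
    this.legacyGenerationStampLimit = stampAtSwitch;
  }

  boolean isLegacyBlock(long blockGenerationStamp) {
    return blockGenerationStamp < legacyGenerationStampLimit;
  }

  public static void main(String[] args) {
    GenerationStampSketch ids = new GenerationStampSketch();
    ids.recordSwitchToSequentialIds(500);
    System.out.println(ids.isLegacyBlock(100)); // true: predates the switch
    System.out.println(ids.isLegacyBlock(900)); // false: sequential-ID era
  }
}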

hadoop git commit: HDFS-9677. Rename generationStampV1/generationStampV2 to legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.

2016-01-27 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 da18bbeda -> d798f


HDFS-9677. Rename generationStampV1/generationStampV2 to 
legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d798
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d798
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d798

Branch: refs/heads/branch-2
Commit: d798fa85d43248cb6eb2dd0eb321bfc45b2f
Parents: da18bbed
Author: Jing Zhao 
Authored: Wed Jan 27 15:51:10 2016 -0800
Committer: Jing Zhao 
Committed: Wed Jan 27 15:51:10 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/blockmanagement/BlockIdManager.java  | 83 ++--
 .../OutOfLegacyGenerationStampsException.java   | 38 +
 .../OutOfV1GenerationStampsException.java   | 38 -
 .../hdfs/server/common/HdfsServerConstants.java |  3 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  4 +-
 .../hdfs/server/namenode/FSImageFormat.java | 12 +--
 .../server/namenode/FSImageFormatProtobuf.java  | 12 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  4 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|  4 +-
 .../blockmanagement/TestSequentialBlockId.java  | 18 ++---
 .../hdfs/server/namenode/TestEditLog.java   |  6 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |  4 +-
 .../hdfs/server/namenode/TestSaveNamespace.java |  2 +-
 15 files changed, 119 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d798/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e9300db..c921fc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -56,6 +56,9 @@ Release 2.9.0 - UNRELEASED
 HDFS-9541. Add hdfsStreamBuilder API to libhdfs to support 
defaultBlockSizes
 greater than 2 GB. (cmccabe via zhz)
 
+HDFS-9677. Rename generationStampV1/generationStampV2 to
+legacyGenerationStamp/generationStamp. (Mingliang Liu via jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d798/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index b7742ed..87572b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -36,11 +36,11 @@ public class BlockIdManager {
* The global generation stamp for legacy blocks with randomly
* generated block IDs.
*/
-  private final GenerationStamp generationStampV1 = new GenerationStamp();
+  private final GenerationStamp legacyGenerationStamp = new GenerationStamp();
   /**
* The global generation stamp for this file system.
*/
-  private final GenerationStamp generationStampV2 = new GenerationStamp();
+  private final GenerationStamp generationStamp = new GenerationStamp();
   /**
* The value of the generation stamp when the first switch to sequential
* block IDs was made. Blocks with generation stamps below this value
@@ -49,14 +49,15 @@ public class BlockIdManager {
* (or initialized as an offset from the V1 (legacy) generation stamp on
* upgrade).
*/
-  private long generationStampV1Limit;
+  private long legacyGenerationStampLimit;
   /**
* The global block ID space for this file system.
*/
   private final SequentialBlockIdGenerator blockIdGenerator;
 
   public BlockIdManager(BlockManager blockManager) {
-this.generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
+this.legacyGenerationStampLimit =
+HdfsConstants.GRANDFATHER_GENERATION_STAMP;
 this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager);
   }
 
@@ -66,14 +67,14 @@ public class BlockIdManager {
* Should be invoked only during the first upgrade to
* sequential block IDs.
*/
-  public long upgradeGenerationStampToV2() {
-Preconditions.checkState(generationStampV2.getCurrentValue() ==
+  public long upgradeLegacyGenerationStamp() {
+Preconditions.checkState(generationStamp.getCurrentValue() ==
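
As the hunk above shows, the renamed upgradeLegacyGenerationStamp() (formerly upgradeGenerationStampToV2()) opens with a Preconditions.checkState guard so it can run only once, during the first upgrade to sequential block IDs. A plain-Java sketch of that one-shot guard, substituting IllegalStateException for Guava's Preconditions; the names and the stamp offset are illustrative:

public class OneShotUpgrade {
  private static final long LAST_RESERVED_STAMP = 0;
  private long generationStamp = LAST_RESERVED_STAMP;

  // Legal only while the stamp still holds its initial reserved value; a
  // second call means the upgrade already ran, which is a programming error.
  long upgradeLegacyGenerationStamp() {
    if (generationStamp != LAST_RESERVED_STAMP) {
      throw new IllegalStateException(
          "upgrade may only run once; current stamp: " + generationStamp);
    }
    generationStamp = 1_000_000; // jump past the legacy range (illustrative)
    return generationStamp;
  }

  public static void main(String[] args) {
    OneShotUpgrade u = new OneShotUpgrade();
    System.out.println(u.upgradeLegacyGenerationStamp()); // succeeds once
    try {
      u.upgradeLegacyGenerationStamp();
    } catch (IllegalStateException expected) {
      System.out.println("second call rejected: " + expected.getMessage());
    }
  }
}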

hadoop git commit: MAPREDUCE-6431. JobClient should be an AutoClosable (haibochen via rkanter)

2016-01-27 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d798f -> a99fa7449


MAPREDUCE-6431. JobClient should be an AutoClosable (haibochen via rkanter)

(cherry picked from commit dca0dc8ac28e843acd8b79c9560245638a539fde)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a99fa744
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a99fa744
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a99fa744

Branch: refs/heads/branch-2
Commit: a99fa744909ef69b968f24def5138d2f3beb568f
Parents: d798222
Author: Robert Kanter 
Authored: Wed Jan 27 17:11:07 2016 -0800
Committer: Robert Kanter 
Committed: Wed Jan 27 17:11:41 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt  |  2 ++
 .../test/java/org/apache/hadoop/mapred/TestJobClient.java | 10 ++
 .../src/main/java/org/apache/hadoop/mapred/JobClient.java |  3 ++-
 3 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a99fa744/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 4dcad88..e92797b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,8 @@ Release 2.9.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6431. JobClient should be an AutoClosable (haibochen via rkanter)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a99fa744/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
index b18b531..bf37b03 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
@@ -87,4 +87,14 @@ public class TestJobClient {
 client.getClusterHandle().getStagingAreaDir().toString()
 .equals(client.getStagingAreaDir().toString()));
   }
+
+  /**
+   * Asks the compiler to check if JobClient is AutoClosable.
+   */
+  @Test(timeout = 1)
+  public void testAutoClosable() throws IOException {
+Configuration conf = new Configuration();
+try (JobClient jobClient = new JobClient(conf)) {
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a99fa744/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
index aae2e1b..0b0ab2b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
@@ -137,7 +137,7 @@ import org.apache.hadoop.util.ToolRunner;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class JobClient extends CLI {
+public class JobClient extends CLI implements AutoCloseable {
 
   @InterfaceAudience.Private
   public static final String MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_KEY =
@@ -499,6 +499,7 @@ public class JobClient extends CLI {
   /**
* Close the JobClient.
*/
+  @Override
   public synchronized void close() throws IOException {
 cluster.close();
   }
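
The testAutoClosable case above leans on the compiler: a try-with-resources statement only accepts expressions whose type implements AutoCloseable, so the test stops compiling if the interface is ever removed from JobClient. A self-contained sketch of the pattern this change enables, with FakeClient standing in for JobClient (it is not a Hadoop class):

import java.io.IOException;

class FakeClient implements AutoCloseable {
  void submitJob(String name) {
    System.out.println("submitting " + name);
  }

  @Override
  public void close() throws IOException { // checked, like JobClient.close()
    System.out.println("cluster connection released");
  }
}

public class TryWithResourcesDemo {
  public static void main(String[] args) throws IOException {
    // close() runs automatically when the block exits, even on an exception.
    try (FakeClient client = new FakeClient()) {
      client.submitJob("wordcount");
    }
  }
}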



hadoop git commit: MAPREDUCE-6431. JobClient should be an AutoClosable (haibochen via rkanter)

2016-01-27 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk ec25c7f9c -> dca0dc8ac


MAPREDUCE-6431. JobClient should be an AutoClosable (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dca0dc8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dca0dc8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dca0dc8a

Branch: refs/heads/trunk
Commit: dca0dc8ac28e843acd8b79c9560245638a539fde
Parents: ec25c7f
Author: Robert Kanter 
Authored: Wed Jan 27 17:11:07 2016 -0800
Committer: Robert Kanter 
Committed: Wed Jan 27 17:11:07 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt  |  2 ++
 .../test/java/org/apache/hadoop/mapred/TestJobClient.java | 10 ++
 .../src/main/java/org/apache/hadoop/mapred/JobClient.java |  3 ++-
 3 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca0dc8a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 68564b6..3f85a9b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -311,6 +311,8 @@ Release 2.9.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6431. JobClient should be an AutoClosable (haibochen via rkanter)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca0dc8a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
index b18b531..bf37b03 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
@@ -87,4 +87,14 @@ public class TestJobClient {
 client.getClusterHandle().getStagingAreaDir().toString()
 .equals(client.getStagingAreaDir().toString()));
   }
+
+  /**
+   * Asks the compiler to check if JobClient is AutoClosable.
+   */
+  @Test(timeout = 1)
+  public void testAutoClosable() throws IOException {
+Configuration conf = new Configuration();
+try (JobClient jobClient = new JobClient(conf)) {
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca0dc8a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
index cf123c7..baa6221 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
@@ -137,7 +137,7 @@ import org.apache.hadoop.util.ToolRunner;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class JobClient extends CLI {
+public class JobClient extends CLI implements AutoCloseable {
 
   @InterfaceAudience.Private
   public static final String MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_KEY =
@@ -499,6 +499,7 @@ public class JobClient extends CLI {
   /**
* Close the JobClient.
*/
+  @Override
   public synchronized void close() throws IOException {
 cluster.close();
   }



hadoop git commit: HDFS-9654. Code refactoring for HDFS-8578.

2016-01-27 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 d83b124c9 -> f09b0fe5c


HDFS-9654. Code refactoring for HDFS-8578.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f09b0fe5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f09b0fe5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f09b0fe5

Branch: refs/heads/branch-2.8
Commit: f09b0fe5c4cbb2163eb999b3d2a19510a9d9609d
Parents: d83b124
Author: Tsz-Wo Nicholas Sze 
Authored: Thu Jan 28 10:56:01 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Thu Jan 28 11:01:47 2016 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hadoop/hdfs/server/common/Storage.java  |   3 +-
 .../server/datanode/BlockPoolSliceStorage.java  | 131 +
 .../hdfs/server/datanode/DataStorage.java   | 282 ++-
 .../hdfs/server/datanode/StorageLocation.java   |  15 +
 .../org/apache/hadoop/hdfs/TestReplication.java |   3 +-
 .../apache/hadoop/hdfs/UpgradeUtilities.java|   2 +-
 .../server/datanode/SimulatedFSDataset.java |   2 +-
 .../datanode/TestDataNodeHotSwapVolumes.java|  48 +++-
 .../hdfs/server/datanode/TestDataStorage.java   |   7 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   |   2 +-
 11 files changed, 297 insertions(+), 200 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f09b0fe5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b4b499f..83065fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1697,6 +1697,8 @@ Release 2.7.3 - UNRELEASED
 HDFS-9634. webhdfs client side exceptions don't provide enough details
 (Eric Payne via kihwal)
 
+HDFS-9654. Code refactoring for HDFS-8578.  (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f09b0fe5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 4dc6553..fa7e23b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -640,7 +640,8 @@ public abstract class Storage extends StorageInfo {
 rename(getLastCheckpointTmp(), curDir);
 return;
   default:
-throw new IOException("Unexpected FS state: " + curState);
+throw new IOException("Unexpected FS state: " + curState
++ " for storage directory: " + rootPath);
   }
 }
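
The Storage.java hunk above simply enriches the error message with the storage directory that was in the unexpected state. A standalone sketch of the same pattern, with illustrative stand-ins for curState and rootPath:

import java.io.IOException;

public class StorageStateCheck {
  enum StorageState { NORMAL, RECOVER_UPGRADE }

  // Report both the unexpected state and the directory it was observed in,
  // so an operator can locate the bad volume from the message alone.
  static void checkState(StorageState curState, String rootPath)
      throws IOException {
    switch (curState) {
      case NORMAL:
        return;
      default:
        throw new IOException("Unexpected FS state: " + curState
            + " for storage directory: " + rootPath);
    }
  }

  public static void main(String[] args) {
    try {
      checkState(StorageState.NORMAL, "/data/1/dfs");          // passes
      checkState(StorageState.RECOVER_UPGRADE, "/data/2/dfs"); // throws
    } catch (IOException e) {
      System.out.println(e.getMessage()); // names the offending directory
    }
  }
}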
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f09b0fe5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 1bb..acf10f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -18,10 +18,21 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -34,18 +45,9 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.util.Daemon;
 

hadoop git commit: HDFS-8999. Allow a file to be closed with COMMITTED but not yet COMPLETE blocks.

2016-01-27 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a99fa7449 -> b10d8ced2


HDFS-8999. Allow a file to be closed with COMMITTED but not yet COMPLETE blocks.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b10d8ced
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b10d8ced
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b10d8ced

Branch: refs/heads/branch-2
Commit: b10d8ced21a860390c46e7729a02b81d9f7b88e6
Parents: a99fa74
Author: Tsz-Wo Nicholas Sze 
Authored: Thu Jan 28 10:42:40 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Thu Jan 28 10:42:40 2016 +0800

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 41 ---
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   | 55 +--
 .../org/apache/hadoop/hdfs/DataStreamer.java|  3 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 ++
 .../server/blockmanagement/BlockManager.java| 31 +++-
 .../hdfs/server/namenode/FSDirAppendOp.java | 15 +++-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  8 +--
 .../hdfs/server/namenode/FSNamesystem.java  | 74 +++-
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 56 +++
 .../hdfs/server/namenode/LeaseManager.java  | 17 ++---
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 56 ++-
 .../hdfs/server/namenode/TestINodeFile.java |  6 +-
 .../hdfs/server/namenode/TestLeaseManager.java  |  4 +-
 15 files changed, 271 insertions(+), 106 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b10d8ced/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index b2dafbf..dae889a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -34,7 +34,6 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.lang.reflect.Proxy;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.Socket;
@@ -168,6 +167,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.RpcNoSuchMethodException;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
@@ -182,16 +182,15 @@ import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
 import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.net.InetAddresses;
-import org.apache.htrace.core.Tracer;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /
  * DFSClient can connect to a Hadoop Filesystem and
@@ -1355,17 +1354,43 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  /**
+   * Invoke namenode append RPC.
+   * It retries in case of {@link BlockNotYetCompleteException}.
+   */
+  private LastBlockWithStatus callAppend(String src,
+  EnumSetWritable<CreateFlag> flag) throws IOException {
+final long startTime = Time.monotonicNow();
+for(;;) {
+  try {
+return namenode.append(src, clientName, flag);
+  } catch(RemoteException re) {
+if (Time.monotonicNow() - startTime > 5000
+|| !RetriableException.class.getName().equals(
+re.getClassName())) {
+  throw re;
+}
+
+try { // sleep and retry
+  Thread.sleep(500);
+} catch (InterruptedException e) {
+  throw DFSUtilClient.toInterruptedIOException("callAppend", e);
+}
+  }
+}
+  }
+
   /** Method to get stream returned by append call */
   private DFSOutputStream callAppend(String src, EnumSet<CreateFlag> flag,
   Progressable progress, String[] favoredNodes) throws IOException {
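
The new callAppend() above retries the namenode append RPC for up to five seconds, sleeping 500 ms between attempts, as long as the remote failure is a RetriableException. A generic sketch of that retry-until-deadline pattern; the real code matches the exception by RemoteException.getClassName() and uses Time.monotonicNow(), while this sketch uses isInstance and currentTimeMillis, and all names are illustrative:

import java.io.InterruptedIOException;
import java.util.concurrent.Callable;

public class RetryUntilDeadline {
  // Retry op while the failure is the designated retriable type and the
  // 5-second budget (mirroring the diff) has not been exhausted.
  static <T> T retry(Callable<T> op, Class<? extends Exception> retriable)
      throws Exception {
    final long startMs = System.currentTimeMillis();
    for (;;) {
      try {
        return op.call();
      } catch (Exception e) {
        if (System.currentTimeMillis() - startMs > 5000
            || !retriable.isInstance(e)) {
          throw e;           // budget exhausted or non-retriable failure
        }
        try {
          Thread.sleep(500); // brief pause before the next attempt
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw new InterruptedIOException("retry interrupted");
        }
      }
    }
  }

  public static void main(String[] args) throws Exception {
    final int[] attempts = {0};
    System.out.println(retry(() -> {
      if (++attempts[0] < 3) {
        throw new IllegalStateException("not ready yet"); // retriable here
      }
      return "succeeded on attempt " + attempts[0];
    }, IllegalStateException.class));
  }
}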
 

hadoop git commit: HDFS-9654. Code refactoring for HDFS-8578.

2016-01-27 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/trunk dca0dc8ac -> 662e17b46


HDFS-9654. Code refactoring for HDFS-8578.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/662e17b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/662e17b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/662e17b4

Branch: refs/heads/trunk
Commit: 662e17b46a0f41ade6a304e12925b70b5d09fc2f
Parents: dca0dc8
Author: Tsz-Wo Nicholas Sze 
Authored: Thu Jan 28 10:56:01 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Thu Jan 28 10:58:03 2016 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hadoop/hdfs/server/common/Storage.java  |   3 +-
 .../server/datanode/BlockPoolSliceStorage.java  | 131 +
 .../hdfs/server/datanode/DataStorage.java   | 282 ++-
 .../hdfs/server/datanode/StorageLocation.java   |  15 +
 .../org/apache/hadoop/hdfs/TestReplication.java |   3 +-
 .../apache/hadoop/hdfs/UpgradeUtilities.java|   2 +-
 .../server/datanode/SimulatedFSDataset.java |   2 +-
 .../datanode/TestDataNodeHotSwapVolumes.java|  48 +++-
 .../hdfs/server/datanode/TestDataStorage.java   |   7 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   |   2 +-
 11 files changed, 297 insertions(+), 200 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/662e17b4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7e75558..a51dc15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2665,6 +2665,8 @@ Release 2.7.3 - UNRELEASED
 HDFS-9634. webhdfs client side exceptions don't provide enough details
 (Eric Payne via kihwal)
 
+HDFS-9654. Code refactoring for HDFS-8578.  (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/662e17b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 7b4b571..41719b9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -640,7 +640,8 @@ public abstract class Storage extends StorageInfo {
 rename(getLastCheckpointTmp(), curDir);
 return;
   default:
-throw new IOException("Unexpected FS state: " + curState);
+throw new IOException("Unexpected FS state: " + curState
++ " for storage directory: " + rootPath);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/662e17b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 1bb..acf10f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -18,10 +18,21 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -34,18 +45,9 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.util.Daemon;
 
-import 

[1/2] hadoop git commit: Revert "HDFS-9677. Rename generationStampV1/generationStampV2 to legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu."

2016-01-27 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8a91109d1 -> ec25c7f9c


Revert "HDFS-9677. Rename generationStampV1/generationStampV2 to 
legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu."

This reverts commit 8a91109d16394310f2568717f103e6fff7cbddb0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a957130
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a957130
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a957130

Branch: refs/heads/trunk
Commit: 3a9571308e99cc374681bbc451a517d41a150aa0
Parents: 8a91109
Author: Jing Zhao 
Authored: Wed Jan 27 16:31:19 2016 -0800
Committer: Jing Zhao 
Committed: Wed Jan 27 16:31:19 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 -
 .../server/blockmanagement/BlockIdManager.java  | 83 ++--
 .../server/blockmanagement/BlockManager.java|  8 +-
 .../OutOfLegacyGenerationStampsException.java   | 38 -
 .../OutOfV1GenerationStampsException.java   | 38 +
 .../hdfs/server/common/HdfsServerConstants.java |  3 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  4 +-
 .../hdfs/server/namenode/FSImageFormat.java | 12 +--
 .../server/namenode/FSImageFormatProtobuf.java  | 12 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  8 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java | 13 ++-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|  4 +-
 .../blockmanagement/TestSequentialBlockId.java  | 18 ++---
 .../hdfs/server/namenode/TestEditLog.java   |  6 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |  4 +-
 .../hdfs/server/namenode/TestSaveNamespace.java |  2 +-
 17 files changed, 134 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a957130/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7e75558..097c051 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -959,9 +959,6 @@ Release 2.9.0 - UNRELEASED
 HDFS-9541. Add hdfsStreamBuilder API to libhdfs to support 
defaultBlockSizes
 greater than 2 GB. (cmccabe via zhz)
 
-HDFS-9677. Rename generationStampV1/generationStampV2 to
-legacyGenerationStamp/generationStamp. (Mingliang Liu via jing9)
-
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a957130/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 3f21d9b..9c71287 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -36,11 +36,11 @@ public class BlockIdManager {
* The global generation stamp for legacy blocks with randomly
* generated block IDs.
*/
-  private final GenerationStamp legacyGenerationStamp = new GenerationStamp();
+  private final GenerationStamp generationStampV1 = new GenerationStamp();
   /**
* The global generation stamp for this file system.
*/
-  private final GenerationStamp generationStamp = new GenerationStamp();
+  private final GenerationStamp generationStampV2 = new GenerationStamp();
   /**
* The value of the generation stamp when the first switch to sequential
* block IDs was made. Blocks with generation stamps below this value
@@ -49,7 +49,7 @@ public class BlockIdManager {
* (or initialized as an offset from the V1 (legacy) generation stamp on
* upgrade).
*/
-  private long legacyGenerationStampLimit;
+  private long generationStampV1Limit;
   /**
* The global block ID space for this file system.
*/
@@ -57,8 +57,7 @@ public class BlockIdManager {
   private final SequentialBlockGroupIdGenerator blockGroupIdGenerator;
 
   public BlockIdManager(BlockManager blockManager) {
-this.legacyGenerationStampLimit =
-HdfsConstants.GRANDFATHER_GENERATION_STAMP;
+this.generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
 this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager);
 this.blockGroupIdGenerator = new 
SequentialBlockGroupIdGenerator(blockManager);
   }

[2/2] hadoop git commit: HDFS-9677. Rename generationStampV1/generationStampV2 to legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.

2016-01-27 Thread jing9
HDFS-9677. Rename generationStampV1/generationStampV2 to 
legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec25c7f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec25c7f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec25c7f9

Branch: refs/heads/trunk
Commit: ec25c7f9c7e60c077d8c4143253c20445fcdaecf
Parents: 3a95713
Author: Jing Zhao 
Authored: Wed Jan 27 16:34:40 2016 -0800
Committer: Jing Zhao 
Committed: Wed Jan 27 16:34:40 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/blockmanagement/BlockIdManager.java  | 83 ++--
 .../OutOfLegacyGenerationStampsException.java   | 38 +
 .../OutOfV1GenerationStampsException.java   | 38 -
 .../hdfs/server/common/HdfsServerConstants.java |  3 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  4 +-
 .../hdfs/server/namenode/FSImageFormat.java | 12 +--
 .../server/namenode/FSImageFormatProtobuf.java  | 12 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  4 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|  4 +-
 .../blockmanagement/TestSequentialBlockId.java  | 18 ++---
 .../hdfs/server/namenode/TestEditLog.java   |  6 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |  4 +-
 .../hdfs/server/namenode/TestSaveNamespace.java |  2 +-
 15 files changed, 119 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec25c7f9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 097c051..7e75558 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -959,6 +959,9 @@ Release 2.9.0 - UNRELEASED
 HDFS-9541. Add hdfsStreamBuilder API to libhdfs to support 
defaultBlockSizes
 greater than 2 GB. (cmccabe via zhz)
 
+HDFS-9677. Rename generationStampV1/generationStampV2 to
+legacyGenerationStamp/generationStamp. (Mingliang Liu via jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec25c7f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 9c71287..3f21d9b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -36,11 +36,11 @@ public class BlockIdManager {
* The global generation stamp for legacy blocks with randomly
* generated block IDs.
*/
-  private final GenerationStamp generationStampV1 = new GenerationStamp();
+  private final GenerationStamp legacyGenerationStamp = new GenerationStamp();
   /**
* The global generation stamp for this file system.
*/
-  private final GenerationStamp generationStampV2 = new GenerationStamp();
+  private final GenerationStamp generationStamp = new GenerationStamp();
   /**
* The value of the generation stamp when the first switch to sequential
* block IDs was made. Blocks with generation stamps below this value
@@ -49,7 +49,7 @@ public class BlockIdManager {
* (or initialized as an offset from the V1 (legacy) generation stamp on
* upgrade).
*/
-  private long generationStampV1Limit;
+  private long legacyGenerationStampLimit;
   /**
* The global block ID space for this file system.
*/
@@ -57,7 +57,8 @@ public class BlockIdManager {
   private final SequentialBlockGroupIdGenerator blockGroupIdGenerator;
 
   public BlockIdManager(BlockManager blockManager) {
-this.generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
+this.legacyGenerationStampLimit =
+HdfsConstants.GRANDFATHER_GENERATION_STAMP;
 this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager);
 this.blockGroupIdGenerator = new 
SequentialBlockGroupIdGenerator(blockManager);
   }
@@ -68,14 +69,14 @@ public class BlockIdManager {
* Should be invoked only during the first upgrade to
* sequential block IDs.
*/
-  public long upgradeGenerationStampToV2() {
-Preconditions.checkState(generationStampV2.getCurrentValue() ==
+  public long upgradeLegacyGenerationStamp() {
+Preconditions.checkState(generationStamp.getCurrentValue() ==

hadoop git commit: Revert HADOOP-12715 which cause build failure on branch-2.6.

2016-01-27 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 d75375a62 -> 8b9241f93


Revert HADOOP-12715 which cause build failure on branch-2.6.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b9241f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b9241f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b9241f9

Branch: refs/heads/branch-2.6
Commit: 8b9241f93e86fa61d4e841a275087bbb7c7867e0
Parents: d75375a
Author: Junping Du 
Authored: Wed Jan 27 08:14:30 2016 -0800
Committer: Junping Du 
Committed: Wed Jan 27 08:14:30 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 -
 .../hadoop/crypto/key/TestValueQueue.java   | 65 +---
 2 files changed, 16 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b9241f9/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index ebdcf45..af9ffa9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -27,9 +27,6 @@ Release 2.6.4 - UNRELEASED
 HADOOP-12736. TestTimedOutTestsListener#testThreadDumpAndDeadlocks
 sometimes times out. (Xiao Chen via aajisaka)
 
-HADOOP-12715. TestValueQueue#testgetAtMostPolicyALL fails intermittently.
-(Xiao Chen via waltersu4549)
-
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b9241f9/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
index 5eae9a0..8e3a093 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
@@ -19,24 +19,18 @@ package org.apache.hadoop.crypto.key;
 
 import java.io.IOException;
 import java.util.Queue;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.crypto.key.kms.ValueQueue;
 import org.apache.hadoop.crypto.key.kms.ValueQueue.QueueRefiller;
 import org.apache.hadoop.crypto.key.kms.ValueQueue.SyncGenerationPolicy;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
-import com.google.common.base.Supplier;
 import com.google.common.collect.Sets;
 
 public class TestValueQueue {
-  Logger LOG = LoggerFactory.getLogger(TestValueQueue.class);
 
   private static class FillInfo {
 final int num;
@@ -66,7 +60,7 @@ public class TestValueQueue {
   /**
* Verifies that Queue is initially filled to "numInitValues"
*/
-  @Test(timeout=3)
+  @Test
   public void testInitFill() throws Exception {
 MockFiller filler = new MockFiller();
 ValueQueue<String> vq =
@@ -80,7 +74,7 @@ public class TestValueQueue {
   /**
* Verifies that Queue is initialized (Warmed-up) for provided keys
*/
-  @Test(timeout=3)
+  @Test
   public void testWarmUp() throws Exception {
 MockFiller filler = new MockFiller();
 ValueQueue<String> vq =
@@ -103,7 +97,7 @@ public class TestValueQueue {
* Verifies that the refill task is executed after "checkInterval" if
* num values below "lowWatermark"
*/
-  @Test(timeout=3)
+  @Test
   public void testRefill() throws Exception {
 MockFiller filler = new MockFiller();
 ValueQueue<String> vq =
@@ -122,7 +116,7 @@ public class TestValueQueue {
* Verifies that the No refill Happens after "checkInterval" if
* num values above "lowWatermark"
*/
-  @Test(timeout=3)
+  @Test
   public void testNoRefill() throws Exception {
 MockFiller filler = new MockFiller();
 ValueQueue<String> vq =
@@ -137,56 +131,29 @@ public class TestValueQueue {
   /**
* Verify getAtMost when SyncGeneration Policy = ALL
*/
-  @Test(timeout=3)
+  @Test
   public void testgetAtMostPolicyALL() throws Exception {
 MockFiller filler = new MockFiller();
-final ValueQueue<String> vq =
+ValueQueue<String> vq =
 new ValueQueue<String>(10, 0.1f, 300, 1,
 SyncGenerationPolicy.ALL, filler);
 Assert.assertEquals("test", vq.getNext("k1"));
 Assert.assertEquals(1, filler.getTop().num);
-
-// Synchronous 
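
This revert removes the per-test timeouts that HADOOP-12715 had added to TestValueQueue. For reference, a minimal JUnit 4 sketch of the timeout attribute being reverted; this is a hypothetical test, not part of the patch, and it requires JUnit 4 on the classpath:

import org.junit.Assert;
import org.junit.Test;

public class TimeoutDemoTest {
  // JUnit 4 fails the test if it runs longer than the given number of
  // milliseconds (a 30-second budget chosen arbitrarily here), guarding
  // against the intermittent hangs such timeouts are meant to surface.
  @Test(timeout = 30000)
  public void completesWellWithinTheBudget() {
    Assert.assertEquals(4, 2 + 2);
  }
}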

hadoop git commit: HADOOP-12718. Incorrect error message by fs -put local dir without permission. (John Zhuge via Yongjun Zhang)

2016-01-27 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk c01bee010 -> 97056c335


HADOOP-12718. Incorrect error message by fs -put local dir without permission. 
(John Zhuge via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97056c33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97056c33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97056c33

Branch: refs/heads/trunk
Commit: 97056c3355810a803f07baca89b89e2bf6bb7201
Parents: c01bee0
Author: Yongjun Zhang 
Authored: Wed Jan 27 08:04:25 2016 -0800
Committer: Yongjun Zhang 
Committed: Wed Jan 27 08:04:25 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 .../src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java  | 5 +
 2 files changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97056c33/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5121a83..3b8376f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1648,6 +1648,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12731. Remove useless boxing/unboxing code.
 (Kousuke Saruta via aajisaka)
 
+HADOOP-12718. Incorrect error message by fs -put local dir without
+permission. (John Zhuge via Yongjun Zhang)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97056c33/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 352b27a..3e984e3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -33,6 +33,7 @@ import java.io.OutputStream;
 import java.io.FileDescriptor;
 import java.net.URI;
 import java.nio.ByteBuffer;
+import java.nio.file.AccessDeniedException;
 import java.nio.file.Files;
 import java.nio.file.NoSuchFileException;
 import java.nio.file.attribute.BasicFileAttributes;
@@ -463,6 +464,10 @@ public class RawLocalFileSystem extends FileSystem {
 if (localf.isDirectory()) {
   String[] names = localf.list();
   if (names == null) {
+if (!localf.canRead()) {
+  throw new AccessDeniedException("cannot open directory " + f +
+  ": Permission denied");
+}
 return null;
   }
   results = new FileStatus[names.length];
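
The fix above works around an ambiguity in java.io.File: list() returns null both when the directory cannot be read and on other failures, so the patch probes canRead() to report "Permission denied" specifically instead of a misleading generic error. A standalone sketch of that disambiguation; listOrThrow is an illustrative helper, not the RawLocalFileSystem method:

import java.io.File;
import java.nio.file.AccessDeniedException;

public class ListDirDemo {
  static String[] listOrThrow(File dir) throws AccessDeniedException {
    String[] names = dir.list(); // null on no permission *or* other failures
    if (names == null && !dir.canRead()) {
      // canRead() disambiguates: unreadable directory -> permission problem.
      throw new AccessDeniedException(
          "cannot open directory " + dir + ": Permission denied");
    }
    return names; // may still be null for non-permission failures
  }

  public static void main(String[] args) throws AccessDeniedException {
    System.out.println(java.util.Arrays.toString(listOrThrow(new File("."))));
  }
}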



hadoop git commit: HDFS-7694. FSDataInputStream should support "unbuffer" (cmccabe)

2016-01-27 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 8b9241f93 -> 8ef73cd4c


HDFS-7694. FSDataInputStream should support "unbuffer" (cmccabe)

(cherry picked from commit 6b39ad0865cb2a7960dd59d68178f0bf28865ce2)
(cherry picked from commit e35788aa5a41940651af0b73dfeaeca011556904)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ef73cd4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ef73cd4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ef73cd4

Branch: refs/heads/branch-2.6
Commit: 8ef73cd4ce640e5aebcdd35f33f7e27a7c83d7a9
Parents: 8b9241f
Author: Colin Patrick Mccabe 
Authored: Thu Feb 12 10:40:46 2015 -0800
Committer: Junping Du 
Committed: Wed Jan 27 08:46:38 2016 -0800

--
 .../java/org/apache/hadoop/fs/CanUnbuffer.java  |  36 ++
 .../org/apache/hadoop/fs/FSDataInputStream.java |  12 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |   8 +-
 .../java/org/apache/hadoop/hdfs/PeerCache.java  |   9 +-
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.c  |  28 
 .../hadoop-hdfs/src/main/native/libhdfs/hdfs.h  |   9 ++
 .../java/org/apache/hadoop/fs/TestUnbuffer.java | 127 +++
 8 files changed, 227 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ef73cd4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CanUnbuffer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CanUnbuffer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CanUnbuffer.java
new file mode 100644
index 000..07e65f5
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CanUnbuffer.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * FSDataInputStreams implement this interface to indicate that they can clear
+ * their buffers on request.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface CanUnbuffer {
+  /**
+   * Reduce the buffering.  This will also free sockets and file descriptors
+   * held by the stream, if possible.
+   */
+  public void unbuffer();
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ef73cd4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
index c8609d4..7dae991 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.util.IdentityHashStore;
 public class FSDataInputStream extends DataInputStream
 implements Seekable, PositionedReadable, 
   ByteBufferReadable, HasFileDescriptor, CanSetDropBehind, CanSetReadahead,
-  HasEnhancedByteBufferAccess {
+  HasEnhancedByteBufferAccess, CanUnbuffer {
   /**
* Map ByteBuffers that we have handed out to readers to ByteBufferPool 
* objects
@@ -220,4 +220,14 @@ public class FSDataInputStream extends DataInputStream
   bufferPool.putBuffer(buffer);
 }
   }
+
+  @Override
+  public void unbuffer() {
+try {
+  ((CanUnbuffer)in).unbuffer();
+} catch (ClassCastException e) {
+  throw new UnsupportedOperationException("this stream does not " +
+  "support unbuffering.");
+}
+  }
 }
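
FSDataInputStream is a wrapper, so the backported unbuffer() above delegates to the wrapped stream when it implements CanUnbuffer and otherwise surfaces a clear UnsupportedOperationException. A simplified sketch of that delegation pattern, using stand-in types rather than the Hadoop classes:

interface CanUnbuffer {
  void unbuffer();
}

class WrappedStream implements CanUnbuffer {
  public void unbuffer() {
    System.out.println("freed buffers, sockets and file descriptors");
  }
}

public class UnbufferDemo {
  // Forward unbuffer() to the wrapped stream when it supports the interface;
  // otherwise translate the failed cast into a descriptive error.
  static void unbuffer(Object in) {
    try {
      ((CanUnbuffer) in).unbuffer();
    } catch (ClassCastException e) {
      throw new UnsupportedOperationException(
          "this stream does not support unbuffering.");
    }
  }

  public static void main(String[] args) {
    unbuffer(new WrappedStream()); // delegates successfully
    try {
      unbuffer(new Object());      // wrapped object lacks CanUnbuffer
    } catch (UnsupportedOperationException expected) {
      System.out.println(expected.getMessage());
    }
  }
}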


hadoop git commit: HADOOP-12718. Incorrect error message by fs -put local dir without permission. (John Zhuge via Yongjun Zhang)

2016-01-27 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0b8767e01 -> a1f913fb6


HADOOP-12718. Incorrect error message by fs -put local dir without permission. 
(John Zhuge via Yongjun Zhang)

(cherry picked from commit 97056c3355810a803f07baca89b89e2bf6bb7201)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1f913fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1f913fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1f913fb

Branch: refs/heads/branch-2
Commit: a1f913fb6e917f90d9492c78a30b380096ddb3db
Parents: 0b8767e
Author: Yongjun Zhang 
Authored: Wed Jan 27 08:04:25 2016 -0800
Committer: Yongjun Zhang 
Committed: Wed Jan 27 08:05:17 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 .../src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java  | 5 +
 2 files changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f913fb/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1532af3..7040048 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1015,6 +1015,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12731. Remove useless boxing/unboxing code.
 (Kousuke Saruta via aajisaka)
 
+HADOOP-12718. Incorrect error message by fs -put local dir without
+permission. (John Zhuge via Yongjun Zhang)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f913fb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index a6a9665..318bbb0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -33,6 +33,7 @@ import java.io.OutputStream;
 import java.io.FileDescriptor;
 import java.net.URI;
 import java.nio.ByteBuffer;
+import java.nio.file.AccessDeniedException;
 import java.nio.file.Files;
 import java.nio.file.NoSuchFileException;
 import java.nio.file.attribute.BasicFileAttributes;
@@ -450,6 +451,10 @@ public class RawLocalFileSystem extends FileSystem {
 if (localf.isDirectory()) {
   String[] names = localf.list();
   if (names == null) {
+if (!localf.canRead()) {
+  throw new AccessDeniedException("cannot open directory " + f +
+  ": Permission denied");
+}
 return null;
   }
   results = new FileStatus[names.length];



hadoop git commit: HADOOP-12718. Incorrect error message by fs -put local dir without permission. (John Zhuge via Yongjun Zhang)

2016-01-27 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 b6fe3c20c -> 62206dd2e


HADOOP-12718. Incorrect error message by fs -put local dir without permission. 
(John Zhuge via Yongjun Zhang)

(cherry picked from commit 97056c3355810a803f07baca89b89e2bf6bb7201)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62206dd2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62206dd2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62206dd2

Branch: refs/heads/branch-2.8
Commit: 62206dd2edcc53dbfbae30a524c21e7eb828bd0e
Parents: b6fe3c2
Author: Yongjun Zhang 
Authored: Wed Jan 27 08:04:25 2016 -0800
Committer: Yongjun Zhang 
Committed: Wed Jan 27 08:05:33 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 .../src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java  | 5 +
 2 files changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62206dd2/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8dc42a2..040f143 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -961,6 +961,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12731. Remove useless boxing/unboxing code.
 (Kousuke Saruta via aajisaka)
 
+HADOOP-12718. Incorrect error message by fs -put local dir without
+permission. (John Zhuge via Yongjun Zhang)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62206dd2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index a6a9665..318bbb0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -33,6 +33,7 @@ import java.io.OutputStream;
 import java.io.FileDescriptor;
 import java.net.URI;
 import java.nio.ByteBuffer;
+import java.nio.file.AccessDeniedException;
 import java.nio.file.Files;
 import java.nio.file.NoSuchFileException;
 import java.nio.file.attribute.BasicFileAttributes;
@@ -450,6 +451,10 @@ public class RawLocalFileSystem extends FileSystem {
 if (localf.isDirectory()) {
   String[] names = localf.list();
   if (names == null) {
+if (!localf.canRead()) {
+  throw new AccessDeniedException("cannot open directory " + f +
+  ": Permission denied");
+}
 return null;
   }
   results = new FileStatus[names.length];



[35/50] [abbrv] hadoop git commit: HDFS-9690. ClientProtocol.addBlock is not idempotent after HDFS-8071.

2016-01-27 Thread vvasudev
HDFS-9690. ClientProtocol.addBlock is not idempotent after HDFS-8071.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45c763ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45c763ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45c763ad

Branch: refs/heads/YARN-3926
Commit: 45c763ad6171bc7808c2ddcb9099a4215113da2a
Parents: bd909ed
Author: Tsz-Wo Nicholas Sze 
Authored: Tue Jan 26 11:20:13 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Tue Jan 26 11:20:13 2016 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 11 +++---
 .../hadoop/hdfs/TestDFSClientRetries.java   | 36 +++-
 3 files changed, 35 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/45c763ad/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a14a1d8..56a85f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2696,6 +2696,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently (Mingliang 
Liu
 via jitendra)
 
+HDFS-9690. ClientProtocol.addBlock is not idempotent after HDFS-8071.
+(szetszwo)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45c763ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 914fbd9..6ba8e1c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -184,17 +184,16 @@ class FSDirWriteFileOp {
 src = fsn.dir.resolvePath(pc, src, pathComponents);
 FileState fileState = analyzeFileState(fsn, src, fileId, clientName,
previous, onRetryBlock);
-final INodeFile pendingFile = fileState.inode;
-// Check if the penultimate block is minimally replicated
-if (!fsn.checkFileProgress(src, pendingFile, false)) {
-  throw new NotReplicatedYetException("Not replicated yet: " + src);
-}
-
 if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
   // This is a retry. No need to generate new locations.
   // Use the last block if it has locations.
   return null;
 }
+
+final INodeFile pendingFile = fileState.inode;
+if (!fsn.checkFileProgress(src, pendingFile, false)) {
+  throw new NotReplicatedYetException("Not replicated yet: " + src);
+}
 if (pendingFile.getBlocks().length >= fsn.maxBlocksPerFile) {
   throw new IOException("File has reached the limit on maximum number of"
   + " blocks (" + DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY

http://git-wip-us.apache.org/repos/asf/hadoop/blob/45c763ad/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index e41c06a..1f783f6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -432,19 +432,37 @@ public class TestDFSClientRetries {
   // Make the call to addBlock() get called twice, as if it were retried
   // due to an IPC issue.
  doAnswer(new Answer<LocatedBlock>() {
-@Override
-public LocatedBlock answer(InvocationOnMock invocation) throws 
Throwable {
-  LocatedBlock ret = (LocatedBlock) invocation.callRealMethod();
+private int getBlockCount(LocatedBlock ret) throws IOException {
   LocatedBlocks lb = cluster.getNameNodeRpc().getBlockLocations(src, 
0, Long.MAX_VALUE);
-  int blockCount = lb.getLocatedBlocks().size();
   assertEquals(lb.getLastLocatedBlock().getBlock(), ret.getBlock());
-  
+  return 
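
The fix itself is an ordering change inside FSDirWriteFileOp (the hunk above): the retry short-circuit now runs before the penultimate-block replication check. On an RPC-level retry, analyzeFileState() fills onRetryBlock with the block allocated by the first attempt, and returning it before checking replication keeps addBlock() idempotent even when the NameNode has not yet processed the incremental block reports for the previous block. A self-contained sketch of the reordered control flow, using hypothetical stand-in types rather than the real HDFS signatures:

  import java.io.IOException;

  public class AddBlockOrderingSketch {
    static class LocatedBlock { final int locations; LocatedBlock(int l) { locations = l; } }
    static class NotReplicatedYetException extends IOException {
      NotReplicatedYetException(String m) { super(m); }
    }

    static LocatedBlock storeAllocatedBlock(LocatedBlock onRetryBlock,
        boolean previousBlockReplicated) throws IOException {
      // (1) Retried call: hand back the block the first attempt allocated.
      if (onRetryBlock != null && onRetryBlock.locations > 0) {
        return onRetryBlock;
      }
      // (2) Only a genuinely new allocation waits for minimal replication.
      if (!previousBlockReplicated) {
        throw new NotReplicatedYetException("Not replicated yet");
      }
      return new LocatedBlock(3);       // pretend a new block was allocated
    }

    public static void main(String[] args) throws IOException {
      // A retry that arrives before the IBRs are processed now succeeds:
      System.out.println(storeAllocatedBlock(new LocatedBlock(3), false).locations);
    }
  }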

[10/50] [abbrv] hadoop git commit: HDFS-9682. Fix a typo "aplication" in HttpFS document. Contributed by Weiwei yang.

2016-01-27 Thread vvasudev
HDFS-9682. Fix a typo "aplication" in HttpFS document. Contributed by Weiwei 
yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68316346
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68316346
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68316346

Branch: refs/heads/YARN-3926
Commit: 683163468219107afb57a002f2cf2a369442171d
Parents: 8f58f74
Author: Akira Ajisaka 
Authored: Fri Jan 22 20:12:18 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Jan 22 20:12:18 2016 +0900

--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68316346/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
index ac8f4ca..3e8daa0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/site/markdown/index.md
@@ -23,7 +23,7 @@ HttpFS can be used to access data in HDFS on a cluster behind 
of a firewall (the
 
 HttpFS can be used to access data in HDFS using HTTP utilities (such as curl 
and wget) and HTTP libraries Perl from other languages than Java.
 
-The **webhdfs** client FileSytem implementation can be used to access HttpFS 
using the Hadoop filesystem command (`hadoop fs`) line tool as well as from 
Java aplications using the Hadoop FileSystem Java API.
+The **webhdfs** client FileSytem implementation can be used to access HttpFS 
using the Hadoop filesystem command (`hadoop fs`) line tool as well as from 
Java applications using the Hadoop FileSystem Java API.
 
 HttpFS has built-in security supporting Hadoop pseudo authentication and HTTP 
SPNEGO Kerberos and other pluggable authentication mechanims. It also provides 
Hadoop proxy user support.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68316346/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8c26ee7..c7061e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2619,6 +2619,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6054. MiniQJMHACluster should not use static port to avoid binding
 failure in unit test. (Yongjun Zhang)
 
+HDFS-9682. Fix a typo "aplication" in HttpFS document.
+(Weiwei Yang via aajisaka)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[39/50] [abbrv] hadoop git commit: YARN-4612. Fix rumen and scheduler load simulator handle killed tasks properly. Contributed by Ming Ma.

2016-01-27 Thread vvasudev
YARN-4612. Fix rumen and scheduler load simulator handle killed tasks
properly. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4efdf3a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4efdf3a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4efdf3a9

Branch: refs/heads/YARN-3926
Commit: 4efdf3a979c361348612f817a3253be6d0de58f7
Parents: d323639
Author: Xuan 
Authored: Tue Jan 26 18:17:12 2016 -0800
Committer: Xuan 
Committed: Tue Jan 26 18:17:12 2016 -0800

--
 .../apache/hadoop/tools/rumen/JobBuilder.java   |  11 +-
 .../src/main/data/2jobs2min-rumen-jh.json   | 606 +++
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |   6 +
 .../apache/hadoop/yarn/sls/utils/SLSUtils.java  |   6 +
 hadoop-yarn-project/CHANGES.txt |   3 +
 5 files changed, 628 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4efdf3a9/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobBuilder.java
--
diff --git 
a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobBuilder.java
 
b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobBuilder.java
index c5ae2fc..890f388 100644
--- 
a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobBuilder.java
+++ 
b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/JobBuilder.java
@@ -473,9 +473,12 @@ public class JobBuilder {
 task.setTaskStatus(getPre21Value(event.getTaskStatus()));
 TaskFailed t = (TaskFailed)(event.getDatum());
 task.putDiagnosticInfo(t.error.toString());
-task.putFailedDueToAttemptId(t.failedDueToAttempt.toString());
+// killed task wouldn't have failed attempt.
+if (t.getFailedDueToAttempt() != null) {
+  task.putFailedDueToAttemptId(t.getFailedDueToAttempt().toString());
+}
 org.apache.hadoop.mapreduce.jobhistory.JhCounters counters =
-((TaskFailed) event.getDatum()).counters;
+((TaskFailed) event.getDatum()).getCounters();
 task.incorporateCounters(
 counters == null ? EMPTY_COUNTERS : counters);
   }
@@ -500,7 +503,7 @@ public class JobBuilder {
 
 attempt.setFinishTime(event.getFinishTime());
 org.apache.hadoop.mapreduce.jobhistory.JhCounters counters =
-((TaskAttemptUnsuccessfulCompletion) event.getDatum()).counters;
+((TaskAttemptUnsuccessfulCompletion) event.getDatum()).getCounters();
 attempt.incorporateCounters(
 counters == null ? EMPTY_COUNTERS : counters);
 attempt.arraySetClockSplits(event.getClockSplits());
@@ -509,7 +512,7 @@ public class JobBuilder {
 attempt.arraySetPhysMemKbytes(event.getPhysMemKbytes());
 TaskAttemptUnsuccessfulCompletion t =
 (TaskAttemptUnsuccessfulCompletion) (event.getDatum());
-attempt.putDiagnosticInfo(t.error.toString());
+attempt.putDiagnosticInfo(t.getError().toString());
   }
 
   private void processTaskAttemptStartedEvent(TaskAttemptStartedEvent event) {
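
The JobBuilder hunk above is the crux of the rumen half of this change: a KILLED task's job-history TaskFailed record carries no failedDueToAttempt, so the old unconditional t.failedDueToAttempt.toString() raised a NullPointerException while parsing such histories (the remaining hunks simply switch to the Avro getter methods). A self-contained sketch of the guard, with a hypothetical class standing in for the Avro-generated TaskFailed:

  public class KilledTaskSketch {
    static class TaskFailed {
      CharSequence failedDueToAttempt;               // stays null for KILLED tasks
      CharSequence getFailedDueToAttempt() { return failedDueToAttempt; }
    }

    public static void main(String[] args) {
      TaskFailed killed = new TaskFailed();
      // Before the fix: killed.getFailedDueToAttempt().toString() -> NPE.
      if (killed.getFailedDueToAttempt() != null) {
        System.out.println(killed.getFailedDueToAttempt().toString());
      } else {
        System.out.println("killed task: no failed attempt to record");
      }
    }
  }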

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4efdf3a9/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
--
diff --git a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json 
b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
index 83629ed..9d90deb 100644
--- a/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
+++ b/hadoop-tools/hadoop-sls/src/main/data/2jobs2min-rumen-jh.json
@@ -10208,4 +10208,610 @@
   "clusterReduceMB" : -1,
   "jobMapMB" : 200,
   "jobReduceMB" : 200
+} {
+"priority" : "NORMAL",
+"jobID" : "job_1369942127770_1207",
+"user" : "jenkins",
+"jobName" : "TeraGen",
+"submitTime" : 1371223054499,
+"finishTime" : 1371223153874,
+"queue" : "sls_queue_1",
+"mapTasks" : [ {
+"startTime" : 1371223059053,
+"taskID" : "task_1369942127770_1207_m_00",
+"taskType" : "MAP",
+"finishTime" : 1371223078206,
+"attempts" : [ ],
+"preferredLocations" : [ ],
+"taskStatus" : "KILLED",
+"inputBytes" : -1,
+"inputRecords" : -1,
+"outputBytes" : -1,
+"outputRecords" : -1
+} ],
+"reduceTasks" : [ ],
+"launchTime" : 1371223058937,
+"totalMaps" : 1,
+"totalReduces" : 0,
+"otherTasks" : [ ],
+"jobProperties" : {
+"mapreduce.job.ubertask.enable" : "false",
+"yarn.resourcemanager.max-completed-applications" : "1",
+"yarn.resourcemanager.delayed.delegation-token.removal-interval-ms" : "3",
+"mapreduce.client.submit.file.replication" : "2",
+"yarn.nodemanager.container-manager.thread-count" : "20",
+"mapred.queue.default.acl-administer-jobs" : "*",
+"dfs.image.transfer.bandwidthPerSec" : "0",

[04/50] [abbrv] hadoop git commit: HDFS-9601. NNThroughputBenchmark.BlockReportStats should handle NotReplicatedYetException on adding block (iwasakims)

2016-01-27 Thread vvasudev
HDFS-9601. NNThroughputBenchmark.BlockReportStats should handle 
NotReplicatedYetException on adding block (iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2ffcc29
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2ffcc29
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2ffcc29

Branch: refs/heads/YARN-3926
Commit: b2ffcc2915838cf2e35544cfbb0286984a435205
Parents: f3427d3
Author: Masatake Iwasaki 
Authored: Fri Jan 22 12:28:38 2016 +0900
Committer: Masatake Iwasaki 
Committed: Fri Jan 22 12:28:38 2016 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/namenode/NNThroughputBenchmark.java  | 34 +++-
 2 files changed, 36 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2ffcc29/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0ded0c4..140be77 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -948,6 +948,9 @@ Release 2.9.0 - UNRELEASED
 
   BUG FIXES
 
+HDFS-9601. NNThroughputBenchmark.BlockReportStats should handle
+NotReplicatedYetException on adding block (iwasakims)
+
 HDFS-9621. getListing wrongly associates Erasure Coding policy to 
pre-existing
 replicated files under an EC directory. (jing9)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2ffcc29/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 8a594ed..42cb72f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -68,6 +68,7 @@ import 
org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.Groups;
@@ -1181,7 +1182,7 @@ public class NNThroughputBenchmark implements Tool {
 throws IOException {
   ExtendedBlock prevBlock = null;
   for(int jdx = 0; jdx < blocksPerFile; jdx++) {
-LocatedBlock loc = clientProto.addBlock(fileName, clientName,
+LocatedBlock loc = addBlock(fileName, clientName,
 prevBlock, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
 prevBlock = loc.getBlock();
 for(DatanodeInfo dnInfo : loc.getLocations()) {
@@ -1195,11 +1196,42 @@ public class NNThroughputBenchmark implements Tool {
   
dataNodeProto.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration,
   bpid, report);
 }
+// IBRs are asynchronously processed by NameNode. The next
+// ClientProtocol#addBlock() may throw NotReplicatedYetException.
   }
   return prevBlock;
 }
 
 /**
+ * Retry ClientProtocol.addBlock() if it throws NotReplicatedYetException.
+ * Because addBlock() also commits the previous block,
+ * it fails if enough IBRs are not processed by NameNode.
+ */
+private LocatedBlock addBlock(String src, String clientName,
+ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
+String[] favoredNodes) throws IOException {
+  for (int i = 0; i < 30; i++) {
+try {
+  return clientProto.addBlock(src, clientName,
+  previous, excludeNodes, fileId, favoredNodes);
+} catch (NotReplicatedYetException|RemoteException e) {
+  if (e instanceof RemoteException) {
+String className = ((RemoteException) e).getClassName();
+if (!className.equals(NotReplicatedYetException.class.getName())) {
+  throw e;
+}
+  }
+  try {
+Thread.sleep(100);
+  } catch (InterruptedException ie) {
+LOG.warn("interrupted while retrying addBlock.", ie);
+  }
+}
+  }
+  throw new IOException("failed to add block.");
+}
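
The helper matches NotReplicatedYetException both directly (in-process NameNode reference) and by class name inside a RemoteException (over RPC). Hadoop's RemoteException also provides unwrapRemoteException(Class...), which converts a matching remote class name back into the local exception type; a hedged alternative for the catch block, shown as a fragment reusing the identifiers from the diff above, with the surrounding retry loop assumed unchanged:

  try {
    return clientProto.addBlock(src, clientName, previous, excludeNodes,
        fileId, favoredNodes);
  } catch (RemoteException re) {
    // Returns a NotReplicatedYetException when the remote class matches,
    // otherwise the original exception, which we rethrow.
    IOException unwrapped =
        re.unwrapRemoteException(NotReplicatedYetException.class);
    if (!(unwrapped instanceof NotReplicatedYetException)) {
      throw unwrapped;
    }
    // fall through to the sleep-and-retry path
  } catch (NotReplicatedYetException e) {
    // in-process call path: fall through and retry
  }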
+
+/**
  * Does not require 

[12/50] [abbrv] hadoop git commit: YARN-4497. RM might fail to restart when recovering apps whose attempts are missing. (Jun Gong via rohithsharmaks)

2016-01-27 Thread vvasudev
YARN-4497. RM might fail to restart when recovering apps whose attempts are 
missing. (Jun Gong via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6258b33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6258b33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6258b33

Branch: refs/heads/YARN-3926
Commit: d6258b33a7428a0725ead96bc43f4dd444c7c8f1
Parents: 0bae506
Author: rohithsharmaks 
Authored: Fri Jan 22 20:27:38 2016 +0530
Committer: rohithsharmaks 
Committed: Fri Jan 22 20:27:38 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../server/resourcemanager/rmapp/RMAppImpl.java | 26 -
 .../rmapp/attempt/RMAppAttemptImpl.java |  8 +++
 .../server/resourcemanager/TestRMRestart.java   | 61 
 4 files changed, 96 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6258b33/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2230b42..b667b5b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -143,6 +143,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4578. Directories that are mounted in docker containers need to be 
more
 restrictive/container-specific. (Sidharta Seethana via vvasudev)
 
+YARN-4497. RM might fail to restart when recovering apps whose attempts 
are missing.
+(Jun Gong via rohithsharmaks)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6258b33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 1a390df..10c9edc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -34,6 +34,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.TreeSet;
 import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
@@ -849,17 +850,32 @@ public class RMAppImpl implements RMApp, Recoverable {
 // send the ATS create Event
 sendATSCreateEvent(this, this.startTime);
 
-for(int i=0; i < appState.getAttemptCount(); ++i) {
+RMAppAttemptImpl preAttempt = null;
+for (ApplicationAttemptId attemptId :
+    new TreeSet<>(appState.attempts.keySet())) {
   // create attempt
-  createNewAttempt();
+  createNewAttempt(attemptId);
   ((RMAppAttemptImpl)this.currentAttempt).recover(state);
+  // If previous attempt is not in final state, it means we failed to store
+  // its final state. We set it to FAILED now because we could not make 
sure
+  // about its final state.
+  if (preAttempt != null && preAttempt.getRecoveredFinalState() == null) {
+preAttempt.setRecoveredFinalState(RMAppAttemptState.FAILED);
+  }
+  preAttempt = (RMAppAttemptImpl)currentAttempt;
+}
+if (currentAttempt != null) {
+  nextAttemptId = currentAttempt.getAppAttemptId().getAttemptId() + 1;
 }
   }
 
   private void createNewAttempt() {
 ApplicationAttemptId appAttemptId =
 ApplicationAttemptId.newInstance(applicationId, nextAttemptId++);
+createNewAttempt(appAttemptId);
+  }
 
+  private void createNewAttempt(ApplicationAttemptId appAttemptId) {
 BlacklistManager currentAMBlacklist;
 if (currentAttempt != null) {
   currentAMBlacklist = currentAttempt.getAMBlacklist();
@@ -1803,4 +1819,10 @@ public class RMAppImpl implements RMApp, Recoverable {
   public float getAmBlacklistingDisableThreshold() {
 return blacklistDisableThreshold;
   }
+
+  @Private
+  @VisibleForTesting
+  public int getNextAttemptId() {
+return nextAttemptId;
+  }
 }
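
Two details of the recovery loop are easy to miss: iterating a TreeSet of the stored attempt ids replays attempts in ascending order even if the state store returns them unordered, and nextAttemptId must be re-derived from the last recovered attempt so a post-recovery attempt cannot collide with a recovered one. A self-contained sketch of both points, with plain ints standing in for ApplicationAttemptId:

  import java.util.HashMap;
  import java.util.Map;
  import java.util.TreeSet;

  public class RecoveryOrderSketch {
    public static void main(String[] args) {
      // The state store may hand attempts back in any order.
      Map<Integer, String> stored = new HashMap<>();
      stored.put(3, "attempt3");
      stored.put(1, "attempt1");
      stored.put(2, "attempt2");

      int lastRecovered = 0;
      // TreeSet sorts the ids, so recovery replays 1, 2, 3 in order.
      for (int attemptId : new TreeSet<>(stored.keySet())) {
        System.out.println("recovering " + stored.get(attemptId));
        lastRecovered = attemptId;
      }
      // The next attempt created after recovery must get a fresh id.
      int nextAttemptId = lastRecovered + 1;
      System.out.println("nextAttemptId = " + nextAttemptId);
    }
  }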


[46/50] [abbrv] hadoop git commit: Revert "HDFS-9677. Rename generationStampV1/generationStampV2 to legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu."

2016-01-27 Thread vvasudev
Revert "HDFS-9677. Rename generationStampV1/generationStampV2 to 
legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu."

This reverts commit 8a91109d16394310f2568717f103e6fff7cbddb0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a957130
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a957130
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a957130

Branch: refs/heads/YARN-3926
Commit: 3a9571308e99cc374681bbc451a517d41a150aa0
Parents: 8a91109
Author: Jing Zhao 
Authored: Wed Jan 27 16:31:19 2016 -0800
Committer: Jing Zhao 
Committed: Wed Jan 27 16:31:19 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 -
 .../server/blockmanagement/BlockIdManager.java  | 83 ++--
 .../server/blockmanagement/BlockManager.java|  8 +-
 .../OutOfLegacyGenerationStampsException.java   | 38 -
 .../OutOfV1GenerationStampsException.java   | 38 +
 .../hdfs/server/common/HdfsServerConstants.java |  3 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  4 +-
 .../hdfs/server/namenode/FSImageFormat.java | 12 +--
 .../server/namenode/FSImageFormatProtobuf.java  | 12 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  8 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java | 13 ++-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|  4 +-
 .../blockmanagement/TestSequentialBlockId.java  | 18 ++---
 .../hdfs/server/namenode/TestEditLog.java   |  6 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |  4 +-
 .../hdfs/server/namenode/TestSaveNamespace.java |  2 +-
 17 files changed, 134 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a957130/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7e75558..097c051 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -959,9 +959,6 @@ Release 2.9.0 - UNRELEASED
 HDFS-9541. Add hdfsStreamBuilder API to libhdfs to support 
defaultBlockSizes
 greater than 2 GB. (cmccabe via zhz)
 
-HDFS-9677. Rename generationStampV1/generationStampV2 to
-legacyGenerationStamp/generationStamp. (Mingliang Liu via jing9)
-
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a957130/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 3f21d9b..9c71287 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -36,11 +36,11 @@ public class BlockIdManager {
* The global generation stamp for legacy blocks with randomly
* generated block IDs.
*/
-  private final GenerationStamp legacyGenerationStamp = new GenerationStamp();
+  private final GenerationStamp generationStampV1 = new GenerationStamp();
   /**
* The global generation stamp for this file system.
*/
-  private final GenerationStamp generationStamp = new GenerationStamp();
+  private final GenerationStamp generationStampV2 = new GenerationStamp();
   /**
* The value of the generation stamp when the first switch to sequential
* block IDs was made. Blocks with generation stamps below this value
@@ -49,7 +49,7 @@ public class BlockIdManager {
* (or initialized as an offset from the V1 (legacy) generation stamp on
* upgrade).
*/
-  private long legacyGenerationStampLimit;
+  private long generationStampV1Limit;
   /**
* The global block ID space for this file system.
*/
@@ -57,8 +57,7 @@ public class BlockIdManager {
   private final SequentialBlockGroupIdGenerator blockGroupIdGenerator;
 
   public BlockIdManager(BlockManager blockManager) {
-this.legacyGenerationStampLimit =
-HdfsConstants.GRANDFATHER_GENERATION_STAMP;
+this.generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
 this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager);
 this.blockGroupIdGenerator = new 
SequentialBlockGroupIdGenerator(blockManager);
   }
@@ -69,14 +68,14 @@ public class BlockIdManager {
* Should be invoked 

[16/50] [abbrv] hadoop git commit: HDFS-9646. ErasureCodingWorker may fail when recovering data blocks with length less than the first internal block. Contributed by Jing Zhao.

2016-01-27 Thread vvasudev
HDFS-9646. ErasureCodingWorker may fail when recovering data blocks with length 
less than the first internal block. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95363bcc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95363bcc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95363bcc

Branch: refs/heads/YARN-3926
Commit: 95363bcc7dae28ba9ae2cd7ee9a258fcb58cd932
Parents: 34a3900
Author: Jing Zhao 
Authored: Fri Jan 22 09:46:02 2016 -0800
Committer: Jing Zhao 
Committed: Fri Jan 22 09:46:02 2016 -0800

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  48 --
 .../hadoop/hdfs/DFSStripedInputStream.java  |   2 +-
 .../hadoop/hdfs/util/StripedBlockUtil.java  |  13 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../erasurecode/ErasureCodingWorker.java| 146 +++-
 .../server/protocol/BlockECRecoveryCommand.java |   2 +-
 .../hdfs/TestReadStripedFileWithDecoding.java   |  17 +-
 .../hadoop/hdfs/TestRecoverStripedFile.java | 172 ---
 8 files changed, 270 insertions(+), 133 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95363bcc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 3de60b2..3c91ca1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -32,7 +32,6 @@ import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CancellationException;
@@ -909,7 +908,8 @@ public class DFSInputStream extends FSInputStream
 }
   }
 
-  protected synchronized int readWithStrategy(ReaderStrategy strategy, int 
off, int len) throws IOException {
+  protected synchronized int readWithStrategy(ReaderStrategy strategy, int off,
+  int len) throws IOException {
 dfsClient.checkOpen();
 if (closed.get()) {
   throw new IOException("Stream closed");
@@ -959,7 +959,7 @@ public class DFSInputStream extends FSInputStream
   // Check if need to report block replicas corruption either read
   // was successful or ChecksumException occured.
   reportCheckSumFailure(corruptedBlockMap,
-  currentLocatedBlock.getLocations().length);
+  currentLocatedBlock.getLocations().length, false);
 }
   }
 }
@@ -1492,7 +1492,8 @@ public class DFSInputStream extends FSInputStream
 // Check and report if any block replicas are corrupted.
 // BlockMissingException may be caught if all block replicas are
 // corrupted.
-reportCheckSumFailure(corruptedBlockMap, blk.getLocations().length);
+reportCheckSumFailure(corruptedBlockMap, blk.getLocations().length,
+false);
   }
 
   remaining -= bytesToRead;
@@ -1508,6 +1509,7 @@ public class DFSInputStream extends FSInputStream
 
   /**
* DFSInputStream reports checksum failure.
+   * For replicated blocks, we have the following logic:
* Case I : client has tried multiple data nodes and at least one of the
* attempts has succeeded. We report the other failures as corrupted block to
* namenode.
@@ -1515,29 +1517,39 @@ public class DFSInputStream extends FSInputStream
* only report if the total number of replica is 1. We do not
* report otherwise since this maybe due to the client is a handicapped 
client
* (who can not read).
+   *
+   * For erasure-coded blocks, each block in corruptedBlockMap is an internal
+   * block in a block group, and there is usually only one DataNode
+   * corresponding to each internal block. For this case we simply report the
+   * corrupted blocks to NameNode and ignore the above logic.
+   *
* @param corruptedBlockMap map of corrupted blocks
* @param dataNodeCount number of data nodes who contains the block replicas
*/
   protected void reportCheckSumFailure(
   Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
-  int dataNodeCount) {
+  int dataNodeCount, boolean isStriped) {
 if (corruptedBlockMap.isEmpty()) {
   return;
 }
-Iterator<Entry<ExtendedBlock, Set<DatanodeInfo>>> it = corruptedBlockMap
-.entrySet().iterator();
-Entry<ExtendedBlock, Set<DatanodeInfo>> entry = 
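
The new javadoc above encodes the whole decision: replicated reads keep the two-case heuristic, while erasure-coded reads always report, since each corrupted entry is a distinct internal block usually served by a single DataNode. A compact decision-table sketch (simplified: readSucceeded stands in for Case I's "at least one attempt succeeded", and the real method also assembles the LocatedBlock arrays sent to the NameNode):

  public class ReportDecisionSketch {
    static boolean shouldReport(int corruptedEntries, int dataNodeCount,
        boolean readSucceeded, boolean isStriped) {
      if (corruptedEntries == 0) {
        return false;                  // nothing to report
      }
      if (isStriped) {
        return true;                   // EC: one DN per internal block
      }
      // Case I: some attempt succeeded, report the bad replicas.
      // Case II: all attempts failed, report only for a single-replica block.
      return readSucceeded || dataNodeCount == 1;
    }

    public static void main(String[] args) {
      System.out.println(shouldReport(1, 3, false, true));   // EC: true
      System.out.println(shouldReport(1, 3, false, false));  // replicated: false
    }
  }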

[03/50] [abbrv] hadoop git commit: HADOOP-12659. Incorrect usage of config parameters in token manager of KMS. Contributed by Mingliang Liu.

2016-01-27 Thread vvasudev
HADOOP-12659. Incorrect usage of config parameters in token manager of KMS. 
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3427d37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3427d37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3427d37

Branch: refs/heads/YARN-3926
Commit: f3427d3766d7101d0d1c37d6281918551d221ebe
Parents: b4a05c1
Author: Xiaoyu Yao 
Authored: Thu Jan 21 13:51:58 2016 -0800
Committer: Xiaoyu Yao 
Committed: Thu Jan 21 13:51:58 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../token/delegation/ZKDelegationTokenSecretManager.java | 2 +-
 .../security/token/delegation/web/DelegationTokenManager.java| 4 ++--
 3 files changed, 6 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3427d37/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e5172d5..2fb1173 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1639,6 +1639,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-7161. Remove unnecessary oro package from dependency management
 section. (Sean Busbey via aajisaka)
 
+HADOOP-12659. Incorrect usage of config parameters in token manager of
+KMS. (Mingliang Liu via xyao)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3427d37/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
index da0e6ad..88b81b0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java
@@ -141,7 +141,7 @@ public abstract class ZKDelegationTokenSecretManager

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3427d37/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java
index 5d86249..221b02a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java
@@ -68,8 +68,8 @@ public class DelegationTokenManager {
   super(conf.getLong(UPDATE_INTERVAL, UPDATE_INTERVAL_DEFAULT) * 1000,
   conf.getLong(MAX_LIFETIME, MAX_LIFETIME_DEFAULT) * 1000,
   conf.getLong(RENEW_INTERVAL, RENEW_INTERVAL_DEFAULT) * 1000,
-  conf.getLong(REMOVAL_SCAN_INTERVAL,
-  REMOVAL_SCAN_INTERVAL_DEFAULT * 1000));
+  conf.getLong(REMOVAL_SCAN_INTERVAL, REMOVAL_SCAN_INTERVAL_DEFAULT)
+  * 1000);
   this.tokenKind = tokenKind;
 }
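
Both hunks fix the same operator-precedence slip: the seconds-to-milliseconds factor was applied to the default value inside getLong() rather than to getLong()'s result, so a cluster running on defaults behaved correctly while any explicitly configured value (given in seconds) was silently used as raw milliseconds. A self-contained illustration; the key and default are shortened, but the arithmetic is exactly what the diff changes:

  import java.util.HashMap;
  import java.util.Map;

  public class UnitScalingSketch {
    // Tiny stand-in for org.apache.hadoop.conf.Configuration#getLong.
    static long getLong(Map<String, Long> conf, String key, long defaultValue) {
      return conf.getOrDefault(key, defaultValue);
    }

    public static void main(String[] args) {
      final String KEY = "removal.scan.interval";     // configured in seconds
      final long DEFAULT_SECS = 3600;
      Map<String, Long> conf = new HashMap<>();
      conf.put(KEY, 7200L);                           // user asks for 2 hours

      long broken = getLong(conf, KEY, DEFAULT_SECS * 1000);  // 7200 ms (!)
      long fixed  = getLong(conf, KEY, DEFAULT_SECS) * 1000;  // 7200000 ms
      System.out.println(broken + " ms vs " + fixed + " ms");
    }
  }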
 



[01/50] [abbrv] hadoop git commit: HADOOP-12651. Replace dev-support with wrappers to Yetus (aw) [Forced Update!]

2016-01-27 Thread vvasudev
Repository: hadoop
Updated Branches:
  refs/heads/YARN-3926 6caa0a264 -> d328f70ec (forced update)


HADOOP-12651. Replace dev-support with wrappers to Yetus (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a867355
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a867355
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a867355

Branch: refs/heads/YARN-3926
Commit: 2a867355dfce880bb82257508862ec26bc7f16b9
Parents: 8cecad2
Author: Allen Wittenauer 
Authored: Thu Jan 21 13:03:34 2016 -0800
Committer: Allen Wittenauer 
Committed: Thu Jan 21 13:03:34 2016 -0800

--
 dev-support/README.md |  57 +++
 dev-support/bin/releasedocmaker   |  18 
 dev-support/bin/shelldocs |  18 
 dev-support/bin/smart-apply-patch |  18 
 dev-support/bin/test-patch|  18 
 dev-support/bin/yetus-wrapper | 175 +
 6 files changed, 304 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a867355/dev-support/README.md
--
diff --git a/dev-support/README.md b/dev-support/README.md
new file mode 100644
index 000..b2e3476
--- /dev/null
+++ b/dev-support/README.md
@@ -0,0 +1,57 @@
+
+
+This directory contains tools to help in the development and release of Apache 
Hadoop.
+
+* bin
+
+  * releasedocmaker
+
+Build release notes for a given Hadoop project or subproject.  This is 
called from Maven when -Preleasedocs is used.  See BUILDING.txt for more 
information.
+
+  * shelldocs
+
+Create documentation for the Unix Shell API.  This is called from Maven 
when -Pshelldocs is used.
+
+  * smart-apply-patch
+
+Intelligently apply a patch file to a source tree.
+
+  * test-patch
+
+Test a patch against a source tree.
+
+* create-release.sh
+
+  Helps REs create a release of Apache Hadoop for distribution.
+
+* determine-flaky-tests-hadoop.py
+
+  Given a jenkins test job, this script examines all runs of the job done 
within specified period of time (number of days prior to the execution time of 
this script), and reports all failed tests.
+
+* docker
+
+  Various helpers for the start-build-env.sh script, including the Dockerfile 
itself. See parent BUILDING.txt for more information.
+
+* findHangingTest.sh
+
+  Finds hanging test from Jenkins build output.
+
+
+Previously, the scripts test-patch.sh, smart-apply-patch.sh, 
releasedocmaker.py, and shelldocs.py were in this directory.  They have been 
moved to the Apache Yetus project (https://yetus.apache.org).  These scripts 
have been replaced with wrapper scripts located in the bin directory. Command 
line options are generally different than the previous versions that shipped 
with older versions of Apache Hadoop.
+
+The wrapper scripts will download, verify (if GPG is installed), and cache a 
local copy of Apache Yetus in the hadoop/patchprocess directory. The version 
that is used may be overridden by setting the HADOOP\_YETUS\_VERSION 
environment variable.  The cache directory may be overwritten by setting the 
HADOOP\_PATCHPROCESS directory.  If a local version of Apache Yetus is already 
installed, it may be used instead by setting the YETUS\_HOME environment 
variable to point to that directory.
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a867355/dev-support/bin/releasedocmaker
--
diff --git a/dev-support/bin/releasedocmaker b/dev-support/bin/releasedocmaker
new file mode 100755
index 000..16e4c7e
--- /dev/null
+++ b/dev-support/bin/releasedocmaker
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+BINDIR=$(cd -P -- "$(dirname -- "${BASH_SOURCE-0}")" >/dev/null && pwd -P)
+exec "${BINDIR}/yetus-wrapper" releasedocmaker "$@"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a867355/dev-support/bin/shelldocs

[08/50] [abbrv] hadoop git commit: MAPREDUCE-6605. Fix typos mapreduce.map.skip.proc.count.autoincr and mapreduce.reduce.skip.proc.count.autoincr in mapred-default.xml. Contributed by Kai Sasaki.

2016-01-27 Thread vvasudev
MAPREDUCE-6605. Fix typos mapreduce.map.skip.proc.count.autoincr and 
mapreduce.reduce.skip.proc.count.autoincr in mapred-default.xml. Contributed by 
Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5c8c857
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5c8c857
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5c8c857

Branch: refs/heads/YARN-3926
Commit: f5c8c857beea5b1c0a652f3097c15c06c421fbaa
Parents: b41a7e8
Author: Akira Ajisaka 
Authored: Fri Jan 22 18:35:43 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Jan 22 18:35:43 2016 +0900

--
 hadoop-mapreduce-project/CHANGES.txt|  4 
 .../src/main/resources/mapred-default.xml   | 24 
 2 files changed, 28 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5c8c857/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 373b4bb..5eb6984 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -691,6 +691,10 @@ Release 2.8.0 - UNRELEASED
 
 MAPREDUCE-6601. Fix typo in Job#setUseNewAPI. (Kai Sasaki via aajisaka)
 
+MAPREDUCE-6605. Fix typos mapreduce.map.skip.proc.count.autoincr and
+mapreduce.reduce.skip.proc.count.autoincr in mapred-default.xml.
+(Kai Sasaki via aajisaka)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5c8c857/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 962584c..efbbf53 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -912,6 +912,18 @@
 </property>
 
 <property>
+  <name>mapreduce.map.skip.proc-count.auto-incr</name>
+  <value>true</value>
+  <description>The flag which if set to true,
+  SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented by
+  MapRunner after invoking the map function. This value must be set
+  to false for applications which process the records asynchronously
+  or buffer the input records. For example streaming. In such cases
+  applications should increment this counter on their own.
+  </description>
+</property>
+
+<property>
   <name>mapreduce.reduce.skip.maxgroups</name>
   <value>0</value>
   <description> The number of acceptable skip groups surrounding the bad
@@ -927,6 +939,18 @@
 </property>
 
 <property>
+  <name>mapreduce.reduce.skip.proc-count.auto-incr</name>
+  <value>true</value>
+  <description>The flag which if set to true.
+  SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented by framework
+  after invoking the reduce function. This value must be set to false for
+  applications which process the records asynchronously or buffer the input
+  records. For example streaming. In such cases applications should increment
+  this counter on their own.
+  </description>
+</property>
+
+<property>
   <name>mapreduce.ifile.readahead</name>
   <value>true</value>
   <description>Configuration key to enable/disable IFile readahead.
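
The descriptions above are the contract that matters for streaming-style jobs: when records are processed asynchronously or buffered, the framework's automatic increment over-counts, and the application must maintain the counter itself. A hedged sketch using the org.apache.hadoop.mapred.SkipBadRecords helpers, assuming the Hadoop MapReduce client jars are on the classpath:

  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapred.Reporter;
  import org.apache.hadoop.mapred.SkipBadRecords;

  public class SkipCounterSketch {
    // At job-setup time: declare that this job buffers input, so the
    // framework must not auto-increment the processed-records counter.
    static void configure(JobConf conf) {
      SkipBadRecords.setAutoIncrMapperProcCount(conf, false);
    }

    // Inside the mapper, once a record has actually been processed:
    static void recordProcessed(Reporter reporter) {
      reporter.incrCounter(SkipBadRecords.COUNTER_GROUP,
          SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS, 1);
    }
  }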



[43/50] [abbrv] hadoop git commit: HADOOP-12735. core-default.xml misspells hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)

2016-01-27 Thread vvasudev
HADOOP-12735. core-default.xml misspells 
hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e8ab3d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e8ab3d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e8ab3d4

Branch: refs/heads/YARN-3926
Commit: 2e8ab3d46568162af6aa90b612ed61d487e7c7b0
Parents: 79d7949
Author: Colin Patrick Mccabe 
Authored: Wed Jan 27 11:39:55 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Wed Jan 27 11:39:55 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/main/resources/core-default.xml | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e8ab3d4/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3b8376f..4da20e0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1651,6 +1651,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12718. Incorrect error message by fs -put local dir without
 permission. (John Zhuge via Yongjun Zhang)
 
+HADOOP-12735. core-default.xml misspells
+hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e8ab3d4/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index c25f49e..ed3802f 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -428,7 +428,7 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
-  <name>hadoop.work.around.non.threadsafe.getpwuid</name>
+  <name>hadoop.workaround.non.threadsafe.getpwuid</name>
   <value>false</value>
   <description>Some operating systems or authentication modules are known to
   have broken implementations of getpwuid_r and getpwgid_r, such that these
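
The practical effect of the misspelling: the native-IO code reads the hadoop.workaround.non.threadsafe.getpwuid spelling, so the old core-default.xml entry documented a knob the code never consulted. With the corrected key, a plain lookup behaves as documented (sketch; default false as in the entry above):

  import org.apache.hadoop.conf.Configuration;

  public class WorkaroundKeySketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();   // loads core-default.xml
      boolean serialize = conf.getBoolean(
          "hadoop.workaround.non.threadsafe.getpwuid", false);
      System.out.println("serialize getpwuid_r calls: " + serialize);
    }
  }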



[38/50] [abbrv] hadoop git commit: HADOOP-12743. Fix git environment check during test-patch (aw)

2016-01-27 Thread vvasudev
HADOOP-12743. Fix git environment check during test-patch (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3236396
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3236396
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3236396

Branch: refs/heads/YARN-3926
Commit: d323639686eab28f1510031e52e4390f82d78989
Parents: cf8af7b
Author: Allen Wittenauer 
Authored: Tue Jan 26 15:46:57 2016 -0800
Committer: Allen Wittenauer 
Committed: Tue Jan 26 15:47:07 2016 -0800

--
 dev-support/bin/yetus-wrapper | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3236396/dev-support/bin/yetus-wrapper
--
diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper
index 37082d8..ac3e121 100755
--- a/dev-support/bin/yetus-wrapper
+++ b/dev-support/bin/yetus-wrapper
@@ -165,6 +165,7 @@ if [[ $? != 0 ]]; then
 fi
 
 if [[ -x "${HADOOP_PATCHPROCESS}/yetus-${HADOOP_YETUS_VERSION}/bin/${WANTED}" 
]]; then
+  popd >/dev/null
   exec "${HADOOP_PATCHPROCESS}/yetus-${HADOOP_YETUS_VERSION}/bin/${WANTED}" 
"${ARGV[@]}"
 fi
 



[22/50] [abbrv] hadoop git commit: YARN-4613. Fix test failure in TestClientRMService#testGetClusterNodes. (Takashi Ohnishi via rohithsharmaks)

2016-01-27 Thread vvasudev
YARN-4613. Fix test failure in TestClientRMService#testGetClusterNodes. 
(Takashi Ohnishi via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10dc2c04
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10dc2c04
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10dc2c04

Branch: refs/heads/YARN-3926
Commit: 10dc2c049304671f0ed9fd737a30cd843427b53e
Parents: 99829eb
Author: rohithsharmaks 
Authored: Sun Jan 24 23:36:15 2016 +0530
Committer: rohithsharmaks 
Committed: Sun Jan 24 23:36:15 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop/yarn/server/resourcemanager/TestClientRMService.java   | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10dc2c04/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1e9f83c..6674194 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -152,6 +152,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4574. Fix random failure in TestAMRMClientOnRMRestart.
 (Takashi Ohnishi via rohithsharmaks)
 
+YARN-4613. Fix test failure in TestClientRMService#testGetClusterNodes.
+(Takashi Ohnishi via rohithsharmaks)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10dc2c04/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 0f892cd..df95957 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -209,6 +209,7 @@ public class TestClientRMService {
 
 // Now make the node unhealthy.
 node.nodeHeartbeat(false);
+rm.NMwaitForState(node.getNodeId(), NodeState.UNHEALTHY);
 
 // Call again
 nodeReports = client.getClusterNodes(request).getNodeReports();
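
The added wait closes a heartbeat race: the RM processes the UNHEALTHY transition asynchronously on its dispatcher thread, so asserting right after nodeHeartbeat(false) could still observe the old node state. The same idea expressed as a generic polling wait, sketched with Hadoop's GenericTestUtils and reusing the rm/node identifiers from the test above (the actual fix uses the MockRM helper shown in the hunk):

  // Supplier here is com.google.common.base.Supplier, as used by
  // branch-2-era GenericTestUtils.waitFor.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      RMNode rmNode = rm.getRMContext().getRMNodes().get(node.getNodeId());
      return rmNode != null && rmNode.getState() == NodeState.UNHEALTHY;
    }
  }, 100, 10000);   // poll every 100 ms, give up after 10 s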



[19/50] [abbrv] hadoop git commit: YARN-4598. Invalid event: RESOURCE_FAILED at CONTAINER_CLEANEDUP_AFTER_KILL. Contributed by tangshangwen

2016-01-27 Thread vvasudev
YARN-4598. Invalid event: RESOURCE_FAILED at CONTAINER_CLEANEDUP_AFTER_KILL. 
Contributed by tangshangwen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46e5ea81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46e5ea81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46e5ea81

Branch: refs/heads/YARN-3926
Commit: 46e5ea81e0e41646ba02391aea31481ead28aaf8
Parents: e91e8b7
Author: Jason Lowe 
Authored: Fri Jan 22 21:55:01 2016 +
Committer: Jason Lowe 
Committed: Fri Jan 22 21:55:01 2016 +

--
 hadoop-yarn-project/CHANGES.txt| 6 ++
 .../nodemanager/containermanager/container/ContainerImpl.java  | 1 +
 2 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46e5ea81/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0ceb905..f840a9e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1389,6 +1389,9 @@ Release 2.7.3 - UNRELEASED
 YARN-4610. Reservations continue looking for one app causes other apps to
 starve (jlowe)
 
+YARN-4598. Invalid event: RESOURCE_FAILED at
+CONTAINER_CLEANEDUP_AFTER_KILL (tangshangwen via jlowe)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -2278,6 +2281,9 @@ Release 2.6.4 - UNRELEASED
 YARN-4581. AHS writer thread leak makes RM crash while RM is recovering.
 (sandflee via junping_du)
 
+YARN-4598. Invalid event: RESOURCE_FAILED at
+CONTAINER_CLEANEDUP_AFTER_KILL (tangshangwen via jlowe)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46e5ea81/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index 3c49489..fb1728a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -328,6 +328,7 @@ public class ContainerImpl implements Container {
 .addTransition(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
 ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
 EnumSet.of(ContainerEventType.KILL_CONTAINER,
+ContainerEventType.RESOURCE_FAILED,
 ContainerEventType.CONTAINER_EXITED_WITH_SUCCESS,
 ContainerEventType.CONTAINER_EXITED_WITH_FAILURE))
 



[02/50] [abbrv] hadoop git commit: HDFS-9674. The HTrace span for OpWriteBlock should record the maxWriteToDisk time. Contributed by Colin McCabe.

2016-01-27 Thread vvasudev
HDFS-9674. The HTrace span for OpWriteBlock should record the maxWriteToDisk 
time. Contributed by Colin McCabe.

Change-Id: I9bf3f3bcd57f5880189ad7c160f3dd66f97d904b


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4a05c1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4a05c1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4a05c1f

Branch: refs/heads/YARN-3926
Commit: b4a05c1fd5ae98e28cf2b15f5b068e6c2d12f2a8
Parents: 2a86735
Author: Zhe Zhang 
Authored: Thu Jan 21 13:25:42 2016 -0800
Committer: Zhe Zhang 
Committed: Thu Jan 21 13:25:42 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../hadoop/hdfs/server/datanode/BlockReceiver.java   | 11 +++
 2 files changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4a05c1f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index da16357..0ded0c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -941,6 +941,9 @@ Release 2.9.0 - UNRELEASED
 
 HDFS-9542. Move BlockIdManager from FSNamesystem to BlockManager. (jing9)
 
+HDFS-9674. The HTrace span for OpWriteBlock should record the 
maxWriteToDisk
+time. (cmccabe via zhz)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4a05c1f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 8003c76..e7908a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -58,6 +58,8 @@ import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.Tracer;
 
 import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_DONTNEED;
 import static 
org.apache.hadoop.io.nativeio.NativeIO.POSIX.SYNC_FILE_RANGE_WRITE;
@@ -136,6 +138,7 @@ class BlockReceiver implements Closeable {
   private long lastResponseTime = 0;
   private boolean isReplaceBlock = false;
   private DataOutputStream replyOut = null;
+  private long maxWriteToDiskMs = 0;
   
   private boolean pinning;
   private long lastSentTime;
@@ -302,6 +305,11 @@ class BlockReceiver implements Closeable {
*/
   @Override
   public void close() throws IOException {
+Span span = Tracer.getCurrentSpan();
+if (span != null) {
+  span.addKVAnnotation("maxWriteToDiskMs",
+Long.toString(maxWriteToDiskMs));
+}
 packetReceiver.close();
 
 IOException ioe = null;
@@ -697,6 +705,9 @@ class BlockReceiver implements Closeable {
   long begin = Time.monotonicNow();
   out.write(dataBuf.array(), startByteToDisk, numBytesToDisk);
   long duration = Time.monotonicNow() - begin;
+  if (duration > maxWriteToDiskMs) {
+maxWriteToDiskMs = duration;
+  }
   if (duration > datanodeSlowLogThresholdMs) {
 LOG.warn("Slow BlockReceiver write data to disk cost:" + duration
 + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms)");



[31/50] [abbrv] hadoop git commit: Release process for 2.7.2: Set the release date for 2.7.2

2016-01-27 Thread vvasudev
Release process for 2.7.2: Set the release date for 2.7.2


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec4d2d9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec4d2d9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec4d2d9f

Branch: refs/heads/YARN-3926
Commit: ec4d2d9f40c1cb52ca4561b3d010ffc046a73495
Parents: 992dd2f
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Authored: Mon Jan 25 15:45:12 2016 -0800
Committer: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Committed: Mon Jan 25 15:45:57 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
 hadoop-mapreduce-project/CHANGES.txt| 2 +-
 hadoop-yarn-project/CHANGES.txt | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec4d2d9f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9606296..5121a83 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1693,7 +1693,7 @@ Release 2.7.3 - UNRELEASED
 HADOOP-12706. TestLocalFsFCStatistics#testStatisticsThreadLocalDataCleanUp
 times out occasionally (Sangjin Lee and Colin Patrick McCabe via jlowe)
 
-Release 2.7.2 - UNRELEASED
+Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec4d2d9f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e5285b6..f35ae3d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2690,7 +2690,7 @@ Release 2.7.3 - UNRELEASED
 HDFS-9625. set replication for empty file failed when set storage policy
 (DENG FEI via vinayakumarb)
 
-Release 2.7.2 - UNRELEASED
+Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec4d2d9f/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ba392c3..8f35c6f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -722,7 +722,7 @@ Release 2.7.3 - UNRELEASED
 MAPREDUCE-6554. MRAppMaster servicestart failing with NPE in
 MRAppMaster#parsePreviousJobHistory (Bibin A Chundatt via jlowe)
 
-Release 2.7.2 - UNRELEASED
+Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec4d2d9f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e5049d9..41802ae 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1406,7 +1406,7 @@ Release 2.7.3 - UNRELEASED
 YARN-4598. Invalid event: RESOURCE_FAILED at
 CONTAINER_CLEANEDUP_AFTER_KILL (tangshangwen via jlowe)
 
-Release 2.7.2 - UNRELEASED
+Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES
 



[48/50] [abbrv] hadoop git commit: MAPREDUCE-6431. JobClient should be an AutoClosable (haibochen via rkanter)

2016-01-27 Thread vvasudev
MAPREDUCE-6431. JobClient should be an AutoClosable (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dca0dc8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dca0dc8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dca0dc8a

Branch: refs/heads/YARN-3926
Commit: dca0dc8ac28e843acd8b79c9560245638a539fde
Parents: ec25c7f
Author: Robert Kanter 
Authored: Wed Jan 27 17:11:07 2016 -0800
Committer: Robert Kanter 
Committed: Wed Jan 27 17:11:07 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt  |  2 ++
 .../test/java/org/apache/hadoop/mapred/TestJobClient.java | 10 ++
 .../src/main/java/org/apache/hadoop/mapred/JobClient.java |  3 ++-
 3 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca0dc8a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 68564b6..3f85a9b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -311,6 +311,8 @@ Release 2.9.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6431. JobClient should be an AutoClosable (haibochen via rkanter)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca0dc8a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
index b18b531..bf37b03 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
@@ -87,4 +87,14 @@ public class TestJobClient {
 client.getClusterHandle().getStagingAreaDir().toString()
 .equals(client.getStagingAreaDir().toString()));
   }
+
+  /**
+   * Asks the compiler to check if JobClient is AutoClosable.
+   */
+  @Test(timeout = 10000)
+  public void testAutoClosable() throws IOException {
+Configuration conf = new Configuration();
+try (JobClient jobClient = new JobClient(conf)) {
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca0dc8a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
index cf123c7..baa6221 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
@@ -137,7 +137,7 @@ import org.apache.hadoop.util.ToolRunner;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class JobClient extends CLI {
+public class JobClient extends CLI implements AutoCloseable {
 
   @InterfaceAudience.Private
   public static final String MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_KEY =
@@ -499,6 +499,7 @@ public class JobClient extends CLI {
   /**
* Close the JobClient.
*/
+  @Override
   public synchronized void close() throws IOException {
 cluster.close();
   }
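
A hedged usage sketch of what this change buys callers: JobClient can now sit in a try-with-resources block, so the underlying cluster connection is released even on exceptions. Only the default Configuration is assumed here.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobClient;

public class JobClientCloseDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    try (JobClient client = new JobClient(conf)) {
      System.out.println("staging dir: " + client.getStagingAreaDir());
    } // close() -- the overridden method above -- runs automatically here
  }
}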



[44/50] [abbrv] hadoop git commit: YARN-4462. FairScheduler: Disallow preemption from a queue. (Tao Jie via kasha)

2016-01-27 Thread vvasudev
YARN-4462. FairScheduler: Disallow preemption from a queue. (Tao Jie via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb238d7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb238d7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb238d7e

Branch: refs/heads/YARN-3926
Commit: fb238d7e5dcd96466c8938b13ca7f13cedecb08a
Parents: 2e8ab3d
Author: Karthik Kambatla 
Authored: Wed Jan 27 11:47:29 2016 -0800
Committer: Karthik Kambatla 
Committed: Wed Jan 27 12:29:06 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../scheduler/fair/AllocationConfiguration.java |  11 +-
 .../fair/AllocationFileLoaderService.java   |  16 +-
 .../scheduler/fair/FSParentQueue.java   |   8 +
 .../resourcemanager/scheduler/fair/FSQueue.java |  11 +-
 .../webapp/FairSchedulerPage.java   |   1 +
 .../webapp/dao/FairSchedulerQueueInfo.java  |   7 +
 .../scheduler/fair/TestFairScheduler.java   | 327 +++
 8 files changed, 377 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb238d7e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2fbecdb..2fae034 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -67,6 +67,8 @@ Release 2.9.0 - UNRELEASED
 YARN-1856. Added cgroups based memory monitoring for containers as another
 alternative to custom memory-monitoring. (Varun Vasudev via vinodkv)
 
+YARN-4462. FairScheduler: Disallow preemption from a queue. (Tao Jie via 
kasha)
+
   IMPROVEMENTS
 
 YARN-4072. ApplicationHistoryServer, WebAppProxyServer, NodeManager and

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb238d7e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index bf4eae8..180ae49 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -98,6 +98,8 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
   // Reservation system configuration
   private ReservationQueueConfiguration globalReservationQueueConfig;
 
+  private final Set<String> nonPreemptableQueues;
+
  public AllocationConfiguration(Map<String, Resource> minQueueResources,
  Map<String, Resource> maxQueueResources,
  Map<String, Integer> queueMaxApps, Map<String, Integer> userMaxApps,
@@ -114,7 +116,8 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
   QueuePlacementPolicy placementPolicy,
  Map<FSQueueType, Set<String>> configuredQueues,
   ReservationQueueConfiguration globalReservationQueueConfig,
-  Set<String> reservableQueues) {
+  Set<String> reservableQueues,
+  Set<String> nonPreemptableQueues) {
 this.minQueueResources = minQueueResources;
 this.maxQueueResources = maxQueueResources;
 this.queueMaxApps = queueMaxApps;
@@ -135,6 +138,7 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 this.globalReservationQueueConfig = globalReservationQueueConfig;
 this.placementPolicy = placementPolicy;
 this.configuredQueues = configuredQueues;
+this.nonPreemptableQueues = nonPreemptableQueues;
   }
   
   public AllocationConfiguration(Configuration conf) {
@@ -161,6 +165,7 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 }
 placementPolicy = QueuePlacementPolicy.fromConfiguration(conf,
 configuredQueues);
+nonPreemptableQueues = new HashSet<String>();
   }
   
   /**
@@ -210,6 +215,10 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 -1f : fairSharePreemptionThreshold;
   }
 
+  public boolean isPreemptable(String queueName) {
+return !nonPreemptableQueues.contains(queueName);
+  }
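
A self-contained sketch (not part of the patch; queue names invented) of the per-queue switch isPreemptable() exposes: queues recorded as non-preemptable are skipped when the Fair Scheduler hunts for preemption victims. In the full patch the set is populated from the allocation file.

import java.util.HashSet;
import java.util.Set;

public class PreemptableCheckDemo {
  public static void main(String[] args) {
    Set<String> nonPreemptable = new HashSet<>();
    nonPreemptable.add("root.latency-critical");   // hypothetical queue

    System.out.println(isPreemptable("root.batch", nonPreemptable));            // true
    System.out.println(isPreemptable("root.latency-critical", nonPreemptable)); // false
  }

  // mirrors the lookup in AllocationConfiguration above
  static boolean isPreemptable(String queueName, Set<String> nonPreemptable) {
    return !nonPreemptable.contains(queueName);
  }
}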

[15/50] [abbrv] hadoop git commit: YARN-4574. Fix random failure in TestAMRMClientOnRMRestart. (Takashi Ohnishi via rohithsharmaks)

2016-01-27 Thread vvasudev
YARN-4574. Fix random failure in TestAMRMClientOnRMRestart. (Takashi Ohnishi 
via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34a39007
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34a39007
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34a39007

Branch: refs/heads/YARN-3926
Commit: 34a3900773df6d9075dcf5476e8fe5208cd8f806
Parents: 2fd19b9
Author: rohithsharmaks 
Authored: Fri Jan 22 21:17:26 2016 +0530
Committer: rohithsharmaks 
Committed: Fri Jan 22 21:17:37 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java| 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34a39007/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b667b5b..a4f9874 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -146,6 +146,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4497. RM might fail to restart when recovering apps whose attempts 
are missing.
 (Jun Gong via rohithsharmaks)
 
+YARN-4574. Fix random failure in TestAMRMClientOnRMRestart.
+(Takashi Ohnishi via rohithsharmaks)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34a39007/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
index 0460f1e..0890396 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
@@ -219,6 +219,7 @@ public class TestAMRMClientOnRMRestart {
 // request
 nm1.nodeHeartbeat(containerId.getApplicationAttemptId(),
 containerId.getContainerId(), ContainerState.RUNNING);
+dispatcher.await();
 amClient.requestContainerResourceChange(
 container, Resource.newInstance(2048, 1));
 it.remove();
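
A hedged illustration (a plain JDK executor standing in for YARN's test dispatcher; all names invented) of the race the one-line dispatcher.await() closes: the heartbeat is processed on another thread, so the test must drain pending events before making the next request or assertion.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class AwaitDemo {
  public static void main(String[] args) throws InterruptedException {
    AtomicBoolean containerRunning = new AtomicBoolean(false);
    ExecutorService dispatcher = Executors.newSingleThreadExecutor();

    dispatcher.submit(() -> containerRunning.set(true)); // async heartbeat event

    // Equivalent of dispatcher.await(): let queued events finish first.
    dispatcher.shutdown();
    dispatcher.awaitTermination(10, TimeUnit.SECONDS);

    System.out.println("containerRunning=" + containerRunning.get()); // reliably true
  }
}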



[30/50] [abbrv] hadoop git commit: YARN-4520. Finished app info is unnecessarily persisted in NM state-store if container is acquired but not launched on this node. Contributed by sandflee

2016-01-27 Thread vvasudev
YARN-4520. Finished app info is unnecessarily persisted in NM state-store if 
container is acquired but not launched on this node. Contributed by sandflee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/992dd2f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/992dd2f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/992dd2f7

Branch: refs/heads/YARN-3926
Commit: 992dd2f783fc051c32727d4a45a5c61c22bf5640
Parents: d62b4a4
Author: Jian He 
Authored: Mon Jan 25 15:35:51 2016 -0800
Committer: Jian He 
Committed: Mon Jan 25 15:36:14 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt| 3 +++
 .../nodemanager/containermanager/ContainerManagerImpl.java | 6 ++
 2 files changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/992dd2f7/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8ece214..e5049d9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1327,6 +1327,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-4592. Remove unused GetContainerStatus proto. (Chang Li via aajisaka)
 
+YARN-4520. Finished app info is unnecessarily persisted in NM state-store
+if container is acquired but not launched on this node. (sandflee via 
jianhe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/992dd2f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index f44de59..d0663d5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -1310,6 +1310,12 @@ public class ContainerManagerImpl extends 
CompositeService implements
   CMgrCompletedAppsEvent appsFinishedEvent =
   (CMgrCompletedAppsEvent) event;
   for (ApplicationId appID : appsFinishedEvent.getAppsToCleanup()) {
+Application app = this.context.getApplications().get(appID);
+if (app == null) {
+  LOG.warn("couldn't find application " + appID + " while processing"
+  + " FINISH_APPS event");
+  continue;
+}
 String diagnostic = "";
 if (appsFinishedEvent.getReason() == 
CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN) {
   diagnostic = "Application killed on shutdown";



[14/50] [abbrv] hadoop git commit: HDFS-8898. Create API and command-line argument to get quota and quota usage without detailed content summary. Contributed by Ming Ma.

2016-01-27 Thread vvasudev
HDFS-8898. Create API and command-line argument to get quota and quota usage 
without detailed content summary. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2fd19b96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2fd19b96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2fd19b96

Branch: refs/heads/YARN-3926
Commit: 2fd19b9674420e025af54a5bed12eb96478f8c48
Parents: d6258b3
Author: Kihwal Lee 
Authored: Thu Jan 21 12:04:14 2016 -0600
Committer: Kihwal Lee 
Committed: Fri Jan 22 09:10:06 2016 -0600

--
 .../org/apache/hadoop/fs/ContentSummary.java| 241 -
 .../java/org/apache/hadoop/fs/FileSystem.java   |   7 +
 .../java/org/apache/hadoop/fs/QuotaUsage.java   | 359 +++
 .../java/org/apache/hadoop/fs/shell/Count.java  |  37 +-
 .../hadoop/fs/viewfs/ChRootedFileSystem.java|   7 +-
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java |  15 +-
 .../src/site/markdown/FileSystemShell.md|  11 +-
 .../org/apache/hadoop/cli/CLITestHelper.java|   2 +-
 .../apache/hadoop/fs/TestFilterFileSystem.java  |   3 +-
 .../org/apache/hadoop/fs/TestHarFileSystem.java |   1 +
 .../org/apache/hadoop/fs/TestQuotaUsage.java| 146 
 .../org/apache/hadoop/fs/shell/TestCount.java   | 109 +-
 .../src/test/resources/testConf.xml |   2 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  27 ++
 .../hadoop/hdfs/DistributedFileSystem.java  |  19 +
 .../hadoop/hdfs/protocol/ClientProtocol.java|  14 +
 .../ClientNamenodeProtocolTranslatorPB.java |  14 +
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  79 +++-
 .../src/main/proto/ClientNamenodeProtocol.proto |  10 +
 .../src/main/proto/hdfs.proto   |  11 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  16 +
 .../server/namenode/FSDirStatAndListingOp.java  |  53 +++
 .../hdfs/server/namenode/FSNamesystem.java  |  31 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +
 .../fs/viewfs/TestViewFsDefaultValue.java   |  53 ++-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  |  66 +++-
 .../server/namenode/ha/TestQuotasWithHA.java|  14 +-
 28 files changed, 1125 insertions(+), 232 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd19b96/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index 678ce7f..3dedbcc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -30,26 +30,15 @@ import org.apache.hadoop.util.StringUtils;
 /** Store the summary of a content (a directory or a file). */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class ContentSummary implements Writable{
+public class ContentSummary extends QuotaUsage implements Writable{
   private long length;
   private long fileCount;
   private long directoryCount;
-  private long quota;
-  private long spaceConsumed;
-  private long spaceQuota;
-  private long typeConsumed[];
-  private long typeQuota[];
 
-  public static class Builder{
+  /** We don't use generics. Instead override spaceConsumed and other methods
+  in order to keep backward compatibility. */
+  public static class Builder extends QuotaUsage.Builder {
 public Builder() {
-  this.quota = -1;
-  this.spaceQuota = -1;
-
-  typeConsumed = new long[StorageType.values().length];
-  typeQuota = new long[StorageType.values().length];
-  for (int i = 0; i < typeQuota.length; i++) {
-typeQuota[i] = -1;
-  }
 }
 
 public Builder length(long length) {
@@ -67,58 +56,57 @@ public class ContentSummary implements Writable{
   return this;
 }
 
+@Override
 public Builder quota(long quota){
-  this.quota = quota;
+  super.quota(quota);
   return this;
 }
 
+@Override
 public Builder spaceConsumed(long spaceConsumed) {
-  this.spaceConsumed = spaceConsumed;
+  super.spaceConsumed(spaceConsumed);
   return this;
 }
 
+@Override
 public Builder spaceQuota(long spaceQuota) {
-  this.spaceQuota = spaceQuota;
+  super.spaceQuota(spaceQuota);
   return this;
 }
 
+@Override
 public Builder typeConsumed(long typeConsumed[]) {
-  for (int i = 0; i < typeConsumed.length; i++) {
-this.typeConsumed[i] = typeConsumed[i];
-  }
+  super.typeConsumed(typeConsumed);
   return this;
 }
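
A self-contained sketch (invented names) of the builder-inheritance idiom the comment above describes: instead of generics, the subclass builder overrides each setter with a covariant return type so chained calls keep returning the subclass builder.

public class BuilderDemo {
  static class BaseBuilder {
    long quota = -1;
    BaseBuilder quota(long q) { this.quota = q; return this; }
  }

  static class SubBuilder extends BaseBuilder {
    long length;
    SubBuilder length(long l) { this.length = l; return this; }

    @Override
    SubBuilder quota(long q) {   // covariant return keeps chaining fluent
      super.quota(q);
      return this;
    }
  }

  public static void main(String[] args) {
    // Without the override, .quota(...) would return BaseBuilder and the
    // chain could not continue with SubBuilder-only setters.
    SubBuilder b = new SubBuilder().length(42).quota(100);
    System.out.println(b.quota + " " + b.length);
  }
}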

[49/50] [abbrv] hadoop git commit: HDFS-9654. Code refactoring for HDFS-8578.

2016-01-27 Thread vvasudev
HDFS-9654. Code refactoring for HDFS-8578.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/662e17b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/662e17b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/662e17b4

Branch: refs/heads/YARN-3926
Commit: 662e17b46a0f41ade6a304e12925b70b5d09fc2f
Parents: dca0dc8
Author: Tsz-Wo Nicholas Sze 
Authored: Thu Jan 28 10:56:01 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Thu Jan 28 10:58:03 2016 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hadoop/hdfs/server/common/Storage.java  |   3 +-
 .../server/datanode/BlockPoolSliceStorage.java  | 131 +
 .../hdfs/server/datanode/DataStorage.java   | 282 ++-
 .../hdfs/server/datanode/StorageLocation.java   |  15 +
 .../org/apache/hadoop/hdfs/TestReplication.java |   3 +-
 .../apache/hadoop/hdfs/UpgradeUtilities.java|   2 +-
 .../server/datanode/SimulatedFSDataset.java |   2 +-
 .../datanode/TestDataNodeHotSwapVolumes.java|  48 +++-
 .../hdfs/server/datanode/TestDataStorage.java   |   7 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   |   2 +-
 11 files changed, 297 insertions(+), 200 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/662e17b4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7e75558..a51dc15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2665,6 +2665,8 @@ Release 2.7.3 - UNRELEASED
 HDFS-9634. webhdfs client side exceptions don't provide enough details
 (Eric Payne via kihwal)
 
+HDFS-9654. Code refactoring for HDFS-8578.  (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/662e17b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 7b4b571..41719b9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -640,7 +640,8 @@ public abstract class Storage extends StorageInfo {
 rename(getLastCheckpointTmp(), curDir);
 return;
   default:
-throw new IOException("Unexpected FS state: " + curState);
+throw new IOException("Unexpected FS state: " + curState
++ " for storage directory: " + rootPath);
   }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/662e17b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index 1bb..acf10f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -18,10 +18,21 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -34,18 +45,9 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.util.Daemon;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import 

[40/50] [abbrv] hadoop git commit: YARN-4573. Fix test failure in TestRMAppTransitions#testAppRunningKill and testAppKilledKilled. (Takashi Ohnishi via rohithsharmaks)

2016-01-27 Thread vvasudev
YARN-4573. Fix test failure in TestRMAppTransitions#testAppRunningKill and 
testAppKilledKilled. (Takashi Ohnishi via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c01bee01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c01bee01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c01bee01

Branch: refs/heads/YARN-3926
Commit: c01bee010832ca31d8e60e5461181cdf05140602
Parents: 4efdf3a
Author: Rohith Sharma K S 
Authored: Wed Jan 27 08:23:02 2016 +0530
Committer: Rohith Sharma K S 
Committed: Wed Jan 27 08:23:02 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../server/resourcemanager/rmapp/TestRMAppTransitions.java  | 9 +
 2 files changed, 8 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c01bee01/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 435eb68..2fbecdb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -162,6 +162,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4612. Fix rumen and scheduler load simulator handle killed tasks 
properly.
 (Ming Ma via xgong)
 
+YARN-4573. Fix test failure in TestRMAppTransitions#testAppRunningKill and
+testAppKilledKilled. (Takashi Ohnishi via rohithsharmaks)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c01bee01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
index f2f09de..293c0b6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
@@ -275,7 +275,7 @@ public class TestRMAppTransitions {
   // Test expected newly created app state
   private static void testAppStartState(ApplicationId applicationId, 
   String user, String name, String queue, RMApp application) {
-Assert.assertTrue("application start time is not greater then 0", 
+Assert.assertTrue("application start time is not greater than 0",
 application.getStartTime() > 0);
 Assert.assertTrue("application start time is before currentTime", 
 application.getStartTime() <= System.currentTimeMillis());
@@ -300,7 +300,7 @@ public class TestRMAppTransitions {
 
   // test to make sure times are set when app finishes
   private static void assertStartTimeSet(RMApp application) {
-Assert.assertTrue("application start time is not greater then 0", 
+Assert.assertTrue("application start time is not greater than 0",
 application.getStartTime() > 0);
 Assert.assertTrue("application start time is before currentTime", 
 application.getStartTime() <= System.currentTimeMillis());
@@ -319,9 +319,9 @@ public class TestRMAppTransitions {
   // test to make sure times are set when app finishes
   private void assertTimesAtFinish(RMApp application) {
 assertStartTimeSet(application);
-Assert.assertTrue("application finish time is not greater then 0",
+Assert.assertTrue("application finish time is not greater than 0",
 (application.getFinishTime() > 0));
-Assert.assertTrue("application finish time is not >= then start time",
+Assert.assertTrue("application finish time is not >= than start time",
 (application.getFinishTime() >= application.getStartTime()));
   }
 
@@ -364,6 +364,7 @@ public class TestRMAppTransitions {
 application.getCurrentAppAttempt().handle(
 new 
RMAppAttemptEvent(application.getCurrentAppAttempt().getAppAttemptId(),
 RMAppAttemptEventType.ATTEMPT_UPDATE_SAVED));
+rmDispatcher.await();
   }
 
   protected RMApp testCreateAppNewSaving(



[17/50] [abbrv] hadoop git commit: HDFS-9525. hadoop utilities need to support provided delegation tokens (HeeSoo Kim via aw)

2016-01-27 Thread vvasudev
HDFS-9525. hadoop utilities need to support provided delegation tokens (HeeSoo 
Kim via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d22c4239
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d22c4239
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d22c4239

Branch: refs/heads/YARN-3926
Commit: d22c4239a40a1c7ed49c06038138f0e3f387b4a0
Parents: 95363bc
Author: Allen Wittenauer 
Authored: Fri Jan 22 12:15:22 2016 -0800
Committer: Allen Wittenauer 
Committed: Fri Jan 22 12:15:22 2016 -0800

--
 .../fs/CommonConfigurationKeysPublic.java   |  3 ++
 .../hadoop/security/UserGroupInformation.java   | 22 +
 .../src/main/resources/core-default.xml |  6 +++
 .../security/TestUserGroupInformation.java  | 48 +++-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 14 +++---
 .../hdfs/web/resources/DelegationParam.java |  5 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../apache/hadoop/hdfs/web/TestWebHdfsUrl.java  |  5 +-
 8 files changed, 92 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d22c4239/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index c9f758b..648ad59 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -311,6 +311,9 @@ public class CommonConfigurationKeysPublic {
   /** See core-default.xml */
   public static final String HADOOP_SECURITY_DNS_NAMESERVER_KEY =
 "hadoop.security.dns.nameserver";
+  /** See core-default.xml */
+  public static final String HADOOP_TOKEN_FILES =
+  "hadoop.token.files";
 
   /** See core-default.xml */
   public static final String HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d22c4239/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 28014bf..d33e1aa 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.security;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
+import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_TOKEN_FILES;
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 import java.io.File;
@@ -70,6 +71,7 @@ import 
org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -821,6 +823,26 @@ public class UserGroupInformation {
   }
   loginUser = proxyUser == null ? realUser : createProxyUser(proxyUser, 
realUser);
 
+  String tokenFileLocation = System.getProperty(HADOOP_TOKEN_FILES);
+  if (tokenFileLocation == null) {
+tokenFileLocation = conf.get(HADOOP_TOKEN_FILES);
+  }
+  if (tokenFileLocation != null) {
+for (String tokenFileName:
+ StringUtils.getTrimmedStrings(tokenFileLocation)) {
+  if (tokenFileName.length() > 0) {
+File tokenFile = new File(tokenFileName);
+if (tokenFile.exists() && tokenFile.isFile()) {
+  Credentials cred = Credentials.readTokenStorageFile(
+  tokenFile, conf);
+  loginUser.addCredentials(cred);
+} else {
+  LOG.info("tokenFile("+tokenFileName+") does not exist");
+}
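
A hedged, self-contained sketch mirroring the hunk above: the JVM system property hadoop.token.files wins over the Configuration key of the same name, and each named file is read as a serialized Credentials bundle. The property value and file paths here are illustrative.

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Credentials;

public class TokenFileDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String location = System.getProperty("hadoop.token.files");
    if (location == null) {
      location = conf.get("hadoop.token.files");
    }
    if (location != null) {
      for (String name : location.split(",")) {
        File tokenFile = new File(name.trim());
        if (tokenFile.exists() && tokenFile.isFile()) {
          Credentials cred = Credentials.readTokenStorageFile(tokenFile, conf);
          System.out.println(name + ": " + cred.numberOfTokens() + " token(s)");
        }
      }
    }
  }
}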

[41/50] [abbrv] hadoop git commit: HADOOP-12718. Incorrect error message by fs -put local dir without permission. (John Zhuge via Yongjun Zhang)

2016-01-27 Thread vvasudev
HADOOP-12718. Incorrect error message by fs -put local dir without permission. 
(John Zhuge via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97056c33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97056c33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97056c33

Branch: refs/heads/YARN-3926
Commit: 97056c3355810a803f07baca89b89e2bf6bb7201
Parents: c01bee0
Author: Yongjun Zhang 
Authored: Wed Jan 27 08:04:25 2016 -0800
Committer: Yongjun Zhang 
Committed: Wed Jan 27 08:04:25 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 .../src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java  | 5 +
 2 files changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97056c33/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5121a83..3b8376f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1648,6 +1648,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12731. Remove useless boxing/unboxing code.
 (Kousuke Saruta via aajisaka)
 
+HADOOP-12718. Incorrect error message by fs -put local dir without
+permission. (John Zhuge via Yongjun Zhang)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97056c33/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 352b27a..3e984e3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -33,6 +33,7 @@ import java.io.OutputStream;
 import java.io.FileDescriptor;
 import java.net.URI;
 import java.nio.ByteBuffer;
+import java.nio.file.AccessDeniedException;
 import java.nio.file.Files;
 import java.nio.file.NoSuchFileException;
 import java.nio.file.attribute.BasicFileAttributes;
@@ -463,6 +464,10 @@ public class RawLocalFileSystem extends FileSystem {
 if (localf.isDirectory()) {
   String[] names = localf.list();
   if (names == null) {
+if (!localf.canRead()) {
+  throw new AccessDeniedException("cannot open directory " + f +
+  ": Permission denied");
+}
 return null;
   }
   results = new FileStatus[names.length];
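
A hedged illustration of the java.io.File corner case this patch disambiguates: File.list() returns null both when the path is not a directory and when it cannot be read, so checking canRead() lets the shell report "Permission denied" instead of a misleading message. The path below is illustrative.

import java.io.File;
import java.nio.file.AccessDeniedException;

public class ListDirDemo {
  public static void main(String[] args) throws AccessDeniedException {
    File dir = new File("/tmp/locked-dir");   // hypothetical unreadable dir
    String[] names = dir.list();
    if (names == null && dir.isDirectory() && !dir.canRead()) {
      throw new AccessDeniedException(
          "cannot open directory " + dir + ": Permission denied");
    }
  }
}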



[32/50] [abbrv] hadoop git commit: HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently. Contributed by Mingliang Liu.

2016-01-27 Thread vvasudev
HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently. Contributed by 
Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8650fea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8650fea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8650fea

Branch: refs/heads/YARN-3926
Commit: e8650fea1f0837026cbb36ae8bf51c6133259809
Parents: ec4d2d9
Author: Jitendra Pandey 
Authored: Mon Jan 25 15:42:25 2016 -0800
Committer: Jitendra Pandey 
Committed: Mon Jan 25 16:08:46 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../apache/hadoop/hdfs/TestLeaseRecovery2.java  | 48 ++--
 2 files changed, 37 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8650fea/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f35ae3d..68d5de6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2690,6 +2690,9 @@ Release 2.7.3 - UNRELEASED
 HDFS-9625. set replication for empty file failed when set storage policy
 (DENG FEI via vinayakumarb)
 
+HDFS-9672. o.a.h.hdfs.TestLeaseRecovery2 fails intermittently (Mingliang 
Liu
+via jitendra)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8650fea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index 13e8644..e8cd476 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -21,11 +21,14 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -42,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -49,10 +53,11 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestLeaseRecovery2 {
   
@@ -85,12 +90,15 @@ public class TestLeaseRecovery2 {
* 
* @throws IOException
*/
-  @BeforeClass
-  public static void startUp() throws IOException {
+  @Before
+  public void startUp() throws IOException {
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 
-cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
+cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(5)
+.checkExitOnShutdown(false)
+.build();
 cluster.waitActive();
 dfs = cluster.getFileSystem();
   }
@@ -99,8 +107,8 @@ public class TestLeaseRecovery2 {
* stop the cluster
* @throws IOException
*/
-  @AfterClass
-  public static void tearDown() throws IOException {
+  @After
+  public void tearDown() throws IOException {
 if (cluster != null) {
   IOUtils.closeStream(dfs);
   cluster.shutdown();
@@ -419,17 +427,17 @@ public class TestLeaseRecovery2 {
* 
* @throws Exception
*/
-  @Test
+  @Test(timeout = 30000)
   public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
 

[23/50] [abbrv] hadoop git commit: HDFS-9653. Added blocks pending deletion report to dfsadmin. (Weiwei Yang via eyang)

2016-01-27 Thread vvasudev
HDFS-9653.  Added blocks pending deletion report to dfsadmin.
(Weiwei Yang via eyang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10a2bc0d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10a2bc0d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10a2bc0d

Branch: refs/heads/YARN-3926
Commit: 10a2bc0dffaece216eb9a6bac3236a086b9ece31
Parents: 10dc2c0
Author: Eric Yang 
Authored: Sun Jan 24 14:19:49 2016 -0800
Committer: Eric Yang 
Committed: Sun Jan 24 14:19:49 2016 -0800

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 34 ++--
 .../hadoop/hdfs/DistributedFileSystem.java  |  9 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java|  4 ++-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  7 
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../blockmanagement/HeartbeatManager.java   |  1 +
 .../hdfs/server/namenode/FSNamesystem.java  |  2 ++
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  2 ++
 .../TestPendingInvalidateBlock.java | 19 +++
 10 files changed, 71 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10a2bc0d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 277990f..02ef47e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1957,10 +1957,11 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  private long[] callGetStats() throws IOException {
+  private long getStateByIndex(int stateIndex) throws IOException {
 checkOpen();
 try (TraceScope ignored = tracer.newScope("getStats")) {
-  return namenode.getStats();
+  long[] states =  namenode.getStats();
+  return states.length > stateIndex ? states[stateIndex] : -1;
 }
   }
 
@@ -1968,8 +1969,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
* @see ClientProtocol#getStats()
*/
   public FsStatus getDiskStatus() throws IOException {
-long rawNums[] = callGetStats();
-return new FsStatus(rawNums[0], rawNums[1], rawNums[2]);
+return new FsStatus(getStateByIndex(0),
+getStateByIndex(1), getStateByIndex(2));
   }
 
   /**
@@ -1978,7 +1979,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
* @throws IOException
*/
   public long getMissingBlocksCount() throws IOException {
-return callGetStats()[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX];
+return getStateByIndex(ClientProtocol.
+GET_STATS_MISSING_BLOCKS_IDX);
   }
 
   /**
@@ -1987,8 +1989,17 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
* @throws IOException
*/
   public long getMissingReplOneBlocksCount() throws IOException {
-return callGetStats()[ClientProtocol.
-GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX];
+return getStateByIndex(ClientProtocol.
+GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX);
+  }
+
+  /**
+   * Returns count of blocks pending on deletion.
+   * @throws IOException
+   */
+  public long getPendingDeletionBlocksCount() throws IOException {
+return getStateByIndex(ClientProtocol.
+GET_STATS_PENDING_DELETION_BLOCKS_IDX);
   }
 
   /**
@@ -1996,7 +2007,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
* @throws IOException
*/
   public long getUnderReplicatedBlocksCount() throws IOException {
-return callGetStats()[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX];
+return getStateByIndex(ClientProtocol.
+GET_STATS_UNDER_REPLICATED_IDX);
   }
 
   /**
@@ -2004,7 +2016,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
* @throws IOException
*/
   public long getCorruptBlocksCount() throws IOException {
-return callGetStats()[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX];
+return getStateByIndex(ClientProtocol.
+GET_STATS_CORRUPT_BLOCKS_IDX);
   }
 
   /**
@@ -2014,7 +2027,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
* @throws IOException
*/
   public long getBytesInFutureBlocks() throws IOException {
-return callGetStats()[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX];
+return getStateByIndex(ClientProtocol.
+GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX);
+  }

[05/50] [abbrv] hadoop git commit: YARN-4584. RM startup failure when AM attempts greater than max-attempts. (Bibin A Chundatt via rohithsharmaks)

2016-01-27 Thread vvasudev
YARN-4584. RM startup failure when AM attempts greater than max-attempts. 
(Bibin A Chundatt via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3066810
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3066810
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3066810

Branch: refs/heads/YARN-3926
Commit: e30668106dc246f68db36fbd1f2db6ec08cd96f2
Parents: b2ffcc2
Author: Rohith Sharma K S 
Authored: Fri Jan 22 10:14:46 2016 +0530
Committer: Rohith Sharma K S 
Committed: Fri Jan 22 10:14:46 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../server/resourcemanager/rmapp/RMAppImpl.java | 23 +---
 .../server/resourcemanager/TestRMRestart.java   | 58 
 3 files changed, 77 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3066810/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f789bcb..a7a63b1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -137,6 +137,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4611. Fix scheduler load simulator to support multi-layer network
 location. (Ming Ma via xgong)
 
+YARN-4584. RM startup failure when AM attempts greater than max-attempts.
+(Bibin A Chundatt via rohithsharmaks)
+
 Release 2.8.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3066810/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 6ecc7d3..1a390df 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -841,7 +841,7 @@ public class RMAppImpl implements RMApp, Recoverable {
 this.startTime = appState.getStartTime();
 this.callerContext = appState.getCallerContext();
 // If interval > 0, some attempts might have been deleted.
-if (submissionContext.getAttemptFailuresValidityInterval() > 0) {
+if (this.attemptFailuresValidityInterval > 0) {
   this.firstAttemptIdInStateStore = appState.getFirstAttemptId();
   this.nextAttemptId = firstAttemptIdInStateStore;
 }
@@ -1341,7 +1341,9 @@ public class RMAppImpl implements RMApp, Recoverable {
   + "is " + numberOfFailure + ". The max attempts is "
   + app.maxAppAttempts);
 
-  removeExcessAttempts(app);
+  if (app.attemptFailuresValidityInterval > 0) {
+removeExcessAttempts(app);
+  }
 
   if (!app.submissionContext.getUnmanagedAM()
   && numberOfFailure < app.maxAppAttempts) {
@@ -1381,15 +1383,22 @@ public class RMAppImpl implements RMApp, Recoverable {
 }
 
 private void removeExcessAttempts(RMAppImpl app) {
-  while (app.nextAttemptId - app.firstAttemptIdInStateStore
-  > app.maxAppAttempts) {
+  while (app.nextAttemptId
+  - app.firstAttemptIdInStateStore > app.maxAppAttempts) {
 // attempts' first element is oldest attempt because it is a
 // LinkedHashMap
 ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
 app.getApplicationId(), app.firstAttemptIdInStateStore);
-app.firstAttemptIdInStateStore++;
-LOG.info("Remove attempt from state store : " + attemptId);
-app.rmContext.getStateStore().removeApplicationAttempt(attemptId);
+RMAppAttempt rmAppAttempt = app.getRMAppAttempt(attemptId);
+long endTime = app.systemClock.getTime();
+if (rmAppAttempt.getFinishTime() < (endTime
+- app.attemptFailuresValidityInterval)) {
+  app.firstAttemptIdInStateStore++;
+  LOG.info("Remove attempt from state store : " + attemptId);
+  app.rmContext.getStateStore().removeApplicationAttempt(attemptId);
+} else {
+  break;
+}
   }
 }
   }
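
A worked sketch (invented numbers) of the retention rule removeExcessAttempts now enforces: an excess attempt is purged from the state store only once its finish time has aged out of the attemptFailuresValidityInterval window; otherwise the loop stops, which is what prevents the RM startup failure on recovery.

public class ValidityWindowDemo {
  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    long validityIntervalMs = 10 * 60 * 1000L;        // example: 10-minute window

    long finishedLongAgo  = now - 15 * 60 * 1000L;    // aged out of the window
    long finishedRecently = now -  2 * 60 * 1000L;    // still inside the window

    System.out.println(removable(finishedLongAgo, now, validityIntervalMs));  // true
    System.out.println(removable(finishedRecently, now, validityIntervalMs)); // false
  }

  // same comparison as rmAppAttempt.getFinishTime() < (endTime - interval)
  static boolean removable(long finishTime, long now, long validityIntervalMs) {
    return finishTime < (now - validityIntervalMs);
  }
}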


[29/50] [abbrv] hadoop git commit: HDFS-9094. Add command line option to ask NameNode reload configuration. (Contributed by Xiaobing Zhou)

2016-01-27 Thread vvasudev
HDFS-9094. Add command line option to ask NameNode reload configuration. 
(Contributed by Xiaobing Zhou)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d62b4a4d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d62b4a4d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d62b4a4d

Branch: refs/heads/YARN-3926
Commit: d62b4a4de75edb840df6634f49cb4beb74e3fb07
Parents: 6eacdea
Author: Arpit Agarwal 
Authored: Mon Jan 25 12:17:05 2016 -0800
Committer: Arpit Agarwal 
Committed: Mon Jan 25 12:17:05 2016 -0800

--
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |   8 +
 .../hdfs/protocol/ReconfigurationProtocol.java  |   4 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../ReconfigurationProtocolServerSideUtils.java |   4 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  35 +++
 .../hdfs/server/protocol/NamenodeProtocols.java |   2 +
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 254 +--
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 162 
 8 files changed, 350 insertions(+), 122 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b4a4d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 62c5d81..8f6ed14 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -38,9 +38,11 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.protocolPB.ReconfigurationProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
@@ -496,6 +498,12 @@ public class DFSUtilClient {
 return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory);
   }
 
+  public static ReconfigurationProtocol createReconfigurationProtocolProxy(
+  InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+  SocketFactory factory) throws IOException {
+return new ReconfigurationProtocolTranslatorPB(addr, ticket, conf, 
factory);
+  }
+
   /**
* Creates a new KeyProvider from the given Configuration.
*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d62b4a4d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
index 75dc877..8370438 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReconfigurationProtocol.java
@@ -25,6 +25,7 @@ import java.util.List;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.ReconfigurationTaskStatus;
+import org.apache.hadoop.io.retry.Idempotent;
 
 /**
  * ReconfigurationProtocol is used by HDFS admin to reload configuration
@@ -39,16 +40,19 @@ public interface ReconfigurationProtocol {
   /**
* Asynchronously reload configuration on disk and apply changes.
*/
+  @Idempotent
   void startReconfiguration() throws IOException;
 
   /**
* Get the status of the previously issued reconfig task.
* @see {@link org.apache.hadoop.conf.ReconfigurationTaskStatus}.
*/
+  @Idempotent
   ReconfigurationTaskStatus getReconfigurationStatus() throws IOException;
 
   /**
* Get a list of allowed properties for reconfiguration.

[21/50] [abbrv] hadoop git commit: YARN-4614. Fix random failure in TestApplicationPriority#testApplicationPriorityAllocationWithChangeInPriority. (Sunil G via rohithsharmaks)

2016-01-27 Thread vvasudev
YARN-4614. Fix random failure in 
TestApplicationPriority#testApplicationPriorityAllocationWithChangeInPriority. 
(Sunil G via rohithsharmaks)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99829eb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99829eb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99829eb2

Branch: refs/heads/YARN-3926
Commit: 99829eb221482928d8a1b148ae3c802cc7c9253e
Parents: 618bfd6
Author: rohithsharmaks 
Authored: Sat Jan 23 07:56:15 2016 +0530
Committer: rohithsharmaks 
Committed: Sat Jan 23 07:56:57 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../scheduler/capacity/TestApplicationPriority.java   | 1 -
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99829eb2/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8e87f4a..1e9f83c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1319,6 +1319,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4605. Spelling mistake in the help message of "yarn 
applicationattempt"
 command. (Weiwei Yang via aajisaka)
 
+YARN-4614. Fix random failure in 
TestApplicationPriority#testApplicationPriority
+AllocationWithChangeInPriority. (Sunil G via rohithsharmaks)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99829eb2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
index e32a33b..1569a12 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java
@@ -490,7 +490,6 @@ public class TestApplicationPriority {
 RMApp app2 = rm.submitApp(1 * GB, appPriority2);
 
 // kick the scheduler, 1 GB which was free is given to AM of App2
-nm1.nodeHeartbeat(true);
 MockAM am2 = MockRM.launchAM(app2, rm, nm1);
 am2.registerAppAttempt();
 



[28/50] [abbrv] hadoop git commit: HADOOP-12715. TestValueQueue#testgetAtMostPolicyALL fails intermittently. Contributed by Xiao Chen.

2016-01-27 Thread vvasudev
HADOOP-12715. TestValueQueue#testgetAtMostPolicyALL fails intermittently. 
Contributed by Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6eacdea0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6eacdea0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6eacdea0

Branch: refs/heads/YARN-3926
Commit: 6eacdea0e475b4fff91cedce5005a7c11749cf64
Parents: 56a0c17
Author: Walter Su 
Authored: Mon Jan 25 19:30:04 2016 +0800
Committer: Walter Su 
Committed: Mon Jan 25 19:30:04 2016 +0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../hadoop/crypto/key/TestValueQueue.java   | 65 +++-
 2 files changed, 52 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eacdea0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2fc8ab4..9606296 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -2496,6 +2496,9 @@ Release 2.6.4 - UNRELEASED
 HADOOP-12736. TestTimedOutTestsListener#testThreadDumpAndDeadlocks
 sometimes times out. (Xiao Chen via aajisaka)
 
+HADOOP-12715. TestValueQueue#testgetAtMostPolicyALL fails intermittently.
+(Xiao Chen via waltersu4549)
+
 Release 2.6.3 - 2015-12-17
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eacdea0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
index 8e3a093..5eae9a0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestValueQueue.java
@@ -19,18 +19,24 @@ package org.apache.hadoop.crypto.key;
 
 import java.io.IOException;
 import java.util.Queue;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.crypto.key.kms.ValueQueue;
 import org.apache.hadoop.crypto.key.kms.ValueQueue.QueueRefiller;
 import org.apache.hadoop.crypto.key.kms.ValueQueue.SyncGenerationPolicy;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
+import com.google.common.base.Supplier;
 import com.google.common.collect.Sets;
 
 public class TestValueQueue {
+  Logger LOG = LoggerFactory.getLogger(TestValueQueue.class);
 
   private static class FillInfo {
 final int num;
@@ -60,7 +66,7 @@ public class TestValueQueue {
   /**
* Verifies that Queue is initially filled to "numInitValues"
*/
-  @Test
+  @Test(timeout=30000)
   public void testInitFill() throws Exception {
 MockFiller filler = new MockFiller();
 ValueQueue<String> vq =
@@ -74,7 +80,7 @@ public class TestValueQueue {
   /**
* Verifies that Queue is initialized (Warmed-up) for provided keys
*/
-  @Test
+  @Test(timeout=30000)
   public void testWarmUp() throws Exception {
 MockFiller filler = new MockFiller();
 ValueQueue<String> vq =
@@ -97,7 +103,7 @@ public class TestValueQueue {
* Verifies that the refill task is executed after "checkInterval" if
* num values below "lowWatermark"
*/
-  @Test
+  @Test(timeout=30000)
   public void testRefill() throws Exception {
 MockFiller filler = new MockFiller();
 ValueQueue<String> vq =
@@ -116,7 +122,7 @@ public class TestValueQueue {
* Verifies that the No refill Happens after "checkInterval" if
* num values above "lowWatermark"
*/
-  @Test
+  @Test(timeout=30000)
   public void testNoRefill() throws Exception {
 MockFiller filler = new MockFiller();
 ValueQueue<String> vq =
@@ -131,29 +137,56 @@ public class TestValueQueue {
   /**
* Verify getAtMost when SyncGeneration Policy = ALL
*/
-  @Test
+  @Test(timeout=30000)
   public void testgetAtMostPolicyALL() throws Exception {
 MockFiller filler = new MockFiller();
-ValueQueue<String> vq =
+final ValueQueue<String> vq =
 new ValueQueue<String>(10, 0.1f, 300, 1,
 SyncGenerationPolicy.ALL, filler);
 Assert.assertEquals("test", vq.getNext("k1"));
 Assert.assertEquals(1, filler.getTop().num);
-// Drain completely
-Assert.assertEquals(10, 

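The imports added above (GenericTestUtils, Supplier) point at the shape of the
fix: instead of asserting a refill result at a fixed instant, the test polls
until the condition holds. A sketch of that pattern, with the concrete
condition invented for illustration:

// Wait for the background refill instead of racing it.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    return filler.getTop() != null;  // has a refill been observed yet?
  }
}, 100, 30000);                      // check every 100 ms, give up after 30 s
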
[47/50] [abbrv] hadoop git commit: HDFS-9677. Rename generationStampV1/generationStampV2 to legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.

2016-01-27 Thread vvasudev
HDFS-9677. Rename generationStampV1/generationStampV2 to 
legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec25c7f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec25c7f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec25c7f9

Branch: refs/heads/YARN-3926
Commit: ec25c7f9c7e60c077d8c4143253c20445fcdaecf
Parents: 3a95713
Author: Jing Zhao 
Authored: Wed Jan 27 16:34:40 2016 -0800
Committer: Jing Zhao 
Committed: Wed Jan 27 16:34:40 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/blockmanagement/BlockIdManager.java  | 83 ++--
 .../OutOfLegacyGenerationStampsException.java   | 38 +
 .../OutOfV1GenerationStampsException.java   | 38 -
 .../hdfs/server/common/HdfsServerConstants.java |  3 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  4 +-
 .../hdfs/server/namenode/FSImageFormat.java | 12 +--
 .../server/namenode/FSImageFormatProtobuf.java  | 12 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  4 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|  4 +-
 .../blockmanagement/TestSequentialBlockId.java  | 18 ++---
 .../hdfs/server/namenode/TestEditLog.java   |  6 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |  4 +-
 .../hdfs/server/namenode/TestSaveNamespace.java |  2 +-
 15 files changed, 119 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec25c7f9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 097c051..7e75558 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -959,6 +959,9 @@ Release 2.9.0 - UNRELEASED
 HDFS-9541. Add hdfsStreamBuilder API to libhdfs to support 
defaultBlockSizes
 greater than 2 GB. (cmccabe via zhz)
 
+HDFS-9677. Rename generationStampV1/generationStampV2 to
+legacyGenerationStamp/generationStamp. (Mingliang Liu via jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec25c7f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 9c71287..3f21d9b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -36,11 +36,11 @@ public class BlockIdManager {
* The global generation stamp for legacy blocks with randomly
* generated block IDs.
*/
-  private final GenerationStamp generationStampV1 = new GenerationStamp();
+  private final GenerationStamp legacyGenerationStamp = new GenerationStamp();
   /**
* The global generation stamp for this file system.
*/
-  private final GenerationStamp generationStampV2 = new GenerationStamp();
+  private final GenerationStamp generationStamp = new GenerationStamp();
   /**
* The value of the generation stamp when the first switch to sequential
* block IDs was made. Blocks with generation stamps below this value
@@ -49,7 +49,7 @@ public class BlockIdManager {
* (or initialized as an offset from the V1 (legacy) generation stamp on
* upgrade).
*/
-  private long generationStampV1Limit;
+  private long legacyGenerationStampLimit;
   /**
* The global block ID space for this file system.
*/
@@ -57,7 +57,8 @@ public class BlockIdManager {
   private final SequentialBlockGroupIdGenerator blockGroupIdGenerator;
 
   public BlockIdManager(BlockManager blockManager) {
-this.generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
+this.legacyGenerationStampLimit =
+HdfsConstants.GRANDFATHER_GENERATION_STAMP;
 this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager);
 this.blockGroupIdGenerator = new 
SequentialBlockGroupIdGenerator(blockManager);
   }
@@ -68,14 +69,14 @@ public class BlockIdManager {
* Should be invoked only during the first upgrade to
* sequential block IDs.
*/
-  public long upgradeGenerationStampToV2() {
-Preconditions.checkState(generationStampV2.getCurrentValue() ==
+  

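In short, the rename separates the two stamp spaces by meaning rather than by
version number. A condensed sketch of the distinction (simplified from
BlockIdManager; not the verbatim method):

// Blocks with randomly generated IDs keep drawing from the legacy stamp;
// everything created after the switch to sequential IDs uses the global one.
long nextGenerationStamp(boolean legacyBlock) {
  return legacyBlock
      ? legacyGenerationStamp.nextValue()
      : generationStamp.nextValue();
}
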
[25/50] [abbrv] hadoop git commit: HADOOP-12731. Remove useless boxing/unboxing code. Contributed by Kousuke Saruta.

2016-01-27 Thread vvasudev
HADOOP-12731. Remove useless boxing/unboxing code. Contributed by Kousuke 
Saruta.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/736eb17a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/736eb17a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/736eb17a

Branch: refs/heads/YARN-3926
Commit: 736eb17a796a1c1ad5f4db2c6a64f6752db7bec3
Parents: 2b83329
Author: Akira Ajisaka 
Authored: Mon Jan 25 13:47:29 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Jan 25 13:47:29 2016 +0900

--
 .../util/TestZKSignerSecretProvider.java|  2 +-
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../java/org/apache/hadoop/fs/FileContext.java  |  4 +-
 .../org/apache/hadoop/ha/SshFenceByTcpPort.java |  4 +-
 .../apache/hadoop/io/AbstractMapWritable.java   | 54 +++-
 .../hadoop/security/ShellBasedIdMapping.java|  9 ++--
 .../java/org/apache/hadoop/util/bloom/Key.java  |  4 +-
 .../apache/hadoop/ha/TestSshFenceByTcpPort.java |  8 +--
 .../apache/hadoop/test/GenericTestUtils.java|  2 +-
 .../gridmix/DistributedCacheEmulator.java   |  4 +-
 .../hadoop/mapred/gridmix/CommonJobTest.java|  2 +-
 .../mapred/gridmix/TestPseudoLocalFs.java   |  2 +-
 .../apache/hadoop/streaming/DelayEchoApp.java   |  2 +-
 13 files changed, 43 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/736eb17a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
index 4f8b5ae..8211314 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java
@@ -35,7 +35,7 @@ public class TestZKSignerSecretProvider {
 
   // rollover every 2 sec
   private final int timeout = 4000;
-  private final long rolloverFrequency = Long.valueOf(timeout / 2);
+  private final long rolloverFrequency = timeout / 2;
 
   @Before
   public void setup() throws Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736eb17a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 36cac2f..3db68fb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1645,6 +1645,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12730. Hadoop streaming -mapper and -reducer options are wrongly
 documented as required. (Kengo Seki via aajisaka)
 
+HADOOP-12731. Remove useless boxing/unboxing code.
+(Kousuke Saruta via aajisaka)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736eb17a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 2456154..d96abad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -761,7 +761,7 @@ public class FileContext {
   @Override
   public Boolean next(final AbstractFileSystem fs, final Path p) 
 throws IOException, UnresolvedLinkException {
-return Boolean.valueOf(fs.delete(p, recursive));
+return fs.delete(p, recursive);
   }
 }.resolve(this, absF);
   }
@@ -895,7 +895,7 @@ public class FileContext {
   @Override
   public Boolean next(final AbstractFileSystem fs, final Path p) 
 throws IOException, UnresolvedLinkException {
-return Boolean.valueOf(fs.setReplication(p, replication));
+return fs.setReplication(p, replication);
   }
 }.resolve(this, absF);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/736eb17a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java

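The pattern being cleaned up is visible in the FileContext hunks: since Java 5,
assigning a primitive to its wrapper auto-boxes, so the explicit factory call
is dead weight.

Boolean before = Boolean.valueOf(fs.delete(p, recursive));  // redundant boxing
Boolean after  = fs.delete(p, recursive);                   // auto-boxed
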
[42/50] [abbrv] hadoop git commit: HADOOP-12492. maven install triggers bats test (aw)

2016-01-27 Thread vvasudev
HADOOP-12492. maven install triggers bats test (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79d7949f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79d7949f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79d7949f

Branch: refs/heads/YARN-3926
Commit: 79d7949fbb19928e0cae4f6b5dd9f1af82242f53
Parents: 97056c3
Author: Allen Wittenauer 
Authored: Wed Jan 27 11:27:27 2016 -0800
Committer: Allen Wittenauer 
Committed: Wed Jan 27 11:27:27 2016 -0800

--
 hadoop-common-project/hadoop-common/pom.xml | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79d7949f/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 68ad350..7e4d090 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -1028,7 +1028,9 @@
     <profile>
       <id>shelltest</id>
       <activation>
-        <activeByDefault>true</activeByDefault>
+        <property>
+          <name>!skipTests</name>
+        </property>
       </activation>
       <build>
         <plugins>
@@ -1037,7 +1039,7 @@
             <executions>
               <execution>
                 <id>common-test-bats-driver</id>
-                <phase>process-test-classes</phase>
+                <phase>test</phase>
                 <goals>
                   <goal>run</goal>
                 </goals>



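Net effect: the bats shell tests move off always-on activation to the
property-based !skipTests trigger and run in the standard test phase, so
"mvn install -DskipTests" no longer executes them while a plain "mvn test"
still does.
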
[27/50] [abbrv] hadoop git commit: YARN-4592. Remove unused GetContainerStatus proto. Contributed by Chang Li.

2016-01-27 Thread vvasudev
YARN-4592. Remove unused GetContainerStatus proto. Contributed by Chang Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56a0c175
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56a0c175
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56a0c175

Branch: refs/heads/YARN-3926
Commit: 56a0c175082b2b62698f65c1769a64224b3fb821
Parents: 6432279
Author: Akira Ajisaka 
Authored: Mon Jan 25 15:20:29 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Jan 25 15:20:29 2016 +0900

--
 hadoop-yarn-project/CHANGES.txt   | 2 ++
 .../hadoop-yarn-api/src/main/proto/yarn_service_protos.proto  | 7 ---
 2 files changed, 2 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a0c175/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6674194..8ece214 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1325,6 +1325,8 @@ Release 2.8.0 - UNRELEASED
 YARN-4614. Fix random failure in 
TestApplicationPriority#testApplicationPriority
 AllocationWithChangeInPriority. (Sunil G via rohithsharmaks)
 
+YARN-4592. Remove unused GetContainerStatus proto. (Chang Li via aajisaka)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56a0c175/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index 115df9a..eae840b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -262,13 +262,6 @@ message StopContainerRequestProto {
 message StopContainerResponseProto {
 }
 
-message GetContainerStatusRequestProto {
-  optional ContainerIdProto container_id = 1;
-}
-
-message GetContainerStatusResponseProto {
-  optional ContainerStatusProto status = 1;
-}
 
  bulk API records
 message StartContainersRequestProto {



[06/50] [abbrv] hadoop git commit: HDFS-9618. Fix mismatch between log level and guard in BlockManager#computeRecoveryWorkForBlocks (iwasakims)

2016-01-27 Thread vvasudev
HDFS-9618. Fix mismatch between log level and guard in 
BlockManager#computeRecoveryWorkForBlocks (iwasakims)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae9c61ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae9c61ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae9c61ff

Branch: refs/heads/YARN-3926
Commit: ae9c61ff0a90b070a5b7b2e7160d726e92c8eacf
Parents: e306681
Author: Masatake Iwasaki 
Authored: Fri Jan 22 14:11:48 2016 +0900
Committer: Masatake Iwasaki 
Committed: Fri Jan 22 14:11:48 2016 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java | 5 ++---
 2 files changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae9c61ff/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 140be77..8c26ee7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -951,6 +951,9 @@ Release 2.9.0 - UNRELEASED
 HDFS-9601. NNThroughputBenchmark.BlockReportStats should handle
 NotReplicatedYetException on adding block (iwasakims)
 
+HDFS-9618. Fix mismatch between log level and guard in
+BlockManager#computeRecoveryWorkForBlocks (iwasakims)
+
 HDFS-9621. getListing wrongly associates Erasure Coding policy to 
pre-existing
 replicated files under an EC directory. (jing9)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae9c61ff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bf63708..d255471 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1488,7 +1488,7 @@ public class BlockManager implements BlockStatsMXBean {
   namesystem.writeUnlock();
 }
 
-if (blockLog.isInfoEnabled()) {
+if (blockLog.isDebugEnabled()) {
   // log which blocks have been scheduled for replication
   for(BlockRecoveryWork rw : recovWork){
 DatanodeStorageInfo[] targets = rw.getTargets();
@@ -1502,8 +1502,7 @@ public class BlockManager implements BlockStatsMXBean {
   rw.getBlock(), targetList);
 }
   }
-}
-if (blockLog.isDebugEnabled()) {
+
   blockLog.debug("BLOCK* neededReplications = {} pendingReplications = {}",
   neededReplications.size(), pendingReplications.size());
 }



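The rule the patch restores is that a guard must match the level of the calls
it protects; otherwise debug-only work runs (or is skipped) at the wrong
level. Schematically, with the message text invented for illustration:

// Before: the loop body logs at debug but runs whenever info is enabled.
if (blockLog.isInfoEnabled()) {
  blockLog.debug("scheduled {} block recoveries", recovWork.size());
}
// After: guard and call level agree.
if (blockLog.isDebugEnabled()) {
  blockLog.debug("scheduled {} block recoveries", recovWork.size());
}
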
[20/50] [abbrv] hadoop git commit: YARN-4496. Improve HA ResourceManager Failover detection on the client. Contributed by Jian He

2016-01-27 Thread vvasudev
YARN-4496. Improve HA ResourceManager Failover detection on the client.
Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/618bfd6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/618bfd6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/618bfd6a

Branch: refs/heads/YARN-3926
Commit: 618bfd6ac2a5b62d39e9bed80f75362bafc0ef28
Parents: 46e5ea8
Author: Xuan 
Authored: Fri Jan 22 18:20:38 2016 -0800
Committer: Xuan 
Committed: Fri Jan 22 18:20:38 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 ...stHedgingRequestRMFailoverProxyProvider.java |  98 ++
 .../ConfiguredRMFailoverProxyProvider.java  |   6 +-
 .../org/apache/hadoop/yarn/client/RMProxy.java  |  33 ++--
 .../RequestHedgingRMFailoverProxyProvider.java  | 194 +++
 .../nodemanager/TestNodeStatusUpdater.java  |   7 +-
 .../hadoop/yarn/server/MiniYARNCluster.java |   1 +
 7 files changed, 323 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/618bfd6a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f840a9e..8e87f4a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -104,6 +104,9 @@ Release 2.9.0 - UNRELEASED
 YARN-4603. FairScheduler should mention user requested queuename in error 
 message when failed in queue ACL check. (Tao Jie via kasha)
 
+YARN-4496. Improve HA ResourceManager Failover detection on the client.
+(Jian He via xgong)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/618bfd6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java
new file mode 100644
index 000..6fd6591
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestHedgingRequestRMFailoverProxyProvider.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.MiniYARNCluster;
+import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil;
+import org.junit.Assert;
+import org.junit.Test;
+
+
+public class TestHedgingRequestRMFailoverProxyProvider {
+
+  @Test
+  public void testHedgingRequestProxyProvider() throws Exception {
+final MiniYARNCluster cluster =
+new MiniYARNCluster("testHedgingRequestProxyProvider", 5, 0, 1, 1);
+Configuration conf = new YarnConfiguration();
+
+conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
+conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
+conf.set(YarnConfiguration.RM_CLUSTER_ID, "cluster1");
+conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2,rm3,rm4,rm5");
+
+conf.set(YarnConfiguration.CLIENT_FAILOVER_PROXY_PROVIDER,
+RequestHedgingRMFailoverProxyProvider.class.getName());
+conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
+2000);
+
+HATestUtil.setRpcAddressForRM("rm1", 1, conf);
+HATestUtil.setRpcAddressForRM("rm2", 2, conf);
+HATestUtil.setRpcAddressForRM("rm3", 3, conf);
+HATestUtil.setRpcAddressForRM("rm4", 4, conf);
+

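The idea behind the new provider, as its name suggests, is to hedge: on
failover it invokes all configured RMs concurrently and keeps the first one
that responds successfully as the active. A self-contained sketch of that
core idea (not the provider's actual internals):

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;

class HedgingSketch {
  // Each probe wraps one RM proxy invocation; the first success wins, and
  // failed probes (standby or unreachable RMs) are consumed and ignored.
  static <T> T firstToRespond(List<Callable<T>> probes,
      ExecutorService executor) throws Exception {
    ExecutorCompletionService<T> pool =
        new ExecutorCompletionService<>(executor);
    for (Callable<T> probe : probes) {
      pool.submit(probe);                 // fan out to every RM at once
    }
    Exception last = new Exception("no RM probes were supplied");
    for (int i = 0; i < probes.size(); i++) {
      try {
        return pool.take().get();         // first successful responder
      } catch (Exception e) {
        last = e;                         // that RM was standby or down
      }
    }
    throw last;                           // nobody answered
  }
}
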
[18/50] [abbrv] hadoop git commit: YARN-4371. "yarn application -kill" should take multiple application ids. Contributed by Sunil G

2016-01-27 Thread vvasudev
YARN-4371. "yarn application -kill" should take multiple application ids. 
Contributed by Sunil G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e91e8b71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e91e8b71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e91e8b71

Branch: refs/heads/YARN-3926
Commit: e91e8b711c68273460b36557fc37fdfc86be097b
Parents: d22c423
Author: Jason Lowe 
Authored: Fri Jan 22 21:36:15 2016 +
Committer: Jason Lowe 
Committed: Fri Jan 22 21:36:15 2016 +

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  57 ++--
 .../hadoop/yarn/client/cli/TestYarnCLI.java | 141 +--
 3 files changed, 183 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e91e8b71/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a4f9874..0ceb905 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -733,6 +733,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-4524. Cleanup AppSchedulingInfo. (Karthik Kambatla via wangda)
 
+YARN-4371. "yarn application -kill" should take multiple application ids
+(Sunil G via jlowe)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e91e8b71/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
index 77e0688..caa4d46 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
@@ -23,6 +23,7 @@ import java.io.OutputStreamWriter;
 import java.io.PrintWriter;
 import java.nio.charset.Charset;
 import java.text.DecimalFormat;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
@@ -103,7 +104,6 @@ public class ApplicationCLI extends YarnCLI {
   + "Supports optional use of -appTypes to filter applications "
   + "based on application type, "
   + "and -appStates to filter applications based on application 
state.");
-  opts.addOption(KILL_CMD, true, "Kills the application.");
   opts.addOption(MOVE_TO_QUEUE_CMD, true, "Moves the application to a "
   + "different queue.");
   opts.addOption(QUEUE_CMD, true, "Works with the movetoqueue command to"
@@ -127,7 +127,12 @@ public class ApplicationCLI extends YarnCLI {
   opts.addOption(UPDATE_PRIORITY, true,
   "update priority of an application. ApplicationId can be"
   + " passed using 'appId' option.");
-  opts.getOption(KILL_CMD).setArgName("Application ID");
+  Option killOpt = new Option(KILL_CMD, true, "Kills the application. "
+  + "Set of applications can be provided separated with space");
+  killOpt.setValueSeparator(' ');
+  killOpt.setArgs(Option.UNLIMITED_VALUES);
+  killOpt.setArgName("Application ID");
+  opts.addOption(killOpt);
   opts.getOption(MOVE_TO_QUEUE_CMD).setArgName("Application ID");
   opts.getOption(QUEUE_CMD).setArgName("Queue Name");
   opts.getOption(STATUS_CMD).setArgName("Application ID");
@@ -239,15 +244,11 @@ public class ApplicationCLI extends YarnCLI {
 listContainers(cliParser.getOptionValue(LIST_CMD));
   }
 } else if (cliParser.hasOption(KILL_CMD)) {
-  if (args.length != 3) {
+  if (args.length < 3 || hasAnyOtherCLIOptions(cliParser, opts, KILL_CMD)) 
{
 printUsage(title, opts);
 return exitCode;
   }
-  try{
-killApplication(cliParser.getOptionValue(KILL_CMD));
-  } catch (ApplicationNotFoundException e) {
-return exitCode;
-  }
+  return killApplication(cliParser.getOptionValues(KILL_CMD));
 } else if (cliParser.hasOption(MOVE_TO_QUEUE_CMD)) {
   if (!cliParser.hasOption(QUEUE_CMD)) {
 printUsage(title, opts);
@@ -482,6 +483,30 @@ public class ApplicationCLI extends YarnCLI {
   }
 
   /**
+   * Kills applications with the application id as appId
+   *
+   * @param Array of applicationIds
+   * @return 

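With the option now declared with an unlimited argument count and a space
separator, an invocation such as "yarn application -kill
application_1452267331320_0001 application_1452267331320_0002" (IDs invented
for illustration) kills both applications, while mixing -kill with any other
option falls through to the usage message via hasAnyOtherCLIOptions, as the
hunk above shows.
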
[50/50] [abbrv] hadoop git commit: YARN-4081. Add support for multiple resource types in the Resource class. (Varun Vasudev via wangda)

2016-01-27 Thread vvasudev
YARN-4081. Add support for multiple resource types in the Resource class. 
(Varun Vasudev via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d328f70e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d328f70e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d328f70e

Branch: refs/heads/YARN-3926
Commit: d328f70ec21018e3440ce67c47f8229c80c45ef7
Parents: 662e17b
Author: Wangda Tan 
Authored: Thu Sep 10 09:43:26 2015 -0700
Committer: Varun Vasudev 
Committed: Thu Jan 28 12:11:24 2016 +0530

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/api/protocolrecords/ResourceTypes.java |  27 +++
 .../hadoop/yarn/api/records/Resource.java   | 197 +++--
 .../yarn/api/records/ResourceInformation.java   | 218 +++
 .../exceptions/ResourceNotFoundException.java   |  45 
 .../hadoop/yarn/util/UnitsConversionUtil.java   | 197 +
 .../src/main/proto/yarn_protos.proto|  12 +
 .../yarn/conf/TestResourceInformation.java  |  70 ++
 .../yarn/util/TestUnitsConversionUtil.java  | 120 ++
 .../yarn/api/records/impl/pb/ProtoUtils.java|  14 ++
 .../api/records/impl/pb/ResourcePBImpl.java | 199 +++--
 .../hadoop/yarn/util/resource/Resources.java| 120 +++---
 .../hadoop/yarn/api/TestPBImplRecords.java  |   3 +
 13 files changed, 1149 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d328f70e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2fae034..1ce1ffc 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -381,6 +381,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4265. Provide new timeline plugin storage to support fine-grained 
entity
 caching. (Li Lu and Jason Lowe via junping_du)
 
+YARN-4081. Add support for multiple resource types in the Resource
+class. (Varun Vasudev via wangda)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d328f70e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ResourceTypes.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ResourceTypes.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ResourceTypes.java
new file mode 100644
index 000..dbd9c37
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ResourceTypes.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+/**
+ * Enum which represents the resource type. Currently, the only type allowed is
+ * COUNTABLE.
+ */
+public enum ResourceTypes {
+  COUNTABLE
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d328f70e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index 88b57f1..4ba5397 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -22,8 +22,12 @@ import 

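Schematically, the change turns Resource's two hard-coded fields (memory,
vcores) into a map of named, typed resource entries, so adding a new countable
type needs no new Resource getters. An illustration of the data model only,
not the committed API:

import java.util.HashMap;
import java.util.Map;

class ResourceModelSketch {
  public static void main(String[] args) {
    Map<String, Long> resources = new HashMap<>();
    resources.put("memory-mb", 4096L);
    resources.put("vcores", 8L);
    resources.put("gpu", 2L);  // hypothetical extra type: just another entry
    System.out.println(resources);
  }
}
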
[13/50] [abbrv] hadoop git commit: HDFS-8898. Create API and command-line argument to get quota and quota usage without detailed content summary. Contributed by Ming Ma.

2016-01-27 Thread vvasudev
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2fd19b96/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index 961ae0e..9798787 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -134,6 +135,7 @@ public class TestQuota {
   
   // 4: count -q /test
   ContentSummary c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getFileCount()+c.getDirectoryCount(), 3);
   assertEquals(c.getQuota(), 3);
   assertEquals(c.getSpaceConsumed(), fileLen*replication);
@@ -141,10 +143,12 @@ public class TestQuota {
   
   // 5: count -q /test/data0
   c = dfs.getContentSummary(childDir0);
+  compareQuotaUsage(c, dfs, childDir0);
   assertEquals(c.getFileCount()+c.getDirectoryCount(), 1);
   assertEquals(c.getQuota(), -1);
   // check disk space consumed
   c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getSpaceConsumed(), fileLen*replication);
 
   // 6: create a directory /test/data1
@@ -172,12 +176,14 @@ public class TestQuota {
   // 8: clear quota /test
   runCommand(admin, new String[]{"-clrQuota", parent.toString()}, false);
   c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getQuota(), -1);
   assertEquals(c.getSpaceQuota(), spaceQuota);
   
   // 9: clear quota /test/data0
   runCommand(admin, new String[]{"-clrQuota", childDir0.toString()}, 
false);
   c = dfs.getContentSummary(childDir0);
+  compareQuotaUsage(c, dfs, childDir0);
   assertEquals(c.getQuota(), -1);
   
   // 10: create a file /test/datafile1
@@ -198,6 +204,7 @@ public class TestQuota {
   // 9.s: clear diskspace quota
   runCommand(admin, false, "-clrSpaceQuota", parent.toString());
   c = dfs.getContentSummary(parent);
+  compareQuotaUsage(c, dfs, parent);
   assertEquals(c.getQuota(), -1);
   assertEquals(c.getSpaceQuota(), -1);   
   
@@ -224,6 +231,7 @@ public class TestQuota {
   }
   assertTrue(hasException);
   c = dfs.getContentSummary(childDir0);
+  compareQuotaUsage(c, dfs, childDir0);
   assertEquals(c.getDirectoryCount()+c.getFileCount(), 1);
   assertEquals(c.getQuota(), 1);
   
@@ -362,7 +370,7 @@ public class TestQuota {
   }
   assertTrue(hasException);
 
-  assertEquals(4, 
cluster.getNamesystem().getFSDirectory().getYieldCount());
+  assertEquals(5, 
cluster.getNamesystem().getFSDirectory().getYieldCount());
 } finally {
   cluster.shutdown();
 }
@@ -387,6 +395,7 @@ public class TestQuota {
   final Path quotaDir1 = new Path("/nqdir0/qdir1");
   dfs.setQuota(quotaDir1, 6, HdfsConstants.QUOTA_DONT_SET);
   ContentSummary c = dfs.getContentSummary(quotaDir1);
+  compareQuotaUsage(c, dfs, quotaDir1);
   assertEquals(c.getDirectoryCount(), 3);
   assertEquals(c.getQuota(), 6);
 
@@ -394,6 +403,7 @@ public class TestQuota {
   final Path quotaDir2 = new Path("/nqdir0/qdir1/qdir20");
   dfs.setQuota(quotaDir2, 7, HdfsConstants.QUOTA_DONT_SET);
   c = dfs.getContentSummary(quotaDir2);
+  compareQuotaUsage(c, dfs, quotaDir2);
   assertEquals(c.getDirectoryCount(), 2);
   assertEquals(c.getQuota(), 7);
 
@@ -402,6 +412,7 @@ public class TestQuota {
   assertTrue(dfs.mkdirs(quotaDir3));
   dfs.setQuota(quotaDir3, 2, HdfsConstants.QUOTA_DONT_SET);
   c = dfs.getContentSummary(quotaDir3);
+  compareQuotaUsage(c, dfs, quotaDir3);
   assertEquals(c.getDirectoryCount(), 1);
   assertEquals(c.getQuota(), 2);
 
@@ -409,6 +420,7 @@ public class TestQuota {
   Path tempPath = new Path(quotaDir3, "nqdir32");
   assertTrue(dfs.mkdirs(tempPath));
   c = dfs.getContentSummary(quotaDir3);
+  compareQuotaUsage(c, dfs, quotaDir3);
   assertEquals(c.getDirectoryCount(), 2);
   assertEquals(c.getQuota(), 2);
 
@@ -422,6 +434,7 @@ public class TestQuota {
   }
   assertTrue(hasException);
   c = dfs.getContentSummary(quotaDir3);
+  compareQuotaUsage(c, dfs, quotaDir3);
   

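The repeated compareQuotaUsage(c, dfs, path) calls presumably assert that the
new lightweight API agrees with the full ContentSummary computation. A sketch
of that check (dfs is a DistributedFileSystem; the assertion choices here are
illustrative):

// getQuotaUsage returns quota and usage counters without the recursive
// content walk that getContentSummary performs.
QuotaUsage qu = dfs.getQuotaUsage(dir);
ContentSummary cs = dfs.getContentSummary(dir);
Assert.assertEquals(cs.getQuota(), qu.getQuota());
Assert.assertEquals(cs.getSpaceQuota(), qu.getSpaceQuota());
Assert.assertEquals(cs.getSpaceConsumed(), qu.getSpaceConsumed());
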
[37/50] [abbrv] hadoop git commit: HDFS-9541. Add hdfsStreamBuilder API to libhdfs to support defaultBlockSizes greater than 2 GB. Contributed by Colin Patrick McCabe.

2016-01-27 Thread vvasudev
HDFS-9541. Add hdfsStreamBuilder API to libhdfs to support defaultBlockSizes 
greater than 2 GB. Contributed by Colin Patrick McCabe.

Change-Id: Ifce1b9be534dc8f7e9d2634cd60e423921b9810f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf8af7bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf8af7bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf8af7bb

Branch: refs/heads/YARN-3926
Commit: cf8af7bb459b21babaad2d972330a3b4c6bb222d
Parents: d0d7c22
Author: Zhe Zhang 
Authored: Tue Jan 26 11:24:57 2016 -0800
Committer: Zhe Zhang 
Committed: Tue Jan 26 11:24:57 2016 -0800

--
 .../src/main/native/libhdfs/hdfs.c  | 98 +++-
 .../src/main/native/libhdfs/include/hdfs/hdfs.h | 89 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 3 files changed, 187 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf8af7bb/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
index c5aad1d..4618dbb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.c
@@ -836,9 +836,95 @@ static jthrowable getDefaultBlockSize(JNIEnv *env, jobject 
jFS,
 return NULL;
 }
 
-hdfsFile hdfsOpenFile(hdfsFS fs, const char *path, int flags, 
+hdfsFile hdfsOpenFile(hdfsFS fs, const char *path, int flags,
   int bufferSize, short replication, tSize blockSize)
 {
+struct hdfsStreamBuilder *bld = hdfsStreamBuilderAlloc(fs, path, flags);
+if (bufferSize != 0) {
+  hdfsStreamBuilderSetBufferSize(bld, bufferSize);
+}
+if (replication != 0) {
+  hdfsStreamBuilderSetReplication(bld, replication);
+}
+if (blockSize != 0) {
+  hdfsStreamBuilderSetDefaultBlockSize(bld, blockSize);
+}
+return hdfsStreamBuilderBuild(bld);
+}
+
+struct hdfsStreamBuilder {
+hdfsFS fs;
+int flags;
+int32_t bufferSize;
+int16_t replication;
+int64_t defaultBlockSize;
+char path[1];
+};
+
+struct hdfsStreamBuilder *hdfsStreamBuilderAlloc(hdfsFS fs,
+const char *path, int flags)
+{
+int path_len = strlen(path);
+struct hdfsStreamBuilder *bld;
+
+// sizeof(hdfsStreamBuilder->path) includes one byte for the string
+// terminator
+bld = malloc(sizeof(struct hdfsStreamBuilder) + path_len);
+if (!bld) {
+errno = ENOMEM;
+return NULL;
+}
+bld->fs = fs;
+bld->flags = flags;
+bld->bufferSize = 0;
+bld->replication = 0;
+bld->defaultBlockSize = 0;
+memcpy(bld->path, path, path_len);
+bld->path[path_len] = '\0';
+return bld;
+}
+
+void hdfsStreamBuilderFree(struct hdfsStreamBuilder *bld)
+{
+free(bld);
+}
+
+int hdfsStreamBuilderSetBufferSize(struct hdfsStreamBuilder *bld,
+   int32_t bufferSize)
+{
+if ((bld->flags & O_ACCMODE) != O_WRONLY) {
+errno = EINVAL;
+return -1;
+}
+bld->bufferSize = bufferSize;
+return 0;
+}
+
+int hdfsStreamBuilderSetReplication(struct hdfsStreamBuilder *bld,
+int16_t replication)
+{
+if ((bld->flags & O_ACCMODE) != O_WRONLY) {
+errno = EINVAL;
+return -1;
+}
+bld->replication = replication;
+return 0;
+}
+
+int hdfsStreamBuilderSetDefaultBlockSize(struct hdfsStreamBuilder *bld,
+ int64_t defaultBlockSize)
+{
+if ((bld->flags & O_ACCMODE) != O_WRONLY) {
+errno = EINVAL;
+return -1;
+}
+bld->defaultBlockSize = defaultBlockSize;
+return 0;
+}
+
+static hdfsFile hdfsOpenFileImpl(hdfsFS fs, const char *path, int flags,
+  int32_t bufferSize, int16_t replication, int64_t blockSize)
+{
 /*
   JAVA EQUIVALENT:
File f = new File(path);
@@ -1037,6 +1123,16 @@ done:
 return file;
 }
 
+hdfsFile hdfsStreamBuilderBuild(struct hdfsStreamBuilder *bld)
+{
+hdfsFile file = hdfsOpenFileImpl(bld->fs, bld->path, bld->flags,
+  bld->bufferSize, bld->replication, bld->defaultBlockSize);
+int prevErrno = errno;
+hdfsStreamBuilderFree(bld);
+errno = prevErrno;
+return file;
+}
+
 int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength)
 {
 jobject jFS = (jobject)fs;


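The motivation for the builder shape: hdfsOpenFile's blockSize parameter is a
32-bit tSize, so default block sizes above 2 GB cannot be expressed through
the old signature, while hdfsStreamBuilderSetDefaultBlockSize takes an
int64_t. The old hdfsOpenFile stays source-compatible as a thin wrapper that
allocates a builder, applies the non-zero legacy arguments, and calls
hdfsStreamBuilderBuild.
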
[24/50] [abbrv] hadoop git commit: Added MAPREDUCE-6614 to 2.8.0 in CHANGES.txt.

2016-01-27 Thread vvasudev
Added MAPREDUCE-6614 to 2.8.0 in CHANGES.txt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b833297
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b833297
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b833297

Branch: refs/heads/YARN-3926
Commit: 2b833297ceb523d39e683fcd34ed8ab9b5651bcf
Parents: 10a2bc0
Author: Akira Ajisaka 
Authored: Mon Jan 25 11:41:19 2016 +0900
Committer: Akira Ajisaka 
Committed: Mon Jan 25 11:41:19 2016 +0900

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b833297/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 5eb6984..ba392c3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -695,6 +695,9 @@ Release 2.8.0 - UNRELEASED
 mapreduce.reduce.skip.proc.count.autoincr in mapred-default.xml.
 (Kai Sasaki via aajisaka)
 
+MAPREDUCE-6614. Remove unnecessary code in TestMapreduceConfigFields.
+(Kai Sasaki via aajisaka)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[09/50] [abbrv] hadoop git commit: YARN-4605. Spelling mistake in the help message of "yarn applicationattempt" command. Contributed by Weiwei Yang.

2016-01-27 Thread vvasudev
YARN-4605. Spelling mistake in the help message of "yarn applicationattempt" 
command. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f58f742
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f58f742
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f58f742

Branch: refs/heads/YARN-3926
Commit: 8f58f742aea87b2b46b9741ffeaebfa36af3573f
Parents: f5c8c85
Author: Akira Ajisaka 
Authored: Fri Jan 22 19:43:06 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Jan 22 19:43:06 2016 +0900

--
 hadoop-yarn-project/CHANGES.txt| 3 +++
 .../java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java | 4 ++--
 .../java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java| 2 +-
 .../hadoop/yarn/server/resourcemanager/ClientRMService.java| 6 +++---
 .../yarn/server/resourcemanager/TestClientRMService.java   | 6 +++---
 5 files changed, 12 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f58f742/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 77b4eb4..2230b42 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1304,6 +1304,9 @@ Release 2.8.0 - UNRELEASED
 YARN-4608. Redundant code statement in WritingYarnApplications.
 (Kai Sasaki via aajisaka)
 
+YARN-4605. Spelling mistake in the help message of "yarn 
applicationattempt"
+command. (Weiwei Yang via aajisaka)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f58f742/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
index bdd6215..77e0688 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
@@ -138,7 +138,7 @@ public class ApplicationCLI extends YarnCLI {
   opts.addOption(STATUS_CMD, true,
   "Prints the status of the application attempt.");
   opts.addOption(LIST_CMD, true,
-  "List application attempts for aplication.");
+  "List application attempts for application.");
   opts.addOption(FAIL_CMD, true, "Fails application attempt.");
   opts.addOption(HELP_CMD, false, "Displays help for all commands.");
   opts.getOption(STATUS_CMD).setArgName("Application Attempt ID");
@@ -712,7 +712,7 @@ public class ApplicationCLI extends YarnCLI {
   throws YarnException, IOException {
 ApplicationId appId = ConverterUtils.toApplicationId(applicationId);
 Priority newAppPriority = Priority.newInstance(Integer.parseInt(priority));
-sysout.println("Updating priority of an aplication " + applicationId);
+sysout.println("Updating priority of an application " + applicationId);
 Priority updateApplicationPriority =
 client.updateApplicationPriority(appId, newAppPriority);
 if (newAppPriority.equals(updateApplicationPriority)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f58f742/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index 1a77c7c..95f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -1721,7 +1721,7 @@ public class TestYarnCLI {
 pw.println(" -fail  Fails application 
attempt.");
 pw.println(" -help  Displays help for all 
commands.");
 pw.println(" -list  List application attempts 
for");
-pw.println("aplication.");
+pw.println("application.");
 pw.println(" 

[36/50] [abbrv] hadoop git commit: MAPREDUCE-6610. JobHistoryEventHandler should not swallow timeline response. Contributed by Li Lu

2016-01-27 Thread vvasudev
MAPREDUCE-6610. JobHistoryEventHandler should not swallow timeline response. 
Contributed by Li Lu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0d7c221
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0d7c221
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0d7c221

Branch: refs/heads/YARN-3926
Commit: d0d7c221682a88ac6e11e9b7c07513e369104b10
Parents: 45c763a
Author: Jian He 
Authored: Mon Jan 25 23:01:03 2016 -0800
Committer: Jian He 
Committed: Mon Jan 25 23:01:03 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt  |  3 +++
 .../jobhistory/JobHistoryEventHandler.java| 18 +-
 2 files changed, 20 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0d7c221/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 8f35c6f..68564b6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -698,6 +698,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6614. Remove unnecessary code in TestMapreduceConfigFields.
 (Kai Sasaki via aajisaka)
 
+MAPREDUCE-6610. JobHistoryEventHandler should not swallow timeline response
+(Li Lu via jianhe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0d7c221/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index dd0de2a..63e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -23,6 +23,7 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.Timer;
 import java.util.TimerTask;
@@ -63,6 +64,7 @@ import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
+import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
@@ -1012,7 +1014,21 @@ public class JobHistoryEventHandler extends 
AbstractService
 }
 
 try {
-  timelineClient.putEntities(tEntity);
+  TimelinePutResponse response = timelineClient.putEntities(tEntity);
+  List<TimelinePutResponse.TimelinePutError> errors = response.getErrors();
+  if (errors.size() == 0) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Timeline entities are successfully put in event " + event
+  .getEventType());
+}
+  } else {
+for (TimelinePutResponse.TimelinePutError error : errors) {
+  LOG.error(
+  "Error when publishing entity [" + error.getEntityType() + ","
+  + error.getEntityId() + "], server side error code: "
+  + error.getErrorCode());
+}
+  }
 } catch (IOException ex) {
   LOG.error("Error putting entity " + tEntity.getEntityId() + " to 
Timeline"
   + "Server", ex);



[45/50] [abbrv] hadoop git commit: HDFS-9677. Rename generationStampV1/generationStampV2 to legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.

2016-01-27 Thread vvasudev
HDFS-9677. Rename generationStampV1/generationStampV2 to 
legacyGenerationStamp/generationStamp. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a91109d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a91109d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a91109d

Branch: refs/heads/YARN-3926
Commit: 8a91109d16394310f2568717f103e6fff7cbddb0
Parents: fb238d7
Author: Jing Zhao 
Authored: Wed Jan 27 15:48:47 2016 -0800
Committer: Jing Zhao 
Committed: Wed Jan 27 15:48:47 2016 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../server/blockmanagement/BlockIdManager.java  | 83 ++--
 .../server/blockmanagement/BlockManager.java|  8 +-
 .../OutOfLegacyGenerationStampsException.java   | 38 +
 .../OutOfV1GenerationStampsException.java   | 38 -
 .../hdfs/server/common/HdfsServerConstants.java |  3 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  4 +-
 .../hdfs/server/namenode/FSImageFormat.java | 12 +--
 .../server/namenode/FSImageFormatProtobuf.java  | 12 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  8 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java | 13 +--
 .../hadoop-hdfs/src/main/proto/fsimage.proto|  4 +-
 .../blockmanagement/TestSequentialBlockId.java  | 18 ++---
 .../hdfs/server/namenode/TestEditLog.java   |  6 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |  4 +-
 .../hdfs/server/namenode/TestSaveNamespace.java |  2 +-
 17 files changed, 126 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a91109d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 097c051..7e75558 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -959,6 +959,9 @@ Release 2.9.0 - UNRELEASED
 HDFS-9541. Add hdfsStreamBuilder API to libhdfs to support 
defaultBlockSizes
 greater than 2 GB. (cmccabe via zhz)
 
+HDFS-9677. Rename generationStampV1/generationStampV2 to
+legacyGenerationStamp/generationStamp. (Mingliang Liu via jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a91109d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 9c71287..3f21d9b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -36,11 +36,11 @@ public class BlockIdManager {
* The global generation stamp for legacy blocks with randomly
* generated block IDs.
*/
-  private final GenerationStamp generationStampV1 = new GenerationStamp();
+  private final GenerationStamp legacyGenerationStamp = new GenerationStamp();
   /**
* The global generation stamp for this file system.
*/
-  private final GenerationStamp generationStampV2 = new GenerationStamp();
+  private final GenerationStamp generationStamp = new GenerationStamp();
   /**
* The value of the generation stamp when the first switch to sequential
* block IDs was made. Blocks with generation stamps below this value
@@ -49,7 +49,7 @@ public class BlockIdManager {
* (or initialized as an offset from the V1 (legacy) generation stamp on
* upgrade).
*/
-  private long generationStampV1Limit;
+  private long legacyGenerationStampLimit;
   /**
* The global block ID space for this file system.
*/
@@ -57,7 +57,8 @@ public class BlockIdManager {
   private final SequentialBlockGroupIdGenerator blockGroupIdGenerator;
 
   public BlockIdManager(BlockManager blockManager) {
-this.generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
+this.legacyGenerationStampLimit =
+HdfsConstants.GRANDFATHER_GENERATION_STAMP;
 this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager);
 this.blockGroupIdGenerator = new 
SequentialBlockGroupIdGenerator(blockManager);
   }
@@ -68,14 +69,14 @@ public class BlockIdManager {
* Should be invoked only during the first upgrade to
* sequential block IDs.
*/
-  

[33/50] [abbrv] hadoop git commit: YARN-3542. Refactored existing CPU cgroups support to use the newer and integrated ResourceHandler mechanism, and also deprecated the old LCEResourceHandler interface

2016-01-27 Thread vvasudev
YARN-3542. Refactored existing CPU cgroups support to use the newer and 
integrated ResourceHandler mechanism, and also deprecated the old 
LCEResourceHandler interface hierarchy. Contributed by Varun Vasudev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2085e60a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2085e60a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2085e60a

Branch: refs/heads/YARN-3926
Commit: 2085e60a9655b59aa2ba8917acdc511ab71ff6e4
Parents: e8650fe
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Authored: Mon Jan 25 16:19:03 2016 -0800
Committer: Vinod Kumar Vavilapalli (I am also known as @tshooter.) 

Committed: Mon Jan 25 16:19:36 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |   4 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  12 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   2 +
 .../nodemanager/LinuxContainerExecutor.java |  28 +-
 .../CGroupsCpuResourceHandlerImpl.java  | 235 +++
 .../linux/resources/CGroupsHandler.java |   4 +
 .../linux/resources/CpuResourceHandler.java |  32 ++
 .../linux/resources/ResourceHandlerModule.java  |  34 +++
 .../util/CgroupsLCEResourcesHandler.java|  68 +
 .../util/DefaultLCEResourcesHandler.java|   1 +
 .../nodemanager/util/LCEResourcesHandler.java   |   1 +
 .../TestCGroupsCpuResourceHandlerImpl.java  | 297 +++
 .../util/TestCgroupsLCEResourcesHandler.java|   1 +
 13 files changed, 661 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2085e60a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 41802ae..c2f16d5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -107,6 +107,10 @@ Release 2.9.0 - UNRELEASED
 YARN-4496. Improve HA ResourceManager Failover detection on the client.
 (Jian He via xgong)
 
+YARN-3542. Refactored existing CPU cgroups support to use the newer and
+integrated ResourceHandler mechanism, and also deprecated the old
+LCEResourceHandler interface hierarchy. (Varun Vasudev via vinodkv)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2085e60a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 23c2969..e214a86 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -938,6 +938,18 @@ public class YarnConfiguration extends Configuration {
   DEFAULT_NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE =
   90.0f;
 
+  @Private
+  public static final String NM_CPU_RESOURCE_PREFIX = NM_PREFIX
+  + "resource.cpu.";
+
+  /** Enable cpu isolation. */
+  @Private
+  public static final String NM_CPU_RESOURCE_ENABLED =
+  NM_CPU_RESOURCE_PREFIX + "enabled";
+
+  @Private
+  public static final boolean DEFAULT_NM_CPU_RESOURCE_ENABLED = false;
+
   /**
* Prefix for disk configurations. Work in progress: This configuration
* parameter may be changed/removed in the future.
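To make the new knob concrete, a small sketch of flipping it programmatically follows. Both constants are marked @Private above, so treating them as a public API is an assumption; the resolved key string, "yarn.nodemanager.resource.cpu.enabled", follows from NM_PREFIX + "resource.cpu." + "enabled".

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class EnableCpuIsolation {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Defaults to DEFAULT_NM_CPU_RESOURCE_ENABLED (false); enable the new
    // cgroups-based CPU resource handler added by YARN-3542.
    conf.setBoolean(YarnConfiguration.NM_CPU_RESOURCE_ENABLED, true);
    System.out.println(conf.getBoolean(
        YarnConfiguration.NM_CPU_RESOURCE_ENABLED, false));
  }
}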

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2085e60a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index 0e508ed..529d63b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -111,6 +111,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.NM_DISK_RESOURCE_ENABLED);
 configurationPrefixToSkipCompare
 

hadoop git commit: HADOOP-12492. maven install triggers bats test (aw)

2016-01-27 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 97056c335 -> 79d7949fb


HADOOP-12492. maven install triggers bats test (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79d7949f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79d7949f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79d7949f

Branch: refs/heads/trunk
Commit: 79d7949fbb19928e0cae4f6b5dd9f1af82242f53
Parents: 97056c3
Author: Allen Wittenauer 
Authored: Wed Jan 27 11:27:27 2016 -0800
Committer: Allen Wittenauer 
Committed: Wed Jan 27 11:27:27 2016 -0800

--
 hadoop-common-project/hadoop-common/pom.xml | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79d7949f/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 68ad350..7e4d090 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -1028,7 +1028,9 @@
 
   <id>shelltest</id>
   <activation>
-    <activeByDefault>true</activeByDefault>
+    <property>
+      <name>!skipTests</name>
+    </property>
   </activation>
   <build>
     <plugins>
@@ -1037,7 +1039,7 @@
         <executions>
           <execution>
             <id>common-test-bats-driver</id>
-            <phase>process-test-classes</phase>
+            <phase>test</phase>
             <goals>
               <goal>run</goal>
             </goals>



hadoop git commit: HADOOP-12735. core-default.xml misspells hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)

2016-01-27 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 79d7949fb -> 2e8ab3d46


HADOOP-12735. core-default.xml misspells 
hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e8ab3d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e8ab3d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e8ab3d4

Branch: refs/heads/trunk
Commit: 2e8ab3d46568162af6aa90b612ed61d487e7c7b0
Parents: 79d7949
Author: Colin Patrick Mccabe 
Authored: Wed Jan 27 11:39:55 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Wed Jan 27 11:39:55 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/main/resources/core-default.xml | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e8ab3d4/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3b8376f..4da20e0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1651,6 +1651,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12718. Incorrect error message by fs -put local dir without
 permission. (John Zhuge via Yongjun Zhang)
 
+HADOOP-12735. core-default.xml misspells
+hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e8ab3d4/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index c25f49e..ed3802f 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -428,7 +428,7 @@ for ldap providers in the same way as above does.
 
 
 
-  <name>hadoop.work.around.non.threadsafe.getpwuid</name>
+  <name>hadoop.workaround.non.threadsafe.getpwuid</name>
   <value>false</value>
   <description>Some operating systems or authentication modules are known to
   have broken implementations of getpwuid_r and getpwgid_r, such that these
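For reference, a minimal sketch of reading the corrected key; the point of the fix is that core-default.xml now matches the spelling the code actually looks up. The direct Configuration lookup below is assumed usage, not code from this patch.

import org.apache.hadoop.conf.Configuration;

public class GetpwuidWorkaround {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Resolves against the (now correctly spelled) core-default.xml entry.
    boolean workaround =
        conf.getBoolean("hadoop.workaround.non.threadsafe.getpwuid", false);
    System.out.println("getpwuid_r workaround enabled: " + workaround);
  }
}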



hadoop git commit: YARN-4462. FairScheduler: Disallow preemption from a queue. (Tao Jie via kasha)

2016-01-27 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2e8ab3d46 -> fb238d7e5


YARN-4462. FairScheduler: Disallow preemption from a queue. (Tao Jie via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb238d7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb238d7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb238d7e

Branch: refs/heads/trunk
Commit: fb238d7e5dcd96466c8938b13ca7f13cedecb08a
Parents: 2e8ab3d
Author: Karthik Kambatla 
Authored: Wed Jan 27 11:47:29 2016 -0800
Committer: Karthik Kambatla 
Committed: Wed Jan 27 12:29:06 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../scheduler/fair/AllocationConfiguration.java |  11 +-
 .../fair/AllocationFileLoaderService.java   |  16 +-
 .../scheduler/fair/FSParentQueue.java   |   8 +
 .../resourcemanager/scheduler/fair/FSQueue.java |  11 +-
 .../webapp/FairSchedulerPage.java   |   1 +
 .../webapp/dao/FairSchedulerQueueInfo.java  |   7 +
 .../scheduler/fair/TestFairScheduler.java   | 327 +++
 8 files changed, 377 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb238d7e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2fbecdb..2fae034 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -67,6 +67,8 @@ Release 2.9.0 - UNRELEASED
 YARN-1856. Added cgroups based memory monitoring for containers as another
 alternative to custom memory-monitoring. (Varun Vasudev via vinodkv)
 
+YARN-4462. FairScheduler: Disallow preemption from a queue. (Tao Jie via 
kasha)
+
   IMPROVEMENTS
 
 YARN-4072. ApplicationHistoryServer, WebAppProxyServer, NodeManager and

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb238d7e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index bf4eae8..180ae49 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -98,6 +98,8 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
   // Reservation system configuration
   private ReservationQueueConfiguration globalReservationQueueConfig;
 
+  private final Set<String> nonPreemptableQueues;
+
   public AllocationConfiguration(Map<String, Resource> minQueueResources,
   Map<String, Resource> maxQueueResources,
   Map<String, Integer> queueMaxApps, Map<String, Integer> userMaxApps,
@@ -114,7 +116,8 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
   QueuePlacementPolicy placementPolicy,
   Map<FSQueueType, Set<String>> configuredQueues,
   ReservationQueueConfiguration globalReservationQueueConfig,
-  Set<String> reservableQueues) {
+  Set<String> reservableQueues,
+  Set<String> nonPreemptableQueues) {
 this.minQueueResources = minQueueResources;
 this.maxQueueResources = maxQueueResources;
 this.queueMaxApps = queueMaxApps;
@@ -135,6 +138,7 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 this.globalReservationQueueConfig = globalReservationQueueConfig;
 this.placementPolicy = placementPolicy;
 this.configuredQueues = configuredQueues;
+this.nonPreemptableQueues = nonPreemptableQueues;
   }
   
   public AllocationConfiguration(Configuration conf) {
@@ -161,6 +165,7 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 }
 placementPolicy = QueuePlacementPolicy.fromConfiguration(conf,
 configuredQueues);
+nonPreemptableQueues = new HashSet<String>();
   }
   
   /**
@@ -210,6 +215,10 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 -1f : fairSharePreemptionThreshold;
   }
 
+  
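The method the diff truncates above is the queue-preemptability check; as an illustration of the intended semantics only (names and the allocation-file element are assumptions, not this patch's exact API), a queue may be preempted from only when it was not configured as non-preemptable:

import java.util.HashSet;
import java.util.Set;

public class PreemptionCheckSketch {
  // Mirrors the nonPreemptableQueues set introduced above; presumably
  // populated from the allocation file (e.g. allowPreemptionFrom=false).
  private final Set<String> nonPreemptableQueues = new HashSet<String>();

  boolean isPreemptable(String queueName) {
    return !nonPreemptableQueues.contains(queueName);
  }
}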

hadoop git commit: HADOOP-12735. core-default.xml misspells hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)

2016-01-27 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a1f913fb6 -> b94fbdf6f


HADOOP-12735. core-default.xml misspells 
hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)

(cherry picked from commit 2e8ab3d46568162af6aa90b612ed61d487e7c7b0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b94fbdf6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b94fbdf6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b94fbdf6

Branch: refs/heads/branch-2
Commit: b94fbdf6f080a95ad8f2d787bc8e219e8eeea0f6
Parents: a1f913f
Author: Colin Patrick Mccabe 
Authored: Wed Jan 27 11:39:55 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Wed Jan 27 11:40:42 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/main/resources/core-default.xml | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b94fbdf6/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7040048..02044fc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1018,6 +1018,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12718. Incorrect error message by fs -put local dir without
 permission. (John Zhuge via Yongjun Zhang)
 
+HADOOP-12735. core-default.xml misspells
+hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b94fbdf6/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index df9502f..7d6a2d9 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -437,7 +437,7 @@ for ldap providers in the same way as above does.
 
 
 
-  <name>hadoop.work.around.non.threadsafe.getpwuid</name>
+  <name>hadoop.workaround.non.threadsafe.getpwuid</name>
   <value>false</value>
   <description>Some operating systems or authentication modules are known to
   have broken implementations of getpwuid_r and getpwgid_r, such that these



hadoop git commit: HADOOP-12735. core-default.xml misspells hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)

2016-01-27 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 62206dd2e -> bc00b8ce9


HADOOP-12735. core-default.xml misspells 
hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)

(cherry picked from commit 2e8ab3d46568162af6aa90b612ed61d487e7c7b0)
(cherry picked from commit b94fbdf6f080a95ad8f2d787bc8e219e8eeea0f6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc00b8ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc00b8ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc00b8ce

Branch: refs/heads/branch-2.8
Commit: bc00b8ce9e28dc40b5a4c378b5c2ca86b35a318a
Parents: 62206dd
Author: Colin Patrick Mccabe 
Authored: Wed Jan 27 11:39:55 2016 -0800
Committer: Colin Patrick Mccabe 
Committed: Wed Jan 27 11:40:59 2016 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/main/resources/core-default.xml | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc00b8ce/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 040f143..f3b026f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -964,6 +964,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-12718. Incorrect error message by fs -put local dir without
 permission. (John Zhuge via Yongjun Zhang)
 
+HADOOP-12735. core-default.xml misspells
+hadoop.workaround.non.threadsafe.getpwuid (Ray Chiang via cmccabe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc00b8ce/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index c3cc693..f6a8f9d 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -437,7 +437,7 @@ for ldap providers in the same way as above does.
 
 
 
-  <name>hadoop.work.around.non.threadsafe.getpwuid</name>
+  <name>hadoop.workaround.non.threadsafe.getpwuid</name>
   <value>false</value>
   <description>Some operating systems or authentication modules are known to
   have broken implementations of getpwuid_r and getpwgid_r, such that these



hadoop git commit: YARN-4462. FairScheduler: Disallow preemption from a queue. (Tao Jie via kasha) (cherry picked from commit fb238d7e5dcd96466c8938b13ca7f13cedecb08a)

2016-01-27 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b94fbdf6f -> c25154576


YARN-4462. FairScheduler: Disallow preemption from a queue. (Tao Jie via kasha)
(cherry picked from commit fb238d7e5dcd96466c8938b13ca7f13cedecb08a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2515457
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2515457
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2515457

Branch: refs/heads/branch-2
Commit: c25154576b4528a02c9b1d53445bfa0bc9cc4a87
Parents: b94fbdf
Author: Karthik Kambatla 
Authored: Wed Jan 27 11:47:29 2016 -0800
Committer: Karthik Kambatla 
Committed: Wed Jan 27 12:29:25 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |   2 +
 .../scheduler/fair/AllocationConfiguration.java |  11 +-
 .../fair/AllocationFileLoaderService.java   |  16 +-
 .../scheduler/fair/FSParentQueue.java   |   8 +
 .../resourcemanager/scheduler/fair/FSQueue.java |  11 +-
 .../webapp/FairSchedulerPage.java   |   1 +
 .../webapp/dao/FairSchedulerQueueInfo.java  |   7 +
 .../scheduler/fair/TestFairScheduler.java   | 327 +++
 8 files changed, 377 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2515457/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c03b0f2..2ae02da 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -9,6 +9,8 @@ Release 2.9.0 - UNRELEASED
 YARN-1856. Added cgroups based memory monitoring for containers as another
 alternative to custom memory-monitoring. (Varun Vasudev via vinodkv)
 
+YARN-4462. FairScheduler: Disallow preemption from a queue. (Tao Jie via 
kasha)
+
   IMPROVEMENTS
 
 YARN-4072. ApplicationHistoryServer, WebAppProxyServer, NodeManager and

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2515457/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index bf4eae8..180ae49 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -98,6 +98,8 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
   // Reservation system configuration
   private ReservationQueueConfiguration globalReservationQueueConfig;
 
+  private final Set<String> nonPreemptableQueues;
+
   public AllocationConfiguration(Map<String, Resource> minQueueResources,
   Map<String, Resource> maxQueueResources,
   Map<String, Integer> queueMaxApps, Map<String, Integer> userMaxApps,
@@ -114,7 +116,8 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
   QueuePlacementPolicy placementPolicy,
   Map<FSQueueType, Set<String>> configuredQueues,
   ReservationQueueConfiguration globalReservationQueueConfig,
-  Set<String> reservableQueues) {
+  Set<String> reservableQueues,
+  Set<String> nonPreemptableQueues) {
 this.minQueueResources = minQueueResources;
 this.maxQueueResources = maxQueueResources;
 this.queueMaxApps = queueMaxApps;
@@ -135,6 +138,7 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 this.globalReservationQueueConfig = globalReservationQueueConfig;
 this.placementPolicy = placementPolicy;
 this.configuredQueues = configuredQueues;
+this.nonPreemptableQueues = nonPreemptableQueues;
   }
   
   public AllocationConfiguration(Configuration conf) {
@@ -161,6 +165,7 @@ public class AllocationConfiguration extends 
ReservationSchedulerConfiguration {
 }
 placementPolicy = QueuePlacementPolicy.fromConfiguration(conf,
 configuredQueues);
+nonPreemptableQueues = new HashSet<String>();
   }
   
   /**
@@ -210,6 +215,10 @@ public class AllocationConfiguration extends 

hadoop git commit: MAPREDUCE-6619. HADOOP_CLASSPATH is overwritten in MR container. Contributed by Junping Du

2016-01-27 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c25154576 -> da18bbeda


MAPREDUCE-6619. HADOOP_CLASSPATH is overwritten in MR container. Contributed by 
Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da18bbed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da18bbed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da18bbed

Branch: refs/heads/branch-2
Commit: da18bbedaffad1f3ac9c078675583e7cfb68790f
Parents: c251545
Author: Jian He 
Authored: Wed Jan 27 13:16:36 2016 -0800
Committer: Jian He 
Committed: Wed Jan 27 13:16:59 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  4 ++
 .../hadoop/mapreduce/v2/util/TestMRApps.java| 55 
 3 files changed, 62 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da18bbed/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index d08a1a2..4dcad88 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -409,6 +409,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6610. JobHistoryEventHandler should not swallow timeline response
 (Li Lu via jianhe)
 
+MAPREDUCE-6619. HADOOP_CLASSPATH is overwritten in MR container.
+(Junping Du via jianhe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da18bbed/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index c645465..feea789 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -261,6 +261,10 @@ public class MRApps extends Apps {
 addClasspathToEnv(environment, classpathEnvVar, conf);
 addClasspathToEnv(environment, hadoopClasspathEnvVar, conf);
 
+// MAPREDUCE-6619, retain $HADOOP_CLASSPATH
+MRApps.addToEnvironment(environment, hadoopClasspathEnvVar,
+System.getenv(hadoopClasspathEnvVar), conf);
+
 if (userClassesTakesPrecedence) {
   MRApps.setMRFrameworkClasspath(environment, conf);
 }
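A hedged, standalone sketch of the retain-and-append behavior this hunk relies on: an existing $HADOOP_CLASSPATH value is kept and the new value is appended with the classpath separator instead of being overwritten. The helper below is a simplification of what MRApps.addToEnvironment is assumed to do, not the actual implementation.

import java.io.File;
import java.util.HashMap;
import java.util.Map;

public class RetainClasspathSketch {
  static void addToEnvironment(Map<String, String> env, String name,
      String value) {
    if (value == null || value.isEmpty()) {
      return; // nothing to append
    }
    String existing = env.get(name);
    env.put(name, existing == null
        ? value
        : existing + File.pathSeparator + value);
  }

  public static void main(String[] args) {
    Map<String, String> env = new HashMap<String, String>();
    env.put("HADOOP_CLASSPATH", "container.jar");
    addToEnvironment(env, "HADOOP_CLASSPATH", "hadoop.tgz");
    // Prints "container.jar:hadoop.tgz" on Unix; the earlier value survives.
    System.out.println(env.get("HADOOP_CLASSPATH"));
  }
}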

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da18bbed/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
index 3a417a0..f849d72 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
@@ -30,8 +30,10 @@ import static org.mockito.Mockito.when;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.lang.reflect.Field;
 import java.net.URI;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -239,6 +241,12 @@ public class TestMRApps {
   testTGZ.getAbsolutePath())).toString();
 conf.set(MRJobConfig.CLASSPATH_ARCHIVES, testTGZQualifiedPath);
 conf.set(MRJobConfig.CACHE_ARCHIVES, testTGZQualifiedPath + "#testTGZ");
+// add hadoop.tgz to env HADOOP_CLASSPATH
+Map<String, String> newEnv = new HashMap<String, String>();
+newEnv.put(ApplicationConstants.Environment.HADOOP_CLASSPATH.name(),
+"hadoop.tgz");
+setEnv(newEnv);
+
 Map<String, String> environment = new HashMap<String, String>();
 MRApps.setClasspath(environment, conf);
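The new imports (java.lang.reflect.Field, java.util.Collections) suggest the patch adds a reflection-based setEnv helper so the test can inject HADOOP_CLASSPATH into the live environment. The sketch below shows one common JDK 8-era form of that trick; the patch's actual helper may differ, and this approach is test-only and blocked on newer JDKs.

import java.lang.reflect.Field;
import java.util.Map;

final class EnvHack {
  @SuppressWarnings("unchecked")
  static void setEnv(Map<String, String> newEnv) throws Exception {
    // System.getenv() returns a java.util.Collections$UnmodifiableMap
    // wrapper; reach through its backing field "m" to mutate the view.
    Map<String, String> env = System.getenv();
    Class<?> cl = Class.forName("java.util.Collections$UnmodifiableMap");
    Field m = cl.getDeclaredField("m");
    m.setAccessible(true);
    ((Map<String, String>) m.get(env)).putAll(newEnv);
  }
}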
 

hadoop git commit: MAPREDUCE-6619. HADOOP_CLASSPATH is overwritten in MR container. Contributed by Junping Du

2016-01-27 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 07704aab8 -> a1b75ff8c


MAPREDUCE-6619. HADOOP_CLASSPATH is overwritten in MR container. Contributed by 
Junping Du

(cherry picked from commit da18bbedaffad1f3ac9c078675583e7cfb68790f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1b75ff8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1b75ff8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1b75ff8

Branch: refs/heads/branch-2.7
Commit: a1b75ff8c533c5db7dad643a5aeaaa8b7104544d
Parents: 07704aa
Author: Jian He 
Authored: Wed Jan 27 13:16:36 2016 -0800
Committer: Jian He 
Committed: Wed Jan 27 13:20:16 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  4 ++
 .../hadoop/mapreduce/v2/util/TestMRApps.java| 55 
 3 files changed, 62 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1b75ff8/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index bd6e77a..b3f47f7 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -31,6 +31,9 @@ Release 2.7.3 - UNRELEASED
 MAPREDUCE-6554. MRAppMaster servicestart failing with NPE in
 MRAppMaster#parsePreviousJobHistory (Bibin A Chundatt via jlowe)
 
+MAPREDUCE-6619. HADOOP_CLASSPATH is overwritten in MR container. (Junping
+Du via jianhe)
+
 Release 2.7.2 - 2016-01-25
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1b75ff8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index 0edc191..602d1bf 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -261,6 +261,10 @@ public class MRApps extends Apps {
 addClasspathToEnv(environment, classpathEnvVar, conf);
 addClasspathToEnv(environment, hadoopClasspathEnvVar, conf);
 
+// MAPREDUCE-6619, retain $HADOOP_CLASSPATH
+MRApps.addToEnvironment(environment, hadoopClasspathEnvVar,
+System.getenv(hadoopClasspathEnvVar), conf);
+
 if (userClassesTakesPrecedence) {
   MRApps.setMRFrameworkClasspath(environment, conf);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1b75ff8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
index 87473ba..ca30561 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
@@ -30,8 +30,10 @@ import static org.mockito.Mockito.when;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.lang.reflect.Field;
 import java.net.URI;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -239,6 +241,12 @@ public class TestMRApps {
   testTGZ.getAbsolutePath())).toString();
 conf.set(MRJobConfig.CLASSPATH_ARCHIVES, testTGZQualifiedPath);
 conf.set(MRJobConfig.CACHE_ARCHIVES, testTGZQualifiedPath + "#testTGZ");
+// add hadoop.tgz to env HADOOP_CLASSPATH
+Map<String, String> newEnv = new HashMap<String, String>();
+newEnv.put(ApplicationConstants.Environment.HADOOP_CLASSPATH.name(),
+"hadoop.tgz");
+setEnv(newEnv);
+
 Map<String, String> environment = new HashMap<String, String>();

hadoop git commit: MAPREDUCE-6619. HADOOP_CLASSPATH is overwritten in MR container. Contributed by Junping Du

2016-01-27 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 bc00b8ce9 -> d83b124c9


MAPREDUCE-6619. HADOOP_CLASSPATH is overwritten in MR container. Contributed by 
Junping Du

(cherry picked from commit da18bbedaffad1f3ac9c078675583e7cfb68790f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d83b124c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d83b124c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d83b124c

Branch: refs/heads/branch-2.8
Commit: d83b124c97825af33b615c073afdff8b5be70d96
Parents: bc00b8c
Author: Jian He 
Authored: Wed Jan 27 13:16:36 2016 -0800
Committer: Jian He 
Committed: Wed Jan 27 13:18:19 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  4 ++
 .../hadoop/mapreduce/v2/util/TestMRApps.java| 55 
 3 files changed, 62 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d83b124c/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 8401b6a..c64c65f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -397,6 +397,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6610. JobHistoryEventHandler should not swallow timeline response
 (Li Lu via jianhe)
 
+MAPREDUCE-6619. HADOOP_CLASSPATH is overwritten in MR container.
+(Junping Du via jianhe)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d83b124c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index c645465..feea789 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -261,6 +261,10 @@ public class MRApps extends Apps {
 addClasspathToEnv(environment, classpathEnvVar, conf);
 addClasspathToEnv(environment, hadoopClasspathEnvVar, conf);
 
+// MAPREDUCE-6619, retain $HADOOP_CLASSPATH
+MRApps.addToEnvironment(environment, hadoopClasspathEnvVar,
+System.getenv(hadoopClasspathEnvVar), conf);
+
 if (userClassesTakesPrecedence) {
   MRApps.setMRFrameworkClasspath(environment, conf);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d83b124c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
index 3a417a0..f849d72 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
@@ -30,8 +30,10 @@ import static org.mockito.Mockito.when;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.lang.reflect.Field;
 import java.net.URI;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -239,6 +241,12 @@ public class TestMRApps {
   testTGZ.getAbsolutePath())).toString();
 conf.set(MRJobConfig.CLASSPATH_ARCHIVES, testTGZQualifiedPath);
 conf.set(MRJobConfig.CACHE_ARCHIVES, testTGZQualifiedPath + "#testTGZ");
+// add hadoop.tgz to env HADOOP_CLASSPATH
+Map<String, String> newEnv = new HashMap<String, String>();
+newEnv.put(ApplicationConstants.Environment.HADOOP_CLASSPATH.name(),
+"hadoop.tgz");
+setEnv(newEnv);
+
 Map<String, String> environment = new HashMap<String, String>();
 

hadoop git commit: MAPREDUCE-6619. HADOOP_CLASSPATH is overwritten in MR container. Contributed by Junping Du

2016-01-27 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.6 8ef73cd4c -> ac43c5635


MAPREDUCE-6619. HADOOP_CLASSPATH is overwritten in MR container. Contributed by 
Junping Du

(cherry picked from commit da18bbedaffad1f3ac9c078675583e7cfb68790f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac43c563
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac43c563
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac43c563

Branch: refs/heads/branch-2.6
Commit: ac43c56358a8230484c5ccf81dc496931447f3a4
Parents: 8ef73cd
Author: Jian He 
Authored: Wed Jan 27 13:16:36 2016 -0800
Committer: Jian He 
Committed: Wed Jan 27 13:22:59 2016 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  4 ++
 .../hadoop/mapreduce/v2/util/TestMRApps.java| 55 
 3 files changed, 62 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac43c563/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 514cc44..aeb7597 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -68,6 +68,9 @@ Release 2.6.3 - 2015-12-17
 MAPREDUCE-6549. multibyte delimiters with LineRecordReader cause
 duplicate records (wilfreds via rkanter)
 
+MAPREDUCE-6619. HADOOP_CLASSPATH is overwritten in MR container.
+(Junping Du via jianhe)
+
 Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac43c563/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index af62c1c..4c7a3cf 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -258,6 +258,10 @@ public class MRApps extends Apps {
 addClasspathToEnv(environment, classpathEnvVar, conf);
 addClasspathToEnv(environment, hadoopClasspathEnvVar, conf);
 
+// MAPREDUCE-6619, retain $HADOOP_CLASSPATH
+MRApps.addToEnvironment(environment, hadoopClasspathEnvVar,
+System.getenv(hadoopClasspathEnvVar), conf);
+
 if (userClassesTakesPrecedence) {
   MRApps.setMRFrameworkClasspath(environment, conf);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac43c563/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
index 87473ba..ca30561 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
@@ -30,8 +30,10 @@ import static org.mockito.Mockito.when;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.lang.reflect.Field;
 import java.net.URI;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -239,6 +241,12 @@ public class TestMRApps {
   testTGZ.getAbsolutePath())).toString();
 conf.set(MRJobConfig.CLASSPATH_ARCHIVES, testTGZQualifiedPath);
 conf.set(MRJobConfig.CACHE_ARCHIVES, testTGZQualifiedPath + "#testTGZ");
+// add hadoop.tgz to env HADOOP_CLASSPATH
+Map<String, String> newEnv = new HashMap<String, String>();
+newEnv.put(ApplicationConstants.Environment.HADOOP_CLASSPATH.name(),
+"hadoop.tgz");
+setEnv(newEnv);
+
 Map<String, String> environment = new HashMap<String, String>();
 

[2/3] hadoop git commit: YARN-4224. Support fetching entities by UID and change the REST interface to conform to current REST APIs in YARN. (Varun Saxena via gtcarrera9)

2016-01-27 Thread gtcarrera9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2e62207/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
index a054ee5..7a70de8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java
@@ -18,17 +18,12 @@
 
 package org.apache.hadoop.yarn.server.timelineservice.reader;
 
-import java.io.IOException;
 import java.text.DateFormat;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.Collections;
 import java.util.Date;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Locale;
-import java.util.Map;
 import java.util.Set;
 import java.util.TimeZone;
 
@@ -51,10 +46,11 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity;
+import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType;
-import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
@@ -63,7 +59,7 @@ import org.apache.hadoop.yarn.webapp.NotFoundException;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.inject.Singleton;
 
-/** REST end point for Timeline Reader */
+/** REST end point for Timeline Reader. */
 @Private
 @Unstable
 @Singleton
@@ -167,117 +163,6 @@ public class TimelineReaderWebServices {
 }
   }
 
-  private static Set<String> parseValuesStr(String str, String delimiter) {
-if (str == null || str.isEmpty()) {
-  return null;
-}
-Set<String> strSet = new HashSet<String>();
-String[] strs = str.split(delimiter);
-for (String aStr : strs) {
-  strSet.add(aStr.trim());
-}
-return strSet;
-  }
-
-  @SuppressWarnings("unchecked")
-  private static <T> void parseKeyValues(Map<String, T> map, String str,
-  String pairsDelim, String keyValuesDelim, boolean stringValue,
-  boolean multipleValues) {
-String[] pairs = str.split(pairsDelim);
-for (String pair : pairs) {
-  if (pair == null || pair.trim().isEmpty()) {
-continue;
-  }
-  String[] pairStrs = pair.split(keyValuesDelim);
-  if (pairStrs.length < 2) {
-continue;
-  }
-  if (!stringValue) {
-try {
-  Object value =
-  GenericObjectMapper.OBJECT_READER.readValue(pairStrs[1].trim());
-  map.put(pairStrs[0].trim(), (T) value);
-} catch (IOException e) {
-  map.put(pairStrs[0].trim(), (T) pairStrs[1].trim());
-}
-  } else {
-String key = pairStrs[0].trim();
-if (multipleValues) {
-  Set<String> values = new HashSet<String>();
-  for (int i = 1; i < pairStrs.length; i++) {
-values.add(pairStrs[i].trim());
-  }
-  map.put(key, (T) values);
-} else {
-  map.put(key, (T) pairStrs[1].trim());
-}
-  }
-}
-  }
-
-  private static Map<String, Set<String>> parseKeyStrValuesStr(String str,
-  String pairsDelim, String keyValuesDelim) {
-if (str == null) {
-  return null;
-}
-Map<String, Set<String>> map = new HashMap<String, Set<String>>();
-parseKeyValues(map, str, pairsDelim, keyValuesDelim, true, true);
-return map;
-  }
-
-  private static Map<String, String> parseKeyStrValueStr(String str,
-  String pairsDelim, String keyValDelim) {
-if (str == null) {
-  return null;
-}
-Map<String, String> map = new HashMap<String, String>();
-parseKeyValues(map, str, pairsDelim, keyValDelim, true, false);
-return map;
-  }
-
-  private static Map 

[3/3] hadoop git commit: YARN-4224. Support fetching entities by UID and change the REST interface to conform to current REST APIs in YARN. (Varun Saxena via gtcarrera9)

2016-01-27 Thread gtcarrera9
YARN-4224. Support fetching entities by UID and change the REST
interface to conform to current REST APIs in YARN. (Varun Saxena via
gtcarrera9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2e62207
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2e62207
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2e62207

Branch: refs/heads/YARN-2928
Commit: f2e622078f4729ba0f7fed5d3ac2afd8cd5e2c52
Parents: bad1b45
Author: Li Lu 
Authored: Wed Jan 27 14:04:09 2016 -0800
Committer: Li Lu 
Committed: Wed Jan 27 14:04:09 2016 -0800

--
 hadoop-yarn-project/CHANGES.txt |3 +
 .../records/timelineservice/TimelineEntity.java |   13 +
 .../server/timelineservice/TimelineContext.java |  146 ++
 .../collector/TimelineCollectorContext.java |   86 +-
 .../server/timelineservice/package-info.java|   28 +
 .../reader/TimelineReaderContext.java   |   88 ++
 .../reader/TimelineReaderManager.java   |   82 +-
 .../reader/TimelineReaderUtils.java |  171 ++
 .../reader/TimelineReaderWebServices.java   | 1469 +++---
 .../reader/TimelineReaderWebServicesUtils.java  |  222 +++
 .../reader/TimelineUIDConverter.java|  245 +++
 .../timelineservice/storage/TimelineReader.java |   16 +-
 .../storage/reader/GenericEntityReader.java |7 +-
 .../reader/TestTimelineReaderUtils.java |   55 +
 .../reader/TestTimelineReaderWebServices.java   |   83 +-
 ...stTimelineReaderWebServicesHBaseStorage.java |  348 -
 .../reader/TestTimelineUIDConverter.java|   97 ++
 ...TestPhoenixOfflineAggregationWriterImpl.java |2 +-
 18 files changed, 2784 insertions(+), 377 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2e62207/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d3db584..9b7ed73 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -129,6 +129,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 
   IMPROVEMENTS
 
+YARN-4224. Support fetching entities by UID and change the REST interface 
+to conform to current REST APIs in YARN. (Varun Saxena via gtcarrera9)
+
 YARN-4200. Refactor reader classes in storage to nest under hbase specific 
 package name. Contributed by Li Lu. 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2e62207/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index dcf2473..a661f7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -504,6 +504,19 @@ public class TimelineEntity implements 
Comparable<TimelineEntity> {
 }
   }
 
+  /**
+   * Set UID in info which will be then used for query by UI.
+   * @param uidKey key for UID in info.
+   * @param uId UID to be set for the key.
+   */
+  public void setUID(String uidKey, String uId) {
+if (real == null) {
+  info.put(uidKey, uId);
+} else {
+  real.addInfo(uidKey, uId);
+}
+  }
+
   public boolean isValid() {
 return (getId() != null && getType() != null);
   }
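A hypothetical usage of the new setUID helper; the key name "UID" and the composite identifier format below are illustrative assumptions, not mandated by this hunk.

import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;

public class SetUidExample {
  public static void main(String[] args) {
    TimelineEntity entity = new TimelineEntity();
    entity.setType("YARN_APPLICATION");
    entity.setId("application_1453939200000_0001");
    // Stash the reader-facing UID in the entity's info map so the UI can
    // later query the entity back by this identifier.
    entity.setUID("UID",
        "cluster1!user1!flow1!1!application_1453939200000_0001");
    System.out.println(entity.getInfo().get("UID"));
  }
}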

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2e62207/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/TimelineContext.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/TimelineContext.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/TimelineContext.java
new file mode 100644
index 000..694b709
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/TimelineContext.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation 

[1/3] hadoop git commit: YARN-4224. Support fetching entities by UID and change the REST interface to conform to current REST APIs in YARN. (Varun Saxena via gtcarrera9)

2016-01-27 Thread gtcarrera9
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 bad1b455b -> f2e622078


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2e62207/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
new file mode 100644
index 000..d052d51
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServicesUtils.java
@@ -0,0 +1,222 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.timelineservice.reader;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.server.timeline.GenericObjectMapper;
+import org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
+
+/**
+ * Set of utility methods to be used by timeline reader web services.
+ */
+final class TimelineReaderWebServicesUtils {
+  private TimelineReaderWebServicesUtils() {
+  }
+
+  /**
+   * Parse a delimited string and convert it into a set of strings. For
+   * instance, if delimiter is ",", then the string should be represented as
+   * "value1,value2,value3".
+   * @param str delimited string.
+   * @param delimiter string is delimited by this delimiter.
+   * @return set of strings.
+   */
+  static Set<String> parseValuesStr(String str, String delimiter) {
+    if (str == null || str.isEmpty()) {
+      return null;
+    }
+    Set<String> strSet = new HashSet<String>();
+    String[] strs = str.split(delimiter);
+    for (String aStr : strs) {
+      strSet.add(aStr.trim());
+    }
+    return strSet;
+  }
+
+  @SuppressWarnings("unchecked")
+  private static <T> void parseKeyValues(Map<String, T> map, String str,
+      String pairsDelim, String keyValuesDelim, boolean stringValue,
+      boolean multipleValues) {
+    String[] pairs = str.split(pairsDelim);
+    for (String pair : pairs) {
+      if (pair == null || pair.trim().isEmpty()) {
+        continue;
+      }
+      String[] pairStrs = pair.split(keyValuesDelim);
+      if (pairStrs.length < 2) {
+        continue;
+      }
+      if (!stringValue) {
+        try {
+          Object value =
+              GenericObjectMapper.OBJECT_READER.readValue(pairStrs[1].trim());
+          map.put(pairStrs[0].trim(), (T) value);
+        } catch (IOException e) {
+          map.put(pairStrs[0].trim(), (T) pairStrs[1].trim());
+        }
+      } else {
+        String key = pairStrs[0].trim();
+        if (multipleValues) {
+          Set<String> values = new HashSet<String>();
+          for (int i = 1; i < pairStrs.length; i++) {
+            values.add(pairStrs[i].trim());
+          }
+          map.put(key, (T) values);
+        } else {
+          map.put(key, (T) pairStrs[1].trim());
+        }
+      }
+    }
+  }
+
+  /**
+   * Parse a delimited string and convert it into a map of key-values with each
+   * key having a set of values. Both the key and values are interpreted as
+   * strings.
+   * For instance, if pairsDelim is "," and keyValuesDelim is ":", then the
+   * string should be represented as
+   * "key1:value11:value12:value13,key2:value21,key3:value31:value32".
+   * @param str delimited string represented as multiple keys having multiple
+   * values.
+   * @param pairsDelim key-values pairs are delimited by this delimiter.
+   * @param keyValuesDelim values for a key are delimited by this delimiter.
+   * @return a map of 
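[Editor's note] The rest of this message is truncated in the archive. To
make the parsing conventions above concrete, here is a small usage sketch.
It assumes same-package access (the class and its parse methods are
package-private), and the input values are made up:

  // Comma-delimited values, each trimmed into a Set<String>:
  Set<String> types =
      TimelineReaderWebServicesUtils.parseValuesStr(" app , container ", ",");
  // types now contains {"app", "container"}

  // Key-values parsing per the javadoc above: with pairsDelim "," and
  // keyValuesDelim ":", the string
  //   "key1:value11:value12,key2:value21"
  // yields a map {key1 -> {value11, value12}, key2 -> {value21}}.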

hadoop git commit: HDFS-9654. Code refactoring for HDFS-8578.

2016-01-27 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 a1b75ff8c -> 672481e28


HDFS-9654. Code refactoring for HDFS-8578.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/672481e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/672481e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/672481e2

Branch: refs/heads/branch-2.7
Commit: 672481e28c3fff2bb6aa02e9d9fafbb7b0866cc5
Parents: a1b75ff
Author: Tsz-Wo Nicholas Sze 
Authored: Thu Jan 28 10:56:01 2016 +0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Thu Jan 28 11:09:05 2016 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../hadoop/hdfs/server/common/Storage.java  |   3 +-
 .../server/datanode/BlockPoolSliceStorage.java  | 131 +
 .../hdfs/server/datanode/DataStorage.java   | 285 ++-
 .../hdfs/server/datanode/StorageLocation.java   |  15 +
 .../org/apache/hadoop/hdfs/TestReplication.java |   3 +-
 .../apache/hadoop/hdfs/UpgradeUtilities.java|   2 +-
 .../server/datanode/SimulatedFSDataset.java |   2 +-
 .../datanode/TestDataNodeHotSwapVolumes.java|  48 +++-
 .../hdfs/server/datanode/TestDataStorage.java   |   7 +-
 .../fsdataset/impl/TestFsDatasetImpl.java   |   2 +-
 11 files changed, 298 insertions(+), 202 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/672481e2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5043923..d16bccf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -25,6 +25,8 @@ Release 2.7.3 - UNRELEASED
 HDFS-9634. webhdfs client side exceptions don't provide enough details
 (Eric Payne via kihwal)
 
+HDFS-9654. Code refactoring for HDFS-8578.  (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/672481e2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index d617fc5..aa076ff 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -625,7 +625,8 @@ public abstract class Storage extends StorageInfo {
         rename(getLastCheckpointTmp(), curDir);
         return;
       default:
-        throw new IOException("Unexpected FS state: " + curState);
+        throw new IOException("Unexpected FS state: " + curState
+            + " for storage directory: " + rootPath);
       }
     }
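
[Editor's note] The added context makes the failure attributable to a
particular storage directory when a node has several configured. An
illustrative before/after of the exception message (the state and path
values here are made up, not from this patch):

  // before: java.io.IOException: Unexpected FS state: RECOVER_UPGRADE
  // after:  java.io.IOException: Unexpected FS state: RECOVER_UPGRADE
  //         for storage directory: /data/1/dfs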
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/672481e2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index cecca9a..351ec1d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -18,10 +18,21 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -34,18 +45,9 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.util.Daemon;