hadoop git commit: HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode. Contributed by Jing Zhao.

2015-03-02 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 22e6b2d3e -> f1b28c19d


HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode. 
Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1b28c19
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1b28c19
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1b28c19

Branch: refs/heads/HDFS-7285
Commit: f1b28c19d4fc6693abba544fb2d11469656970de
Parents: 22e6b2d
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 2 13:44:33 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 2 13:44:33 2015 -0800

--
 .../server/blockmanagement/BlockIdManager.java  |  31 +++-
 .../hdfs/server/blockmanagement/BlockInfo.java  |   4 +-
 .../blockmanagement/BlockInfoContiguous.java|   5 +
 .../blockmanagement/BlockInfoStriped.java   |   8 +-
 .../server/blockmanagement/BlockManager.java|  54 ---
 .../hdfs/server/blockmanagement/BlocksMap.java  |  20 ++-
 .../hdfs/server/namenode/FSDirectory.java   |  27 +++-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  69 ++---
 .../hdfs/server/namenode/FSImageFormat.java |  12 +-
 .../server/namenode/FSImageFormatPBINode.java   |   5 +-
 .../server/namenode/FSImageFormatProtobuf.java  |   9 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  39 ++---
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  25 +++-
 .../server/namenode/NameNodeLayoutVersion.java  |   3 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|   1 +
 .../hdfs/server/namenode/TestAddBlockgroup.java |  85 ---
 .../server/namenode/TestAddStripedBlocks.java   | 146 +++
 17 files changed, 355 insertions(+), 188 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1b28c19/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 3ae54ce..1d69d74 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -103,21 +103,38 @@ public class BlockIdManager {
   }
 
   /**
-   * Sets the maximum allocated block ID for this filesystem. This is
+   * Sets the maximum allocated contiguous block ID for this filesystem. This 
is
* the basis for allocating new block IDs.
*/
-  public void setLastAllocatedBlockId(long blockId) {
+  public void setLastAllocatedContiguousBlockId(long blockId) {
 blockIdGenerator.skipTo(blockId);
   }
 
   /**
-   * Gets the maximum sequentially allocated block ID for this filesystem
+   * Gets the maximum sequentially allocated contiguous block ID for this
+   * filesystem
*/
-  public long getLastAllocatedBlockId() {
+  public long getLastAllocatedContiguousBlockId() {
 return blockIdGenerator.getCurrentValue();
   }
 
   /**
+   * Sets the maximum allocated striped block ID for this filesystem. This is
+   * the basis for allocating new block IDs.
+   */
+  public void setLastAllocatedStripedBlockId(long blockId) {
+blockGroupIdGenerator.skipTo(blockId);
+  }
+
+  /**
+   * Gets the maximum sequentially allocated striped block ID for this
+   * filesystem
+   */
+  public long getLastAllocatedStripedBlockId() {
+return blockGroupIdGenerator.getCurrentValue();
+  }
+
+  /**
* Sets the current generation stamp for legacy blocks
*/
   public void setGenerationStampV1(long stamp) {
@@ -188,11 +205,11 @@ public class BlockIdManager {
   /**
* Increments, logs and then returns the block ID
*/
-  public long nextBlockId() {
+  public long nextContiguousBlockId() {
 return blockIdGenerator.nextValue();
   }
 
-  public long nextBlockGroupId() {
+  public long nextStripedBlockId() {
 return blockGroupIdGenerator.nextValue();
   }
 
@@ -216,7 +233,7 @@ public class BlockIdManager {
     return id < 0;
   }
 
-  public static long convertToGroupID(long id) {
+  public static long convertToStripedID(long id) {
     return id & (~HdfsConstants.BLOCK_GROUP_INDEX_MASK);
   }
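
A note on the renamed API above: the patch keeps two independent ID generators, one per ID space (blockIdGenerator for contiguous blocks, blockGroupIdGenerator for striped block groups). A minimal sketch of the contract both share, assuming only what this diff shows (illustrative, not the HDFS SequentialNumber class):

import java.util.concurrent.atomic.AtomicLong;

// Monotonic ID source that can be fast-forwarded when the NameNode
// replays an fsimage or an edit log segment.
class SequentialIdGenerator {
  private final AtomicLong current;

  SequentialIdGenerator(long initialValue) {
    current = new AtomicLong(initialValue);
  }

  // Role of getLastAllocatedContiguousBlockId()/getLastAllocatedStripedBlockId().
  long getCurrentValue() {
    return current.get();
  }

  // Role of nextContiguousBlockId()/nextStripedBlockId().
  long nextValue() {
    return current.incrementAndGet();
  }

  // Role of setLastAllocated{Contiguous,Striped}BlockId(): fast-forward only,
  // never move the counter backwards.
  void skipTo(long newValue) {
    current.accumulateAndGet(newValue, Math::max);
  }
}

The convertToStripedID mask works like a page-alignment mask: clearing the low bits reserved for a block's index within its group maps any member block's ID back to the group's ID.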
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1b28c19/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 

hadoop git commit: YARN-3281. Added RMStateStore to StateMachine visualization list. Contributed by Chengbing Liu (cherry picked from commit 5d0bae550f5b9a6005aa1d373cfe1ec80513dbd9)

2015-03-02 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fcd817afb -> 76b7283fa


YARN-3281. Added RMStateStore to StateMachine visualization list. Contributed 
by Chengbing Liu
(cherry picked from commit 5d0bae550f5b9a6005aa1d373cfe1ec80513dbd9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76b7283f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76b7283f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76b7283f

Branch: refs/heads/branch-2
Commit: 76b7283fa18641ce6e6c5ab5a774fdf0aa4564df
Parents: fcd817a
Author: Jian He jia...@apache.org
Authored: Mon Mar 2 14:39:49 2015 -0800
Committer: Jian He jia...@apache.org
Committed: Mon Mar 2 14:43:12 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop-yarn-server-resourcemanager/pom.xml| 7 ---
 2 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76b7283f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index eaa8ed4..801d960 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -300,6 +300,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3262. Surface application outstanding resource requests table 
 in RM web UI. (Jian He via wangda)
 
+YARN-3281. Added RMStateStore to StateMachine visualization list.
+(Chengbing Liu via jianhe)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76b7283f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index a41b94a..2e4feb1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -278,7 +278,7 @@
                   <source>
                     <directory>${basedir}/src/main/proto</directory>
                     <includes>
-                      <include>yarn_server_resourcemanager_recovery.proto</include>
+                    <include>yarn_server_resourcemanager_recovery.proto</include>
                     </includes>
                   </source>
                   <output>${project.build.directory}/generated-sources/java</output>
@@ -331,10 +331,11 @@
                 </goals>
                 <configuration>
                   <mainClass>org.apache.hadoop.yarn.state.VisualizeStateMachine</mainClass>
-                  <classpathScope>compile</classpathScope>
+                  <classpathScope>compile</classpathScope>
                   <arguments>
                     <argument>ResourceManager</argument>
-                    <argument>org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl,
+                    <argument>org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore,
+                      org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl,
                       org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl,
                       org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl,
                       org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl</argument>



hadoop git commit: YARN-3281. Added RMStateStore to StateMachine visualization list. Contributed by Chengbing Liu

2015-03-02 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk ca1c00bf8 -> 5d0bae550


YARN-3281. Added RMStateStore to StateMachine visualization list. Contributed 
by Chengbing Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d0bae55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d0bae55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d0bae55

Branch: refs/heads/trunk
Commit: 5d0bae550f5b9a6005aa1d373cfe1ec80513dbd9
Parents: ca1c00b
Author: Jian He jia...@apache.org
Authored: Mon Mar 2 14:39:49 2015 -0800
Committer: Jian He jia...@apache.org
Committed: Mon Mar 2 14:39:49 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop-yarn-server-resourcemanager/pom.xml| 7 ---
 2 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d0bae55/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cef1758..c7dac60 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -345,6 +345,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3262. Surface application outstanding resource requests table 
 in RM web UI. (Jian He via wangda)
 
+YARN-3281. Added RMStateStore to StateMachine visualization list.
+(Chengbing Liu via jianhe)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d0bae55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
index ff429cc..aaa0de5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
@@ -278,7 +278,7 @@
                   <source>
                     <directory>${basedir}/src/main/proto</directory>
                     <includes>
-                      <include>yarn_server_resourcemanager_recovery.proto</include>
+                    <include>yarn_server_resourcemanager_recovery.proto</include>
                     </includes>
                   </source>
                   <output>${project.build.directory}/generated-sources/java</output>
@@ -331,10 +331,11 @@
                 </goals>
                 <configuration>
                   <mainClass>org.apache.hadoop.yarn.state.VisualizeStateMachine</mainClass>
-                  <classpathScope>compile</classpathScope>
+                  <classpathScope>compile</classpathScope>
                   <arguments>
                     <argument>ResourceManager</argument>
-                    <argument>org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl,
+                    <argument>org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore,
+                      org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl,
                       org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl,
                       org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl,
                       org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl</argument>



hadoop git commit: HDFS-7785. Improve diagnostics information for HttpPutFailedException. Contributed by Chengbing Liu.

2015-03-02 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5d0bae550 -> c5eac9c6f


HDFS-7785. Improve diagnostics information for HttpPutFailedException. 
Contributed by Chengbing Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5eac9c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5eac9c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5eac9c6

Branch: refs/heads/trunk
Commit: c5eac9c6fe937ff32f4efed89d34c06974fac4d6
Parents: 5d0bae5
Author: Haohui Mai whe...@apache.org
Authored: Mon Mar 2 15:35:02 2015 -0800
Committer: Haohui Mai whe...@apache.org
Committed: Mon Mar 2 15:35:02 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java  | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5eac9c6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d5208da..43505d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1064,6 +1064,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-6753. Initialize checkDisk when DirectoryScanner not able to get
 files list for scanning (J.Andreina via vinayakumarb)
 
+HDFS-7785. Improve diagnostics information for HttpPutFailedException.
+(Chengbing Liu via wheat9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5eac9c6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index c1e9d7f..0d32758 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -291,7 +291,9 @@ public class TransferFsImage {
 
   int responseCode = connection.getResponseCode();
   if (responseCode != HttpURLConnection.HTTP_OK) {
-        throw new HttpPutFailedException(connection.getResponseMessage(),
+        throw new HttpPutFailedException(String.format(
+            "Image uploading failed, status: %d, url: %s, message: %s",
+            responseCode, urlWithParams, connection.getResponseMessage()),
 responseCode);
   }
 } catch (AuthenticationException e) {
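
The change is purely diagnostic: the thrown exception now carries the status code and the target URL rather than only the bare HTTP reason phrase. A hedged, self-contained sketch of the pattern; the exception class below is a stand-in, not the nested class in TransferFsImage:

import java.io.IOException;

// Stand-in for the real exception type; the two-arg shape matches the diff.
class HttpPutFailedException extends IOException {
  private final int responseCode;

  HttpPutFailedException(String message, int responseCode) {
    super(message);
    this.responseCode = responseCode;
  }

  int getResponseCode() {
    return responseCode;
  }
}

class UploadDiagnostics {
  static HttpPutFailedException failure(int code, String url, String reason) {
    // Before the patch the message was just `reason` (e.g. "Forbidden");
    // after it, one log line identifies the endpoint, the status and the why.
    return new HttpPutFailedException(String.format(
        "Image uploading failed, status: %d, url: %s, message: %s",
        code, url, reason), code);
  }
}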



hadoop git commit: HDFS-7785. Improve diagnostics information for HttpPutFailedException. Contributed by Chengbing Liu.

2015-03-02 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 76b7283fa -> 651aa8761


HDFS-7785. Improve diagnostics information for HttpPutFailedException. 
Contributed by Chengbing Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/651aa876
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/651aa876
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/651aa876

Branch: refs/heads/branch-2
Commit: 651aa8761e902f722dcbdba866b55fa752e4c043
Parents: 76b7283
Author: Haohui Mai whe...@apache.org
Authored: Mon Mar 2 15:35:02 2015 -0800
Committer: Haohui Mai whe...@apache.org
Committed: Mon Mar 2 15:35:19 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java  | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/651aa876/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d9b96ab..c945e16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -762,6 +762,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-6753. Initialize checkDisk when DirectoryScanner not able to get
 files list for scanning (J.Andreina via vinayakumarb)
 
+HDFS-7785. Improve diagnostics information for HttpPutFailedException.
+(Chengbing Liu via wheat9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/651aa876/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index c1e9d7f..0d32758 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -291,7 +291,9 @@ public class TransferFsImage {
 
   int responseCode = connection.getResponseCode();
   if (responseCode != HttpURLConnection.HTTP_OK) {
-        throw new HttpPutFailedException(connection.getResponseMessage(),
+        throw new HttpPutFailedException(String.format(
+            "Image uploading failed, status: %d, url: %s, message: %s",
+            responseCode, urlWithParams, connection.getResponseMessage()),
 responseCode);
   }
 } catch (AuthenticationException e) {



hadoop git commit: YARN-3270. Fix node label expression not getting set in ApplicationSubmissionContext (Rohit Agarwal via wangda)

2015-03-02 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 651aa8761 -> 19a8e4823


YARN-3270. Fix node label expression not getting set in 
ApplicationSubmissionContext (Rohit Agarwal via wangda)

(cherry picked from commit abac6eb9d530bb1e6ff58ec3c75b17d840a0ee3f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19a8e482
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19a8e482
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19a8e482

Branch: refs/heads/branch-2
Commit: 19a8e48230435331191611f6f96671b5158f7be5
Parents: 651aa87
Author: Wangda Tan wan...@apache.org
Authored: Mon Mar 2 17:21:19 2015 -0800
Committer: Wangda Tan wan...@apache.org
Committed: Mon Mar 2 17:24:55 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop/yarn/api/records/ApplicationSubmissionContext.java | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19a8e482/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 801d960..fe280c4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -638,6 +638,9 @@ Release 2.7.0 - UNRELEASED
 all Schedulers even when using ParameterizedSchedulerTestBase. 
 (Anubhav Dhoot via devaraj)
 
+YARN-3270. Fix node label expression not getting set in 
+ApplicationSubmissionContext (Rohit Agarwal via wangda)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19a8e482/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
index f1ebbfe..c4014fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
@@ -155,6 +155,7 @@ public abstract class ApplicationSubmissionContext {
 context.setMaxAppAttempts(maxAppAttempts);
 context.setApplicationType(applicationType);
 context.setKeepContainersAcrossApplicationAttempts(keepContainers);
+context.setNodeLabelExpression(appLabelExpression);
 context.setAMContainerResourceRequest(resourceRequest);
 return context;
   }



[2/2] hadoop git commit: YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a queue's available resource-limit from the parent queue. Contributed by Wangda Tan.

2015-03-02 Thread vinodkv
YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a queue's 
available resource-limit from the parent queue. Contributed by Wangda Tan.

(cherry picked from commit 14dd647c556016d351f425ee956ccf800ccb9ce2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/253c7854
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/253c7854
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/253c7854

Branch: refs/heads/branch-2
Commit: 253c78548b2fb8a8dd8bfb1bd87b6740d0f4616a
Parents: 19a8e48
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Mon Mar 2 17:52:47 2015 -0800
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Mon Mar 2 17:54:01 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/ResourceLimits.java   |  40 +++
 .../scheduler/ResourceUsage.java|  61 ++---
 .../scheduler/capacity/AbstractCSQueue.java |  24 +-
 .../scheduler/capacity/CSQueue.java |  11 +-
 .../scheduler/capacity/CSQueueUtils.java|  48 
 .../capacity/CapacityHeadroomProvider.java  |  16 +-
 .../scheduler/capacity/CapacityScheduler.java   |  30 ++-
 .../scheduler/capacity/LeafQueue.java   | 131 +-
 .../scheduler/capacity/ParentQueue.java |  53 +++-
 .../yarn/server/resourcemanager/MockAM.java |  11 +-
 .../scheduler/TestResourceUsage.java|   2 +-
 .../capacity/TestApplicationLimits.java |  32 +--
 .../scheduler/capacity/TestCSQueueUtils.java| 250 ---
 .../capacity/TestCapacityScheduler.java |  85 ++-
 .../scheduler/capacity/TestChildQueueOrder.java |  36 ++-
 .../scheduler/capacity/TestLeafQueue.java   | 221 ++--
 .../scheduler/capacity/TestParentQueue.java | 106 
 .../scheduler/capacity/TestReservations.java| 100 +---
 19 files changed, 646 insertions(+), 614 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/253c7854/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index fe280c4..73bcaf0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -641,6 +641,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3270. Fix node label expression not getting set in 
 ApplicationSubmissionContext (Rohit Agarwal via wangda)
 
+YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a 
queue's
+available resource-limit from the parent queue. (Wangda Tan via vinodkv)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/253c7854/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
new file mode 100644
index 000..12333e8
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+
+/**
+ * Resource limits for queues/applications, this means max overall (please note
+ * that, it's not extra) resource you can get.
+ */
+public class ResourceLimits {
+  public ResourceLimits(Resource limit) {
+this.limit = limit;
+  }
+ 
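
The archive truncates the new class here, but the commit message captures the design: the parent queue computes each child's effective limit and passes it down on every call, so a child never has to reach back up into (and lock) the parent to learn its headroom. A much-simplified sketch of that flow, with plain longs standing in for Resource and none of the CapacityScheduler's locking or capacity math:

import java.util.List;

class Limits {
  final long limit; // max overall resource, in MB for this sketch
  Limits(long limit) { this.limit = limit; }
}

abstract class Queue {
  abstract long assignContainers(long clusterResource, Limits limits);
}

class ParentQueue extends Queue {
  private final List<Queue> children;
  private long used;

  ParentQueue(List<Queue> children) { this.children = children; }

  @Override
  long assignContainers(long clusterResource, Limits parentLimits) {
    long assigned = 0;
    for (Queue child : children) {
      // Derive the child's limit from our own headroom and hand it down,
      // rather than letting the child call back up into the parent.
      Limits childLimits = new Limits(parentLimits.limit - used - assigned);
      assigned += child.assignContainers(clusterResource, childLimits);
    }
    used += assigned;
    return assigned;
  }
}

class LeafQueue extends Queue {
  @Override
  long assignContainers(long clusterResource, Limits limits) {
    long demand = 1024; // one 1 GB container per call, for the sketch
    if (demand > limits.limit) {
      return 0; // respect the headroom computed by the parent
    }
    return demand;
  }
}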

[1/2] hadoop git commit: YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a queue's available resource-limit from the parent queue. Contributed by Wangda Tan.

2015-03-02 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 19a8e4823 -> 253c78548


http://git-wip-us.apache.org/repos/asf/hadoop/blob/253c7854/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index ead5719..a5a2e5f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -73,6 +73,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEven
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
@@ -294,11 +295,13 @@ public class TestLeafQueue {
   //Verify the value for getAMResourceLimit for queues with < .1 maxcap
  Resource clusterResource = Resource.newInstance(50 * GB, 50);
  
- a.updateClusterResource(clusterResource);
+a.updateClusterResource(clusterResource,
+new ResourceLimits(clusterResource));
  assertEquals(Resource.newInstance(1 * GB, 1), 
a.getAMResourceLimit());
 
- b.updateClusterResource(clusterResource);
+ b.updateClusterResource(clusterResource,
+new ResourceLimits(clusterResource));
  assertEquals(Resource.newInstance(5 * GB, 1), 
b.getAMResourceLimit());
   }
@@ -347,7 +350,8 @@ public class TestLeafQueue {
 // Start testing...
 
 // Only 1 container
-a.assignContainers(clusterResource, node_0, false);
+a.assignContainers(clusterResource, node_0, false,
+new ResourceLimits(clusterResource));
 assertEquals(
 (int)(node_0.getTotalResource().getMemory() * a.getCapacity()) - 
(1*GB),
 a.getMetrics().getAvailableMB());
@@ -482,7 +486,8 @@ public class TestLeafQueue {
 // Start testing...
 
 // Only 1 container
-a.assignContainers(clusterResource, node_0, false);
+a.assignContainers(clusterResource, node_0, false,
+new ResourceLimits(clusterResource));
 assertEquals(1*GB, a.getUsedResources().getMemory());
 assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
 assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
@@ -492,7 +497,8 @@ public class TestLeafQueue {
 
    // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
 // you can get one container more than user-limit
-a.assignContainers(clusterResource, node_0, false);
+a.assignContainers(clusterResource, node_0, false,
+new ResourceLimits(clusterResource));
 assertEquals(2*GB, a.getUsedResources().getMemory());
 assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
 assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
@@ -500,7 +506,8 @@ public class TestLeafQueue {
 assertEquals(2*GB, a.getMetrics().getAllocatedMB());
 
 // Can't allocate 3rd due to user-limit
-a.assignContainers(clusterResource, node_0, false);
+a.assignContainers(clusterResource, node_0, false,
+new ResourceLimits(clusterResource));
 assertEquals(2*GB, a.getUsedResources().getMemory());
 assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
 assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
@@ -509,7 +516,8 @@ public class TestLeafQueue {
 
 // Bump up user-limit-factor, now allocate should work
 a.setUserLimitFactor(10);
-a.assignContainers(clusterResource, node_0, false);
+a.assignContainers(clusterResource, node_0, false,
+new ResourceLimits(clusterResource));
 assertEquals(3*GB, a.getUsedResources().getMemory());
 assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
 assertEquals(0*GB, 

hadoop git commit: HADOOP-11605. FilterFileSystem#create with ChecksumOpt should propagate it to wrapped FS. (gera)

2015-03-02 Thread gera
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fe7df5b4d -> 5e235802d


HADOOP-11605. FilterFileSystem#create with ChecksumOpt should propagate it to 
wrapped FS. (gera)

(cherry picked from commit b18d3830aca00f44d31e42839578f24eecedf2c8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e235802
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e235802
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e235802

Branch: refs/heads/branch-2
Commit: 5e235802deb68082c50cbb6b10caa5472382ab32
Parents: fe7df5b
Author: Gera Shegalov g...@apache.org
Authored: Tue Feb 17 00:24:37 2015 -0800
Committer: Gera Shegalov g...@apache.org
Committed: Mon Mar 2 18:16:58 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/fs/FilterFileSystem.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e235802/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9f82722..107940e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -623,6 +623,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11615. Update ServiceLevelAuth.md for YARN.
 (Brahma Reddy Battula via aajisaka)
 
+HADOOP-11605. FilterFileSystem#create with ChecksumOpt should propagate it
+to wrapped FS. (gera)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e235802/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index d4080ad..d14a272 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -190,7 +190,7 @@ public class FilterFileSystem extends FileSystem {
 Progressable progress,
 ChecksumOpt checksumOpt) throws IOException {
 return fs.create(f, permission,
-  flags, bufferSize, replication, blockSize, progress);
+  flags, bufferSize, replication, blockSize, progress, checksumOpt);
   }
   
   @Override
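
The one-line fix above is an instance of a classic delegation pitfall: a filter/decorator overload that forwards every parameter except one silently falls back to the delegate's default behavior. A generic sketch of the pattern, using an illustrative Writer interface rather than the Hadoop FileSystem API:

// Illustrative interface; not part of Hadoop.
interface Writer {
  void write(String path, int bufferSize, boolean sync);
}

// A filter/decorator must forward *every* parameter of the overload it
// overrides; dropping one (as FilterFileSystem#create dropped checksumOpt)
// changes behavior only for callers of that overload, which is why such
// bugs survive basic testing.
class ForwardingWriter implements Writer {
  private final Writer delegate;

  ForwardingWriter(Writer delegate) {
    this.delegate = delegate;
  }

  @Override
  public void write(String path, int bufferSize, boolean sync) {
    // Bug pattern: delegate.write(path, bufferSize, false);  // 'sync' lost
    delegate.write(path, bufferSize, sync);                   // forward all
  }
}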



[2/2] hadoop git commit: YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a queue's available resource-limit from the parent queue. Contributed by Wangda Tan.

2015-03-02 Thread vinodkv
YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a queue's 
available resource-limit from the parent queue. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14dd647c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14dd647c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14dd647c

Branch: refs/heads/trunk
Commit: 14dd647c556016d351f425ee956ccf800ccb9ce2
Parents: abac6eb
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Mon Mar 2 17:52:47 2015 -0800
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Mon Mar 2 17:52:47 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/ResourceLimits.java   |  40 +++
 .../scheduler/ResourceUsage.java|  61 ++---
 .../scheduler/capacity/AbstractCSQueue.java |  24 +-
 .../scheduler/capacity/CSQueue.java |  11 +-
 .../scheduler/capacity/CSQueueUtils.java|  48 
 .../capacity/CapacityHeadroomProvider.java  |  16 +-
 .../scheduler/capacity/CapacityScheduler.java   |  30 ++-
 .../scheduler/capacity/LeafQueue.java   | 131 +-
 .../scheduler/capacity/ParentQueue.java |  53 +++-
 .../yarn/server/resourcemanager/MockAM.java |  11 +-
 .../scheduler/TestResourceUsage.java|   2 +-
 .../capacity/TestApplicationLimits.java |  32 +--
 .../scheduler/capacity/TestCSQueueUtils.java| 250 ---
 .../capacity/TestCapacityScheduler.java |  85 ++-
 .../scheduler/capacity/TestChildQueueOrder.java |  36 ++-
 .../scheduler/capacity/TestLeafQueue.java   | 221 ++--
 .../scheduler/capacity/TestParentQueue.java | 106 
 .../scheduler/capacity/TestReservations.java| 100 +---
 19 files changed, 646 insertions(+), 614 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14dd647c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d07aa26..0850f0b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -686,6 +686,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3270. Fix node label expression not getting set in 
 ApplicationSubmissionContext (Rohit Agarwal via wangda)
 
+YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a 
queue's
+available resource-limit from the parent queue. (Wangda Tan via vinodkv)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14dd647c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
new file mode 100644
index 000..12333e8
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceLimits.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+
+/**
+ * Resource limits for queues/applications, this means max overall (please note
+ * that, it's not extra) resource you can get.
+ */
+public class ResourceLimits {
+  public ResourceLimits(Resource limit) {
+this.limit = limit;
+  }
+  
+  volatile Resource limit;
+  public Resource getLimit() {
+
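
The archive cuts this diff off mid-method. Purely for readability, a hedged reconstruction of how such a limits holder plausibly completes: the getter body follows directly from the field, while the setter is an assumption that this excerpt does not confirm.

package org.apache.hadoop.yarn.server.resourcemanager.scheduler;

import org.apache.hadoop.yarn.api.records.Resource;

public class ResourceLimits {
  public ResourceLimits(Resource limit) {
    this.limit = limit;
  }

  // volatile so a limit updated by the scheduler thread is visible to
  // queues reading it during allocation.
  volatile Resource limit;

  public Resource getLimit() {
    return limit;
  }

  // Assumed companion setter; not shown in the truncated email.
  public void setLimit(Resource limit) {
    this.limit = limit;
  }
}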

[1/2] hadoop git commit: YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a queue's available resource-limit from the parent queue. Contributed by Wangda Tan.

2015-03-02 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/trunk abac6eb9d -> 14dd647c5


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14dd647c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index ead5719..a5a2e5f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -73,6 +73,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEven
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
@@ -294,11 +295,13 @@ public class TestLeafQueue {
   //Verify the value for getAMResourceLimit for queues with < .1 maxcap
  Resource clusterResource = Resource.newInstance(50 * GB, 50);
  
- a.updateClusterResource(clusterResource);
+a.updateClusterResource(clusterResource,
+new ResourceLimits(clusterResource));
  assertEquals(Resource.newInstance(1 * GB, 1), 
a.getAMResourceLimit());
 
- b.updateClusterResource(clusterResource);
+ b.updateClusterResource(clusterResource,
+new ResourceLimits(clusterResource));
  assertEquals(Resource.newInstance(5 * GB, 1), 
b.getAMResourceLimit());
   }
@@ -347,7 +350,8 @@ public class TestLeafQueue {
 // Start testing...
 
 // Only 1 container
-a.assignContainers(clusterResource, node_0, false);
+a.assignContainers(clusterResource, node_0, false,
+new ResourceLimits(clusterResource));
 assertEquals(
 (int)(node_0.getTotalResource().getMemory() * a.getCapacity()) - 
(1*GB),
 a.getMetrics().getAvailableMB());
@@ -482,7 +486,8 @@ public class TestLeafQueue {
 // Start testing...
 
 // Only 1 container
-a.assignContainers(clusterResource, node_0, false);
+a.assignContainers(clusterResource, node_0, false,
+new ResourceLimits(clusterResource));
 assertEquals(1*GB, a.getUsedResources().getMemory());
 assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
 assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
@@ -492,7 +497,8 @@ public class TestLeafQueue {
 
    // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
 // you can get one container more than user-limit
-a.assignContainers(clusterResource, node_0, false);
+a.assignContainers(clusterResource, node_0, false,
+new ResourceLimits(clusterResource));
 assertEquals(2*GB, a.getUsedResources().getMemory());
 assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
 assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
@@ -500,7 +506,8 @@ public class TestLeafQueue {
 assertEquals(2*GB, a.getMetrics().getAllocatedMB());
 
 // Can't allocate 3rd due to user-limit
-a.assignContainers(clusterResource, node_0, false);
+a.assignContainers(clusterResource, node_0, false,
+new ResourceLimits(clusterResource));
 assertEquals(2*GB, a.getUsedResources().getMemory());
 assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
 assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
@@ -509,7 +516,8 @@ public class TestLeafQueue {
 
 // Bump up user-limit-factor, now allocate should work
 a.setUserLimitFactor(10);
-a.assignContainers(clusterResource, node_0, false);
+a.assignContainers(clusterResource, node_0, false,
+new ResourceLimits(clusterResource));
 assertEquals(3*GB, a.getUsedResources().getMemory());
 assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
 assertEquals(0*GB, 

hadoop git commit: YARN-3270. Fix node label expression not getting set in ApplicationSubmissionContext (Rohit Agarwal via wangda)

2015-03-02 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk c5eac9c6f -> abac6eb9d


YARN-3270. Fix node label expression not getting set in 
ApplicationSubmissionContext (Rohit Agarwal via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abac6eb9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abac6eb9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abac6eb9

Branch: refs/heads/trunk
Commit: abac6eb9d530bb1e6ff58ec3c75b17d840a0ee3f
Parents: c5eac9c
Author: Wangda Tan wan...@apache.org
Authored: Mon Mar 2 17:21:19 2015 -0800
Committer: Wangda Tan wan...@apache.org
Committed: Mon Mar 2 17:21:19 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop/yarn/api/records/ApplicationSubmissionContext.java | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/abac6eb9/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c7dac60..d07aa26 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -683,6 +683,9 @@ Release 2.7.0 - UNRELEASED
 all Schedulers even when using ParameterizedSchedulerTestBase. 
 (Anubhav Dhoot via devaraj)
 
+YARN-3270. Fix node label expression not getting set in 
+ApplicationSubmissionContext (Rohit Agarwal via wangda)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/abac6eb9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
index f1ebbfe..c4014fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
@@ -155,6 +155,7 @@ public abstract class ApplicationSubmissionContext {
 context.setMaxAppAttempts(maxAppAttempts);
 context.setApplicationType(applicationType);
 context.setKeepContainersAcrossApplicationAttempts(keepContainers);
+context.setNodeLabelExpression(appLabelExpression);
 context.setAMContainerResourceRequest(resourceRequest);
 return context;
   }



hadoop git commit: HADOOP-11605. FilterFileSystem#create with ChecksumOpt should propagate it to wrapped FS. (gera)

2015-03-02 Thread gera
Repository: hadoop
Updated Branches:
  refs/heads/trunk 431e7d84c -> b18d3830a


HADOOP-11605. FilterFileSystem#create with ChecksumOpt should propagate it to 
wrapped FS. (gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b18d3830
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b18d3830
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b18d3830

Branch: refs/heads/trunk
Commit: b18d3830aca00f44d31e42839578f24eecedf2c8
Parents: 431e7d8
Author: Gera Shegalov g...@apache.org
Authored: Tue Feb 17 00:24:37 2015 -0800
Committer: Gera Shegalov g...@apache.org
Committed: Mon Mar 2 18:09:23 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/fs/FilterFileSystem.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b18d3830/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b8ed286..ebe23c7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1031,6 +1031,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11615. Update ServiceLevelAuth.md for YARN.
 (Brahma Reddy Battula via aajisaka)
 
+HADOOP-11605. FilterFileSystem#create with ChecksumOpt should propagate it
+to wrapped FS. (gera)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b18d3830/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index d4080ad..d14a272 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -190,7 +190,7 @@ public class FilterFileSystem extends FileSystem {
 Progressable progress,
 ChecksumOpt checksumOpt) throws IOException {
 return fs.create(f, permission,
-  flags, bufferSize, replication, blockSize, progress);
+  flags, bufferSize, replication, blockSize, progress, checksumOpt);
   }
   
   @Override



hadoop git commit: HADOOP-11449 / HADOOP-10320 ; reverting patch 6cda3a7 which triggered regression in javadocs on some java8 compilers (stevel on behalf of cnauroth)

2015-03-02 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 253c78548 -> fe7df5b4d


HADOOP-11449 / HADOOP-10320 ; reverting patch 6cda3a7 which triggered 
regression in javadocs on some java8 compilers (stevel on behalf of cnauroth)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe7df5b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe7df5b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe7df5b4

Branch: refs/heads/branch-2
Commit: fe7df5b4d87298571da45562402976810899b6a9
Parents: 253c785
Author: Steve Loughran ste...@apache.org
Authored: Mon Mar 2 18:15:24 2015 -0800
Committer: Steve Loughran ste...@apache.org
Committed: Mon Mar 2 18:15:24 2015 -0800

--
 .../java/org/apache/hadoop/classification/InterfaceStability.java   | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe7df5b4/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceStability.java
--
diff --git 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceStability.java
 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceStability.java
index ec660ed..8454553 100644
--- 
a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceStability.java
+++ 
b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/InterfaceStability.java
@@ -24,7 +24,6 @@ import java.lang.annotation.RetentionPolicy;
 /**
  * Annotation to inform users of how much to rely on a particular package,
  * class or method not changing over time.
- * </ul>
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving



hadoop git commit: HDFS-7302. Remove downgrade from namenode -rollingUpgrade startup option since it may incorrectly finalize an ongoing rolling upgrade. Contributed by Kai Sasaki

2015-03-02 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/trunk 14dd647c5 -> 431e7d84c


HDFS-7302. Remove downgrade from namenode -rollingUpgrade startup option 
since it may incorrectly finalize an ongoing rolling upgrade.
Contributed by Kai Sasaki


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/431e7d84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/431e7d84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/431e7d84

Branch: refs/heads/trunk
Commit: 431e7d84c7b68b34ff18de19afe8e46637047fa6
Parents: 14dd647
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Tue Mar 3 10:04:08 2015 +0800
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Tue Mar 3 10:04:08 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  4 
 .../hadoop/hdfs/server/common/HdfsServerConstants.java  | 10 +-
 .../hadoop/hdfs/server/namenode/FSEditLogLoader.java|  3 ---
 .../org/apache/hadoop/hdfs/server/namenode/FSImage.java |  4 
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   |  3 +--
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md   |  2 +-
 .../hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml| 11 +--
 .../apache/hadoop/hdfs/TestRollingUpgradeDowngrade.java | 12 
 .../hdfs/server/datanode/TestHdfsServerConstants.java   |  3 ---
 .../hdfs/server/namenode/TestNameNodeOptionParsing.java |  8 
 10 files changed, 28 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/431e7d84/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 43505d7..52e5d3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -14,6 +14,10 @@ Trunk (Unreleased)
 
 HDFS-2538. option to disable fsck dots (Mohammad Kamrul Islam via aw)
 
+HDFS-7302. Remove downgrade from namenode -rollingUpgrade startup
+option since it may incorrectly finalize an ongoing rolling upgrade.
+(Kai Sasaki via szetszwo)
+
   NEW FEATURES
 
 HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/431e7d84/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 9bba2c9..ff64524 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -49,7 +49,7 @@ public final class HdfsServerConstants {
 
   /** Startup options for rolling upgrade. */
   public static enum RollingUpgradeStartupOption{
-ROLLBACK, DOWNGRADE, STARTED;
+ROLLBACK, STARTED;
 
 public String getOptionString() {
      return StartupOption.ROLLINGUPGRADE.getName() + " "
@@ -64,6 +64,14 @@ public final class HdfsServerConstants {
 private static final RollingUpgradeStartupOption[] VALUES = values();
 
 static RollingUpgradeStartupOption fromString(String s) {
+      if ("downgrade".equalsIgnoreCase(s)) {
+        throw new IllegalArgumentException(
+            "The \"downgrade\" option is no longer supported"
+            + " since it may incorrectly finalize an ongoing rolling upgrade."
+            + " For downgrade instruction, please see the documentation"
+            + " (http://hadoop.apache.org/docs/current/hadoop-project-dist/"
+            + "hadoop-hdfs/HdfsRollingUpgrade.html#Downgrade).");
+      }
   for(RollingUpgradeStartupOption opt : VALUES) {
 if (opt.name().equalsIgnoreCase(s)) {
   return opt;
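
A self-contained reduction of the parsing logic above, useful for seeing the rejection path in isolation (HDFS-specific constants and the documentation pointer are stripped; this is a sketch, not the committed class):

enum RollingUpgradeOption {
  ROLLBACK, STARTED;

  // Case-insensitive lookup that explicitly rejects the retired option
  // instead of silently failing to match it.
  static RollingUpgradeOption fromString(String s) {
    if ("downgrade".equalsIgnoreCase(s)) {
      throw new IllegalArgumentException(
          "The \"downgrade\" option is no longer supported"
          + " since it may incorrectly finalize an ongoing rolling upgrade.");
    }
    for (RollingUpgradeOption opt : values()) {
      if (opt.name().equalsIgnoreCase(s)) {
        return opt;
      }
    }
    throw new IllegalArgumentException(
        "Failed to convert \"" + s + "\" to RollingUpgradeOption.");
  }

  public static void main(String[] args) {
    System.out.println(fromString("rollback"));   // ROLLBACK
    System.out.println(fromString("downgrade"));  // throws IllegalArgumentException
  }
}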

http://git-wip-us.apache.org/repos/asf/hadoop/blob/431e7d84/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index a09df82..51c167a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ 

hadoop git commit: MAPREDUCE-6268. Fix typo in Task Attempt API's URL. Contributed by Ryu Kobayashi.

2015-03-02 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6090f5172 -> b1e7f9c58


MAPREDUCE-6268. Fix typo in Task Attempt API's URL. Contributed by Ryu 
Kobayashi.

(cherry picked from commit 742f9d90c00f823ad7fea7e79702fcf238fa5721)

Conflicts:
hadoop-mapreduce-project/CHANGES.txt

hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1e7f9c5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1e7f9c5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1e7f9c5

Branch: refs/heads/branch-2
Commit: b1e7f9c58862d1044ad8ac6c57f8a055f27e2764
Parents: 6090f51
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 3 16:21:16 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Mar 3 16:24:27 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../src/site/apt/HistoryServerRest.apt.vm | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1e7f9c5/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index c51bd23..47152f9 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -151,6 +151,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-2815. JavaDoc does not generate correctly for
 MultithreadedMapRunner. (Chris Palmer via aajisaka)
 
+MAPREDUCE-6268. Fix typo in Task Attempt API's URL. (Ryu Kobayashi
+via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1e7f9c5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/apt/HistoryServerRest.apt.vm
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/apt/HistoryServerRest.apt.vm
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/apt/HistoryServerRest.apt.vm
index 5a1e36bf..de352aa 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/apt/HistoryServerRest.apt.vm
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/apt/HistoryServerRest.apt.vm
@@ -2137,7 +2137,7 @@ MapReduce History Server REST API's.
   Use the following URI to obtain an Task Attempt Object, from a task 
identified by the {attemptid} value. 
 
 --
-  * http://<history server http address:port>/ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempt/{attemptid}
+  * http://<history server http address:port>/ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}
 --
 
 *** HTTP Operations Supported 



hadoop git commit: MAPREDUCE-6268. Fix typo in Task Attempt API's URL. Contributed by Ryu Kobayashi.

2015-03-02 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk d1c6accb6 -> 742f9d90c


MAPREDUCE-6268. Fix typo in Task Attempt API's URL. Contributed by Ryu 
Kobayashi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/742f9d90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/742f9d90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/742f9d90

Branch: refs/heads/trunk
Commit: 742f9d90c00f823ad7fea7e79702fcf238fa5721
Parents: d1c6acc
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 3 16:21:16 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Mar 3 16:21:16 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../src/site/markdown/HistoryServerRest.md| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/742f9d90/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ccd24a6..5fd7d30 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -399,6 +399,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6223. TestJobConf#testNegativeValueForTaskVmem failures. 
 (Varun Saxena via kasha)
 
+MAPREDUCE-6268. Fix typo in Task Attempt API's URL. (Ryu Kobayashi
+via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742f9d90/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
index 8a78754..b4ce00a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/markdown/HistoryServerRest.md
@@ -1889,7 +1889,7 @@ A Task Attempt resource contains information about a 
particular task attempt wit
 
 Use the following URI to obtain a Task Attempt Object, from a task identified by the attemptid value.
 
-  * http://<history server http address:port>/ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempt/{attemptid}
+  * http://<history server http address:port>/ws/v1/history/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}
 
  HTTP Operations Supported
 



hadoop git commit: HDFS-7871. NameNodeEditLogRoller can keep printing 'Swallowing exception' message. Contributed by Jing Zhao.

2015-03-02 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/trunk b18d3830a -> b442aeec9


HDFS-7871. NameNodeEditLogRoller can keep printing 'Swallowing exception' 
message. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b442aeec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b442aeec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b442aeec

Branch: refs/heads/trunk
Commit: b442aeec95abfa1c6f835a116dfe6e186b0d841d
Parents: b18d383
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 2 20:22:04 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 2 20:22:04 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 8 +---
 2 files changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b442aeec/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 52e5d3c..fe78097 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1071,6 +1071,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7785. Improve diagnostics information for HttpPutFailedException.
 (Chengbing Liu via wheat9)
 
+HDFS-7871. NameNodeEditLogRoller can keep printing "Swallowing exception"
+message. (jing9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b442aeec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7cd194e..d2b48f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4558,14 +4558,16 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 + rollThreshold);
 rollEditLog();
   }
+} catch (Exception e) {
+  FSNamesystem.LOG.error("Swallowing exception in "
+  + NameNodeEditLogRoller.class.getSimpleName() + ":", e);
+}
+try {
   Thread.sleep(sleepIntervalMs);
 } catch (InterruptedException e) {
   FSNamesystem.LOG.info(NameNodeEditLogRoller.class.getSimpleName()
   + " was interrupted, exiting");
   break;
-} catch (Exception e) {
-  FSNamesystem.LOG.error("Swallowing exception in "
-  + NameNodeEditLogRoller.class.getSimpleName() + ":", e);
 }
   }
 }
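
The point of the restructuring above: before the patch, any exception thrown by rollEditLog() skipped the Thread.sleep() in the same try block, so the roller logged the "Swallowing exception" error in a tight loop. Splitting the sleep into its own try restores the pause while still letting InterruptedException terminate the thread. A standalone sketch of the corrected control flow; the class name, rollEditLog() body, and interval are illustrative stand-ins, not the actual FSNamesystem internals:

public class EditLogRollerLoop implements Runnable {
  private final long sleepIntervalMs = 5 * 60 * 1000; // placeholder interval
  private volatile boolean shouldRun = true;

  public void run() {
    while (shouldRun) {
      try {
        rollEditLog(); // may fail transiently, e.g. while in safe mode
      } catch (Exception e) {
        // Swallow and log; the loop continues, but now always sleeps below.
        System.err.println("Swallowing exception: " + e);
      }
      try {
        Thread.sleep(sleepIntervalMs); // reached even after a failure above
      } catch (InterruptedException e) {
        break; // interruption still exits the roller promptly
      }
    }
  }

  public void stop() {
    shouldRun = false;
  }

  private void rollEditLog() {
    // stand-in for FSNamesystem#rollEditLog()
  }
}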



hadoop git commit: HDFS-7871. NameNodeEditLogRoller can keep printing 'Swallowing exception' message. Contributed by Jing Zhao.

2015-03-02 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5e235802d -> 6090f5172


HDFS-7871. NameNodeEditLogRoller can keep printing 'Swallowing exception' 
message. Contributed by Jing Zhao.

(cherry picked from commit b442aeec95abfa1c6f835a116dfe6e186b0d841d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6090f517
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6090f517
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6090f517

Branch: refs/heads/branch-2
Commit: 6090f51725e2b44d794433ed72a1901fae2ba7e3
Parents: 5e23580
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 2 20:22:04 2015 -0800
Committer: Jing Zhao ji...@apache.org
Committed: Mon Mar 2 20:23:10 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 8 +---
 2 files changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6090f517/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c945e16..354b99b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -765,6 +765,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7785. Improve diagnostics information for HttpPutFailedException.
 (Chengbing Liu via wheat9)
 
+HDFS-7871. NameNodeEditLogRoller can keep printing "Swallowing exception"
+message. (jing9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6090f517/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c7d9350..17f313d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4558,14 +4558,16 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 + rollThreshold);
 rollEditLog();
   }
+} catch (Exception e) {
+  FSNamesystem.LOG.error("Swallowing exception in "
+  + NameNodeEditLogRoller.class.getSimpleName() + ":", e);
+}
+try {
   Thread.sleep(sleepIntervalMs);
 } catch (InterruptedException e) {
   FSNamesystem.LOG.info(NameNodeEditLogRoller.class.getSimpleName()
   + " was interrupted, exiting");
   break;
-} catch (Exception e) {
-  FSNamesystem.LOG.error("Swallowing exception in "
-  + NameNodeEditLogRoller.class.getSimpleName() + ":", e);
 }
   }
 }



[3/3] hadoop git commit: HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)

2015-03-02 Thread ozawa
HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d1c6accb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d1c6accb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d1c6accb

Branch: refs/heads/trunk
Commit: d1c6accb6f87b08975175580e15f1ff1fe29ab04
Parents: b442aee
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Mar 3 14:12:34 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Mar 3 14:17:52 2015 +0900

--
 .../classification/tools/StabilityOptions.java  |  5 ++-
 .../AltKerberosAuthenticationHandler.java   |  6 ++-
 .../authentication/util/TestKerberosUtil.java   | 14 ---
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 +
 .../org/apache/hadoop/conf/Configuration.java   |  6 +--
 .../org/apache/hadoop/crypto/CipherSuite.java   |  3 +-
 .../hadoop/crypto/key/JavaKeyStoreProvider.java |  3 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  7 +++-
 .../java/org/apache/hadoop/fs/StorageType.java  |  3 +-
 .../apache/hadoop/fs/permission/AclEntry.java   |  5 ++-
 .../apache/hadoop/fs/shell/XAttrCommands.java   |  2 +-
 .../org/apache/hadoop/fs/shell/find/Name.java   |  5 ++-
 .../io/compress/CompressionCodecFactory.java|  7 ++--
 .../hadoop/metrics2/impl/MetricsConfig.java |  7 ++--
 .../hadoop/metrics2/impl/MetricsSystemImpl.java |  5 ++-
 .../hadoop/security/SaslPropertiesResolver.java |  3 +-
 .../apache/hadoop/security/SecurityUtil.java| 12 +++---
 .../hadoop/security/WhitelistBasedResolver.java |  3 +-
 .../security/ssl/FileBasedKeyStoresFactory.java |  4 +-
 .../apache/hadoop/security/ssl/SSLFactory.java  |  5 ++-
 .../security/ssl/SSLHostnameVerifier.java   | 10 +++--
 .../DelegationTokenAuthenticationHandler.java   |  3 +-
 .../web/DelegationTokenAuthenticator.java   |  3 +-
 .../apache/hadoop/util/ComparableVersion.java   |  3 +-
 .../org/apache/hadoop/util/StringUtils.java | 40 +++-
 .../hadoop/fs/FileSystemContractBaseTest.java   |  4 +-
 .../java/org/apache/hadoop/ipc/TestIPC.java |  2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java |  2 +-
 .../hadoop/security/TestSecurityUtil.java   | 10 +++--
 .../security/TestUserGroupInformation.java  |  5 ++-
 .../hadoop/test/TimedOutTestsListener.java  |  6 ++-
 .../org/apache/hadoop/util/TestStringUtils.java | 21 ++
 .../org/apache/hadoop/util/TestWinUtils.java|  6 ++-
 .../java/org/apache/hadoop/nfs/NfsExports.java  |  5 ++-
 .../server/CheckUploadContentTypeFilter.java|  4 +-
 .../hadoop/fs/http/server/FSOperations.java |  7 +++-
 .../http/server/HttpFSParametersProvider.java   |  4 +-
 .../org/apache/hadoop/lib/server/Server.java|  3 +-
 .../service/hadoop/FileSystemAccessService.java |  6 ++-
 .../org/apache/hadoop/lib/wsrs/EnumParam.java   |  2 +-
 .../apache/hadoop/lib/wsrs/EnumSetParam.java|  3 +-
 .../hadoop/lib/wsrs/ParametersProvider.java |  3 +-
 .../org/apache/hadoop/hdfs/XAttrHelper.java | 19 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |  3 +-
 .../BlockStoragePolicySuite.java|  4 +-
 .../hdfs/server/common/HdfsServerConstants.java |  5 ++-
 .../hdfs/server/datanode/StorageLocation.java   |  4 +-
 .../hdfs/server/namenode/FSEditLogOp.java   |  3 +-
 .../namenode/QuotaByStorageTypeEntry.java   |  3 +-
 .../hdfs/server/namenode/SecondaryNameNode.java |  2 +-
 .../org/apache/hadoop/hdfs/tools/GetConf.java   | 17 +
 .../OfflineEditsVisitorFactory.java |  7 ++--
 .../offlineImageViewer/FSImageHandler.java  |  4 +-
 .../org/apache/hadoop/hdfs/web/AuthFilter.java  |  3 +-
 .../org/apache/hadoop/hdfs/web/ParamFilter.java |  3 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  5 ++-
 .../hadoop/hdfs/web/resources/EnumParam.java|  3 +-
 .../hadoop/hdfs/web/resources/EnumSetParam.java |  3 +-
 .../namenode/snapshot/TestSnapshotManager.java  |  6 +--
 .../jobhistory/JobHistoryEventHandler.java  |  3 +-
 .../mapreduce/v2/app/webapp/AppController.java  |  6 +--
 .../apache/hadoop/mapreduce/TypeConverter.java  |  3 +-
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |  4 +-
 .../hadoop/mapreduce/TestTypeConverter.java |  6 ++-
 .../java/org/apache/hadoop/mapred/Task.java |  2 +-
 .../counters/FileSystemCounterGroup.java|  4 +-
 .../mapreduce/filecache/DistributedCache.java   |  4 +-
 .../hadoop/mapreduce/lib/db/DBInputFormat.java  |  5 ++-
 .../org/apache/hadoop/mapreduce/tools/CLI.java  |  9 +++--
 .../java/org/apache/hadoop/fs/TestDFSIO.java| 18 -
 .../org/apache/hadoop/fs/TestFileSystem.java|  4 +-
 .../org/apache/hadoop/fs/slive/Constants.java   |  6 ++-
 .../apache/hadoop/fs/slive/OperationData.java   |  3 +-
 .../apache/hadoop/fs/slive/OperationOutput.java |  4 +-
 

[2/3] hadoop git commit: HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)

2015-03-02 Thread ozawa
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1c6accb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
index 92a16cd..e6cf16c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -79,19 +80,19 @@ public class GetConf extends Configured implements Tool {
 private static final Map<String, CommandHandler> map;
 static  {
   map = new HashMap<String, CommandHandler>();
-  map.put(NAMENODE.getName().toLowerCase(), 
+  map.put(StringUtils.toLowerCase(NAMENODE.getName()),
   new NameNodesCommandHandler());
-  map.put(SECONDARY.getName().toLowerCase(),
+  map.put(StringUtils.toLowerCase(SECONDARY.getName()),
   new SecondaryNameNodesCommandHandler());
-  map.put(BACKUP.getName().toLowerCase(), 
+  map.put(StringUtils.toLowerCase(BACKUP.getName()),
   new BackupNodesCommandHandler());
-  map.put(INCLUDE_FILE.getName().toLowerCase(), 
+  map.put(StringUtils.toLowerCase(INCLUDE_FILE.getName()),
   new CommandHandler(DFSConfigKeys.DFS_HOSTS));
-  map.put(EXCLUDE_FILE.getName().toLowerCase(),
+  map.put(StringUtils.toLowerCase(EXCLUDE_FILE.getName()),
   new CommandHandler(DFSConfigKeys.DFS_HOSTS_EXCLUDE));
-  map.put(NNRPCADDRESSES.getName().toLowerCase(),
+  map.put(StringUtils.toLowerCase(NNRPCADDRESSES.getName()),
   new NNRpcAddressesCommandHandler());
-  map.put(CONFKEY.getName().toLowerCase(),
+  map.put(StringUtils.toLowerCase(CONFKEY.getName()),
   new PrintConfKeyCommandHandler());
 }
 
@@ -116,7 +117,7 @@ public class GetConf extends Configured implements Tool {
 }
 
 public static CommandHandler getHandler(String cmd) {
-  return map.get(cmd.toLowerCase());
+  return map.get(StringUtils.toLowerCase(cmd));
 }
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1c6accb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitorFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitorFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitorFactory.java
index c4b8424..de3aceb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitorFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitorFactory.java
@@ -24,6 +24,7 @@ import java.io.OutputStream;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * EditsVisitorFactory for different implementations of EditsVisitor
@@ -43,7 +44,7 @@ public class OfflineEditsVisitorFactory {
*/
   static public OfflineEditsVisitor getEditsVisitor(String filename,
 String processor, boolean printToScreen) throws IOException {
-if(processor.toLowerCase().equals("binary")) {
+if(StringUtils.equalsIgnoreCase("binary", processor)) {
   return new BinaryEditsVisitor(filename);
 }
 OfflineEditsVisitor vis;
@@ -59,9 +60,9 @@ public class OfflineEditsVisitorFactory {
 outs[1] = System.out;
 out = new TeeOutputStream(outs);
   }
-  if(processor.toLowerCase().equals("xml")) {
+  if(StringUtils.equalsIgnoreCase("xml", processor)) {
 vis = new XmlEditsVisitor(out);
-  } else if(processor.toLowerCase().equals("stats")) {
+  } else if(StringUtils.equalsIgnoreCase("stats", processor)) {
 vis = new StatisticsEditsVisitor(out);
   } else {
 throw new IOException("Unknown proccesor " + processor +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1c6accb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
--
diff --git 

[1/3] hadoop git commit: HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)

2015-03-02 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk b442aeec9 -> d1c6accb6


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1c6accb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 46b45f8..21d70b4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
 import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
@@ -756,7 +757,7 @@ public class ClientRMService extends AbstractService 
implements
   if (applicationTypes != null && !applicationTypes.isEmpty()) {
 String appTypeToMatch = caseSensitive
 ? application.getApplicationType()
-: application.getApplicationType().toLowerCase();
+: StringUtils.toLowerCase(application.getApplicationType());
 if (!applicationTypes.contains(appTypeToMatch)) {
   continue;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1c6accb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
index 230f9a9..d6e9e45 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
@@ -20,6 +20,7 @@ package 
org.apache.hadoop.yarn.server.resourcemanager.resource;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.util.StringUtils;
 
 @Private
 @Evolving
@@ -61,7 +62,7 @@ public class ResourceWeights {
 sb.append(", ");
   }
   ResourceType resourceType = ResourceType.values()[i];
-  sb.append(resourceType.name().toLowerCase());
+  sb.append(StringUtils.toLowerCase(resourceType.name()));
   sb.append(String.format(" weight=%.1f", getWeight(resourceType)));
 }
 sb.append(">");
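
The motivation for the sweep above: String.toLowerCase() and toUpperCase() without an explicit locale use the JVM's default locale, which breaks ASCII keyword matching, most famously under the Turkish locale where 'I' lower-cases to a dotless 'ı'. A self-contained sketch of the failure mode and of the Locale.ENGLISH fix; the helper below mirrors what the patched org.apache.hadoop.util.StringUtils provides, written out here only for illustration:

import java.util.Locale;

public class LocaleCaseDemo {
  // Locale-insensitive lower-casing, as the patched StringUtils provides.
  static String toLowerCase(String s) {
    return s.toLowerCase(Locale.ENGLISH);
  }

  public static void main(String[] args) {
    Locale.setDefault(new Locale("tr", "TR")); // simulate a Turkish JVM
    // Default-locale lower-casing maps 'I' to dotless 'ı' (U+0131), so a
    // keyword check like the "binary" processor match above silently fails.
    System.out.println("BINARY".toLowerCase().equals("binary")); // false
    System.out.println(toLowerCase("BINARY").equals("binary"));  // true
  }
}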

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d1c6accb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 3528c2d..102e553 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ 

hadoop git commit: HDFS-7789. DFSck should resolve the path to support cross-FS symlinks. (gera)

2015-03-02 Thread gera
Repository: hadoop
Updated Branches:
  refs/heads/trunk 67ed59348 -> cbb492578


HDFS-7789. DFSck should resolve the path to support cross-FS symlinks. (gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbb49257
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbb49257
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbb49257

Branch: refs/heads/trunk
Commit: cbb492578ef09300821b7199de54c6508f9d7fe8
Parents: 67ed593
Author: Gera Shegalov g...@apache.org
Authored: Thu Feb 12 04:32:43 2015 -0800
Committer: Gera Shegalov g...@apache.org
Committed: Mon Mar 2 00:55:35 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/tools/DFSck.java | 31 +---
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 14 ++---
 .../namenode/TestFsckWithMultipleNameNodes.java | 20 +
 4 files changed, 53 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb49257/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5ca16af..d5208da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -697,6 +697,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7439. Add BlockOpResponseProto's message to the exception messages.
 (Takanobu Asanuma via szetszwo)
 
+HDFS-7789. DFSck should resolve the path to support cross-FS symlinks.
+(gera)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb49257/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index ec83a90..dc6d9d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -225,6 +225,14 @@ public class DFSck extends Configured implements Tool {
 return errCode;
   }
   
+
+  private Path getResolvedPath(String dir) throws IOException {
+Configuration conf = getConf();
+Path dirPath = new Path(dir);
+FileSystem fs = dirPath.getFileSystem(conf);
+return fs.resolvePath(dirPath);
+  }
+
   /**
* Derive the namenode http address from the current file system,
* either default or as set by -fs in the generic options.
@@ -236,19 +244,12 @@ public class DFSck extends Configured implements Tool {
 Configuration conf = getConf();
 
 //get the filesystem object to verify it is an HDFS system
-final FileSystem fs;
-try {
-  fs = target.getFileSystem(conf);
-} catch (IOException ioe) {
-  System.err.println("FileSystem is inaccessible due to:\n"
-  + StringUtils.stringifyException(ioe));
-  return null;
-}
+final FileSystem fs = target.getFileSystem(conf);
 if (!(fs instanceof DistributedFileSystem)) {
   System.err.println("FileSystem is " + fs.getUri());
   return null;
 }
-
+
 return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf,
 DFSUtil.getHttpClientScheme(conf));
   }
@@ -303,8 +304,16 @@ public class DFSck extends Configured implements Tool {
   dir = "/";
 }
 
-final Path dirpath = new Path(dir);
-final URI namenodeAddress = getCurrentNamenodeAddress(dirpath);
+Path dirpath = null;
+URI namenodeAddress = null;
+try {
+  dirpath = getResolvedPath(dir);
+  namenodeAddress = getCurrentNamenodeAddress(dirpath);
+} catch (IOException ioe) {
+  System.err.println("FileSystem is inaccessible due to:\n"
+  + StringUtils.stringifyException(ioe));
+}
+
 if (namenodeAddress == null) {
   //Error message already output in {@link #getCurrentNamenodeAddress()}
   System.err.println("DFSck exiting.");
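
The new getResolvedPath() step above means fsck now follows cross-filesystem symlinks (for example a viewfs:// path backed by an hdfs:// namespace) before deriving the NameNode address. A minimal sketch of the same resolution call; the path is a placeholder and must exist for resolvePath() to succeed:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ResolveBeforeFsck {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration(); // picks up core-site.xml
    Path dirPath = new Path("/user/alice/data"); // may cross filesystems
    FileSystem fs = dirPath.getFileSystem(conf);
    // resolvePath() follows symlinks and mount points, returning a fully
    // qualified path on the filesystem that actually stores the data.
    Path resolved = fs.resolvePath(dirPath);
    System.out.println("fsck would target: " + resolved.toUri());
  }
}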

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb49257/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 1053b5f..409fffc 100644
--- 

hadoop git commit: HDFS-7789. DFSck should resolve the path to support cross-FS symlinks. (gera)

2015-03-02 Thread gera
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a5f3156b3 -> 52d0724a2


HDFS-7789. DFSck should resolve the path to support cross-FS symlinks. (gera)

(cherry picked from commit cbb492578ef09300821b7199de54c6508f9d7fe8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52d0724a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52d0724a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52d0724a

Branch: refs/heads/branch-2
Commit: 52d0724a20112e29698a8b175866a9e714ace652
Parents: a5f3156
Author: Gera Shegalov g...@apache.org
Authored: Thu Feb 12 04:32:43 2015 -0800
Committer: Gera Shegalov g...@apache.org
Committed: Mon Mar 2 01:05:15 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/tools/DFSck.java | 31 +---
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 14 ++---
 .../namenode/TestFsckWithMultipleNameNodes.java | 20 +
 4 files changed, 53 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52d0724a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 028521e..d9b96ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -396,6 +396,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7439. Add BlockOpResponseProto's message to the exception messages.
 (Takanobu Asanuma via szetszwo)
 
+HDFS-7789. DFSck should resolve the path to support cross-FS symlinks.
+(gera)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52d0724a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 5b43290..900d8ba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -223,6 +223,14 @@ public class DFSck extends Configured implements Tool {
 return errCode;
   }
   
+
+  private Path getResolvedPath(String dir) throws IOException {
+Configuration conf = getConf();
+Path dirPath = new Path(dir);
+FileSystem fs = dirPath.getFileSystem(conf);
+return fs.resolvePath(dirPath);
+  }
+
   /**
* Derive the namenode http address from the current file system,
* either default or as set by -fs in the generic options.
@@ -234,19 +242,12 @@ public class DFSck extends Configured implements Tool {
 Configuration conf = getConf();
 
 //get the filesystem object to verify it is an HDFS system
-final FileSystem fs;
-try {
-  fs = target.getFileSystem(conf);
-} catch (IOException ioe) {
-  System.err.println("FileSystem is inaccessible due to:\n"
-  + StringUtils.stringifyException(ioe));
-  return null;
-}
+final FileSystem fs = target.getFileSystem(conf);
 if (!(fs instanceof DistributedFileSystem)) {
   System.err.println("FileSystem is " + fs.getUri());
   return null;
 }
-
+
 return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf,
 DFSUtil.getHttpClientScheme(conf));
   }
@@ -300,8 +301,16 @@ public class DFSck extends Configured implements Tool {
   dir = "/";
 }
 
-final Path dirpath = new Path(dir);
-final URI namenodeAddress = getCurrentNamenodeAddress(dirpath);
+Path dirpath = null;
+URI namenodeAddress = null;
+try {
+  dirpath = getResolvedPath(dir);
+  namenodeAddress = getCurrentNamenodeAddress(dirpath);
+} catch (IOException ioe) {
+  System.err.println("FileSystem is inaccessible due to:\n"
+  + StringUtils.stringifyException(ioe));
+}
+
 if (namenodeAddress == null) {
   //Error message already output in {@link #getCurrentNamenodeAddress()}
   System.err.println("DFSck exiting.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52d0724a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 3c69f1d..b8024a7 100644
--- 

hadoop git commit: HADOOP-11658. Externalize io.compression.codecs property. Contributed by Kai Zheng.

2015-03-02 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk cbb492578 -> ca1c00bf8


HADOOP-11658. Externalize io.compression.codecs property. Contributed by Kai 
Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca1c00bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca1c00bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca1c00bf

Branch: refs/heads/trunk
Commit: ca1c00bf814a8b8290a81d06b1f4918c36c7d9e0
Parents: cbb4925
Author: Akira Ajisaka aajis...@apache.org
Authored: Mon Mar 2 01:09:54 2015 -0800
Committer: Akira Ajisaka aajis...@apache.org
Committed: Mon Mar 2 01:12:44 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../hadoop/fs/CommonConfigurationKeys.java  | 17 +++-
 .../io/compress/CompressionCodecFactory.java| 21 +---
 .../hadoop/io/compress/TestCodecFactory.java|  3 ++-
 4 files changed, 31 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca1c00bf/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4c0c375..b8ed286 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -642,6 +642,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-10976. moving the source code of hadoop-tools docs to the
 directory under hadoop-tools (Masatake Iwasaki via aw)
 
+HADOOP-11658. Externalize io.compression.codecs property.
+(Kai Zheng via aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca1c00bf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 442dc7d..7575496 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -91,17 +91,24 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";
   public static final String IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY = "identity-provider.impl";
 
+  /** This is for specifying the implementation for the mappings from
+   * hostnames to the racks they belong to
+   */
+  public static final String  NET_TOPOLOGY_CONFIGURED_NODE_MAPPING_KEY =
+  "net.topology.configured.node.mapping";
+
+  /**
+   * Supported compression codec classes
+   */
+  public static final String IO_COMPRESSION_CODECS_KEY = "io.compression.codecs";
+
   /** Internal buffer size for Lzo compressor/decompressors */
   public static final String  IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY =
 "io.compression.codec.lzo.buffersize";
+
   /** Default value for IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY */
   public static final int IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT =
 64*1024;
-  /** This is for specifying the implementation for the mappings from
-   * hostnames to the racks they belong to
-   */
-  public static final String  NET_TOPOLOGY_CONFIGURED_NODE_MAPPING_KEY =
-"net.topology.configured.node.mapping";
 
   /** Internal buffer size for Snappy compressor/decompressors */
   public static final String IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca1c00bf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
index eb35759..7476a15 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
@@ -24,6 +24,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import 
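
With the key externalized, callers can reference the constant instead of repeating the string literal. A short sketch of configuring and querying codecs through CompressionCodecFactory; the codec list and file name below are examples only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;

public class CodecLookup {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Same effect as setting io.compression.codecs in core-site.xml.
    conf.set(CommonConfigurationKeys.IO_COMPRESSION_CODECS_KEY,
        "org.apache.hadoop.io.compress.GzipCodec,"
            + "org.apache.hadoop.io.compress.DefaultCodec");
    CompressionCodecFactory factory = new CompressionCodecFactory(conf);
    // Pick a codec by file suffix, as MapReduce input handling does.
    CompressionCodec codec = factory.getCodec(new Path("/logs/app.log.gz"));
    System.out.println(codec == null
        ? "no codec registered for suffix"
        : codec.getClass().getName()); // GzipCodec for the .gz suffix
  }
}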

hadoop git commit: HADOOP-11658. Externalize io.compression.codecs property. Contributed by Kai Zheng.

2015-03-02 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 52d0724a2 -> fcd817afb


HADOOP-11658. Externalize io.compression.codecs property. Contributed by Kai 
Zheng.

(cherry picked from commit 0650e1c56f2d87ca9bdc51d7d4678895f494464a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fcd817af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fcd817af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fcd817af

Branch: refs/heads/branch-2
Commit: fcd817afb0ccb64b78963007b8d64511b36594fb
Parents: 52d0724
Author: Akira Ajisaka aajis...@apache.org
Authored: Mon Mar 2 01:09:54 2015 -0800
Committer: Akira Ajisaka aajis...@apache.org
Committed: Mon Mar 2 01:13:10 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../hadoop/fs/CommonConfigurationKeys.java  | 17 +++-
 .../io/compress/CompressionCodecFactory.java| 21 +---
 .../hadoop/io/compress/TestCodecFactory.java|  3 ++-
 4 files changed, 31 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcd817af/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 686b347..9f82722 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -224,6 +224,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-10976. moving the source code of hadoop-tools docs to the
 directory under hadoop-tools (Masatake Iwasaki via aw)
 
+HADOOP-11658. Externalize io.compression.codecs property.
+(Kai Zheng via aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcd817af/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 442dc7d..7575496 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -91,17 +91,24 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";
   public static final String IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY = "identity-provider.impl";
 
+  /** This is for specifying the implementation for the mappings from
+   * hostnames to the racks they belong to
+   */
+  public static final String  NET_TOPOLOGY_CONFIGURED_NODE_MAPPING_KEY =
+  "net.topology.configured.node.mapping";
+
+  /**
+   * Supported compression codec classes
+   */
+  public static final String IO_COMPRESSION_CODECS_KEY = "io.compression.codecs";
+
   /** Internal buffer size for Lzo compressor/decompressors */
   public static final String  IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY =
 "io.compression.codec.lzo.buffersize";
+
   /** Default value for IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY */
   public static final int IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT =
 64*1024;
-  /** This is for specifying the implementation for the mappings from
-   * hostnames to the racks they belong to
-   */
-  public static final String  NET_TOPOLOGY_CONFIGURED_NODE_MAPPING_KEY =
-"net.topology.configured.node.mapping";
 
   /** Internal buffer size for Snappy compressor/decompressors */
   public static final String IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcd817af/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
index eb35759..7476a15 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
@@ -24,6 +24,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import 

[01/50] [abbrv] hadoop git commit: HDFS-7467. Provide storage tier information for a directory via fsck. (Benoy Antony)

2015-03-02 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 edb292688 -> 22e6b2d3e


HDFS-7467. Provide storage tier information for a directory via fsck. (Benoy 
Antony)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7911e1d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7911e1d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7911e1d7

Branch: refs/heads/HDFS-7285
Commit: 7911e1d72e02130ba0f4f0042510ac8b09018ff3
Parents: 11a1c72
Author: Benoy Antony be...@apache.org
Authored: Wed Feb 25 16:19:35 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:50 2015 -0800

--
 .../hdfs/server/namenode/NamenodeFsck.java  |  23 +-
 .../server/namenode/StoragePolicySummary.java   | 257 +++
 .../org/apache/hadoop/hdfs/tools/DFSck.java |   2 +
 .../hadoop/hdfs/server/namenode/TestFsck.java   |  78 +-
 .../namenode/TestStoragePolicySummary.java  | 201 +++
 5 files changed, 548 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7911e1d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index a3e3a55..f36b773 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -25,6 +25,7 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Date;
 import java.util.Iterator;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -128,6 +130,7 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   private boolean showBlocks = false;
   private boolean showLocations = false;
   private boolean showRacks = false;
+  private boolean showStoragePolcies = false;
   private boolean showprogress = false;
   private boolean showCorruptFileBlocks = false;
 
@@ -165,6 +168,7 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   private List<String> snapshottableDirs = null;
 
   private final BlockPlacementPolicy bpPolicy;
+  private StoragePolicySummary storageTypeSummary = null;
 
   /**
* Filesystem checker.
@@ -201,6 +205,7 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   else if (key.equals("blocks")) { this.showBlocks = true; }
   else if (key.equals("locations")) { this.showLocations = true; }
   else if (key.equals("racks")) { this.showRacks = true; }
+  else if (key.equals("storagepolicies")) { this.showStoragePolcies = true; }
   else if (key.equals("showprogress")) { this.showprogress = true; }
   else if (key.equals("openforwrite")) {this.showOpenFiles = true; }
   else if (key.equals("listcorruptfileblocks")) {
@@ -335,6 +340,11 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   return;
 }
 
+if (this.showStoragePolcies) {
+  storageTypeSummary = new StoragePolicySummary(
+  namenode.getNamesystem().getBlockManager().getStoragePolicies());
+}
+
 Result res = new Result(conf);
 
 check(path, file, res);
@@ -343,6 +353,10 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 out.println(" Number of data-nodes:\t\t" + totalDatanodes);
 out.println(" Number of racks:\t\t" + networktopology.getNumOfRacks());
 
+if (this.showStoragePolcies) {
+  out.print(storageTypeSummary.toString());
+}
+
 out.println("FSCK ended at " + new Date() + " in "
 + (Time.now() - startTime + " milliseconds"));
 
@@ -493,7 +507,8 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   boolean isCorrupt = lBlk.isCorrupt();
   String blkName = block.toString();
   DatanodeInfo[] locs = lBlk.getLocations();
-  NumberReplicas numberReplicas = 
namenode.getNamesystem().getBlockManager().countNodes(block.getLocalBlock());
+  NumberReplicas numberReplicas =
+  
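
The new option is exposed on the fsck command line as "hdfs fsck <path> -storagepolicies". A sketch of driving it programmatically through ToolRunner; it assumes fs.defaultFS in the loaded configuration points at a running cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class FsckStoragePolicies {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent to: hdfs fsck / -storagepolicies
    int rc = ToolRunner.run(new DFSck(conf),
        new String[] { "/", "-storagepolicies" });
    System.exit(rc); // 0 if the namespace is HEALTHY
  }
}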

[32/50] [abbrv] hadoop git commit: YARN-3168. Convert site documentation from apt to markdown (Gururaj Shetty via aw)

2015-03-02 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/06aca7c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
new file mode 100644
index 000..1812a44
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -0,0 +1,233 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+Hadoop: Fair Scheduler
+==
+
+* [Purpose](#Purpose)
+* [Introduction](#Introduction)
+* [Hierarchical queues with pluggable policies](#Hierarchical_queues_with_pluggable_policies)
+* [Automatically placing applications in queues](#Automatically_placing_applications_in_queues)
+* [Installation](#Installation)
+* [Configuration](#Configuration)
+* [Properties that can be placed in yarn-site.xml](#Properties_that_can_be_placed_in_yarn-site.xml)
+* [Allocation file format](#Allocation_file_format)
+* [Queue Access Control Lists](#Queue_Access_Control_Lists)
+* [Administration](#Administration)
+* [Modifying configuration at runtime](#Modifying_configuration_at_runtime)
+* [Monitoring through web UI](#Monitoring_through_web_UI)
+* [Moving applications between queues](#Moving_applications_between_queues)
+
+##Purpose
+
+This document describes the `FairScheduler`, a pluggable scheduler for Hadoop 
that allows YARN applications to share resources in large clusters fairly.
+
+##Introduction
+
+Fair scheduling is a method of assigning resources to applications such that 
all apps get, on average, an equal share of resources over time. Hadoop NextGen 
is capable of scheduling multiple resource types. By default, the Fair 
Scheduler bases scheduling fairness decisions only on memory. It can be 
configured to schedule with both memory and CPU, using the notion of Dominant 
Resource Fairness developed by Ghodsi et al. When there is a single app 
running, that app uses the entire cluster. When other apps are submitted, 
resources that free up are assigned to the new apps, so that each app 
eventually on gets roughly the same amount of resources. Unlike the default 
Hadoop scheduler, which forms a queue of apps, this lets short apps finish in 
reasonable time while not starving long-lived apps. It is also a reasonable way 
to share a cluster between a number of users. Finally, fair sharing can also 
work with app priorities - the priorities are used as weights to determine the 
fraction of total resources that each app should get.
+
+The scheduler organizes apps further into queues, and shares resources 
fairly between these queues. By default, all users share a single queue, named 
"default". If an app specifically lists a queue in a container resource 
request, the request is submitted to that queue. It is also possible to assign 
queues based on the user name included with the request through configuration. 
Within each queue, a scheduling policy is used to share resources between the 
running apps. The default is memory-based fair sharing, but FIFO and 
multi-resource with Dominant Resource Fairness can also be configured. Queues 
can be arranged in a hierarchy to divide resources and configured with weights 
to share the cluster in specific proportions.
+
+In addition to providing fair sharing, the Fair Scheduler allows assigning 
guaranteed minimum shares to queues, which is useful for ensuring that certain 
users, groups or production applications always get sufficient resources. When 
a queue contains apps, it gets at least its minimum share, but when the queue 
does not need its full guaranteed share, the excess is split between other 
running apps. This lets the scheduler guarantee capacity for queues while 
utilizing resources efficiently when these queues don't contain applications.
+
+The Fair Scheduler lets all apps run by default, but it is also possible to 
limit the number of running apps per user and per queue through the config 
file. This can be useful when a user must submit hundreds of apps at once, or 
in general to improve performance if running too many apps at once would cause 
too much intermediate data to be created or too much context-switching. 
Limiting the apps does not cause any subsequently submitted apps 

[05/50] [abbrv] hadoop git commit: HADOOP-11480. Typo in hadoop-aws/index.md uses wrong scheme for test.fs.s3.name. Contributed by Ted Yu.

2015-03-02 Thread zhz
HADOOP-11480. Typo in hadoop-aws/index.md uses wrong scheme for 
test.fs.s3.name. Contributed by Ted Yu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef3702ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef3702ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef3702ef

Branch: refs/heads/HDFS-7285
Commit: ef3702ef300b5d897592d9d9a5990282c3e0f36a
Parents: fe66fe4
Author: Akira Ajisaka aajis...@apache.org
Authored: Tue Feb 24 17:11:46 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:50 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md   | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef3702ef/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 96d5724..a5a11b9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -996,6 +996,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11619. FTPFileSystem should override getDefaultPort.
 (Brahma Reddy Battula via gera)
 
+HADOOP-11480. Typo in hadoop-aws/index.md uses wrong scheme for
+test.fs.s3.name. (Ted Yu via aajisaka)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef3702ef/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 1e44864..8e80b92 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -353,7 +353,7 @@ Example:
 
   <property>
     <name>test.fs.s3.name</name>
-    <value>s3a://test-aws-s3/</value>
+    <value>s3://test-aws-s3/</value>
   </property>
   
   <property>
@@ -493,4 +493,4 @@ Example:
 This example pulls in the `auth-keys.xml` file for the credentials. 
 This provides one single place to keep the keys up to date —and means
 that the file `contract-test-options.xml` does not contain any
-secret credentials itself.
\ No newline at end of file
+secret credentials itself.



[27/50] [abbrv] hadoop git commit: recommit HDFS-7769. TestHDFSCLI should not create files in hdfs project root dir. (cherry picked from commit 7c6b6547eeed110e1a842e503bfd33afe04fa814)

2015-03-02 Thread zhz
recommit HDFS-7769. TestHDFSCLI should not create files in hdfs project root 
dir.
(cherry picked from commit 7c6b6547eeed110e1a842e503bfd33afe04fa814)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97c2c59c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97c2c59c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97c2c59c

Branch: refs/heads/HDFS-7285
Commit: 97c2c59c611336401c2d11d72fc4434fcf607823
Parents: 70c1fa0
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Tue Feb 10 17:48:57 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:54 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop-hdfs/src/test/resources/testHDFSConf.xml  | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97c2c59c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b4b0087..2a8da43 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -981,6 +981,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause
 DataNode to register successfully with only one NameNode.(vinayakumarb)
 
+HDFS-7769. TestHDFSCLI should not create files in hdfs project root dir.
+(szetszwo)
+
 HDFS-7753. Fix Multithreaded correctness Warnings in BackupImage.
 (Rakesh R and shv)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97c2c59c/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index e59b05a..2d3de1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -16483,8 +16483,8 @@
 <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
 <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /user/USERNAME/dir1</command>
 <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /user/USERNAME/dir1</command>
-<command>-fs NAMENODE -getmerge /user/USERNAME/dir1 data</command>
-<command>-cat data</command>
+<command>-fs NAMENODE -getmerge /user/USERNAME/dir1 CLITEST_DATA/file</command>
+<command>-cat CLITEST_DATA/file</command>
   </test-commands>
   <cleanup-commands>
 <command>-fs NAMENODE -rm -r /user/USERNAME</command>



[38/50] [abbrv] hadoop git commit: YARN-3199. Fair Scheduler documentation improvements (Rohit Agarwal via aw)

2015-03-02 Thread zhz
YARN-3199. Fair Scheduler documentation improvements (Rohit Agarwal via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e70ce6f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e70ce6f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e70ce6f9

Branch: refs/heads/HDFS-7285
Commit: e70ce6f917faf4183002939f0842bd71b4073f4f
Parents: 06aca7c
Author: Allen Wittenauer a...@apache.org
Authored: Sat Feb 28 11:36:15 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:54 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt | 5 -
 .../hadoop-yarn-site/src/site/markdown/FairScheduler.md | 2 ++
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e70ce6f9/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 02b1831..cef1758 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop YARN Change Log
 
-Trunk - Unreleased 
+Trunk - Unreleased
 
   INCOMPATIBLE CHANGES
 
@@ -23,6 +23,9 @@ Trunk - Unreleased
 YARN-3168. Convert site documentation from apt to markdown (Gururaj Shetty
 via aw)
 
+YARN-3199. Fair Scheduler documentation improvements (Rohit Agarwal via
+aw)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e70ce6f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index 1812a44..a58b3d3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -85,6 +85,8 @@ Customizing the Fair Scheduler typically involves altering 
two files. First, sch
 | `yarn.scheduler.fair.locality.threshold.rack` | For applications that 
request containers on particular racks, the number of scheduling opportunities 
since the last container assignment to wait before accepting a placement on 
another rack. Expressed as a float between 0 and 1, which, as a fraction of the 
cluster size, is the number of scheduling opportunities to pass up. The default 
value of -1.0 means don't pass up any scheduling opportunities. |
 | `yarn.scheduler.fair.allow-undeclared-pools` | If this is true, new queues 
can be created at application submission time, whether because they are 
specified as the application's queue by the submitter or because they are 
placed there by the user-as-default-queue property. If this is false, any time 
an app would be placed in a queue that is not specified in the allocations 
file, it is placed in the default queue instead. Defaults to true. If a queue 
placement policy is given in the allocations file, this property is ignored. |
 | `yarn.scheduler.fair.update-interval-ms` | The interval at which to lock the 
scheduler and recalculate fair shares, recalculate demand, and check whether 
anything is due for preemption. Defaults to 500 ms. |
+| `yarn.scheduler.increment-allocation-mb` | The fairscheduler grants memory 
in increments of this value. If you submit a task with resource request that is 
not a multiple of increment-allocation-mb, the request will be rounded up to 
the nearest increment. Defaults to 1024 MB. |
+| `yarn.scheduler.increment-allocation-vcores` | The fairscheduler grants 
vcores in increments of this value. If you submit a task with resource request 
that is not a multiple of increment-allocation-vcores, the request will be 
rounded up to the nearest increment. Defaults to 1. |
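The rounding rule described by the two new properties above can be made concrete with a small sketch (illustration only, not YARN code; the class and helper names are invented):

```java
// Hypothetical helper illustrating the documented rounding behavior of
// yarn.scheduler.increment-allocation-mb / -vcores; not part of YARN's API.
public class IncrementRounding {

  /** Round a request up to the nearest multiple of the increment. */
  static int roundUp(int requested, int increment) {
    return ((requested + increment - 1) / increment) * increment;
  }

  public static void main(String[] args) {
    // With the default increment of 1024 MB, a 1500 MB request becomes 2048 MB.
    System.out.println(roundUp(1500, 1024)); // prints 2048
    // With the default vcore increment of 1, requests are unchanged.
    System.out.println(roundUp(3, 1));       // prints 3
  }
}
```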
 
 ###Allocation file format
 



[02/50] [abbrv] hadoop git commit: HDFS-7495. Remove updatePosition argument from DFSInputStream#getBlockAt() (cmccabe)

2015-03-02 Thread zhz
HDFS-7495. Remove updatePosition argument from DFSInputStream#getBlockAt() 
(cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11a1c72c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11a1c72c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11a1c72c

Branch: refs/heads/HDFS-7285
Commit: 11a1c72ceb010b8048db79417ad65646047f9111
Parents: 8d184d1
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Feb 25 13:29:31 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:50 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 33 
 2 files changed, 16 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11a1c72c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4ca10da..e0f9267 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -668,6 +668,9 @@ Release 2.7.0 - UNRELEASED
 
 HDFS-7740. Test truncate with DataNodes restarting. (yliu)
 
+HDFS-7495. Remove updatePosition argument from DFSInputStream#getBlockAt()
+(cmccabe)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/11a1c72c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index c408524..cf8015f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -434,12 +434,10 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
* Fetch it from the namenode if not cached.
* 
* @param offset block corresponding to this offset in file is returned
-   * @param updatePosition whether to update current position
* @return located block
* @throws IOException
*/
-  private LocatedBlock getBlockAt(long offset,
-  boolean updatePosition) throws IOException {
+  private LocatedBlock getBlockAt(long offset) throws IOException {
 synchronized(infoLock) {
   assert (locatedBlocks != null) : "locatedBlocks is null";
 
@@ -449,7 +447,6 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   if (offset < 0 || offset >= getFileLength()) {
 throw new IOException("offset < 0 || offset >= getFileLength(), offset="
 + offset
-+ ", updatePosition=" + updatePosition
 + ", locatedBlocks=" + locatedBlocks);
   }
   else if (offset >= locatedBlocks.getFileLength()) {
@@ -470,17 +467,6 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 }
 blk = locatedBlocks.get(targetBlockIdx);
   }
-
-  // update current position
-  if (updatePosition) {
-// synchronized not strictly needed, since we only get here
-// from synchronized caller methods
-synchronized(this) {
-  pos = offset;
-  blockEnd = blk.getStartOffset() + blk.getBlockSize() - 1;
-  currentLocatedBlock = blk;
-}
-  }
   return blk;
 }
   }
@@ -604,7 +590,14 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   //
   // Compute desired block
   //
-  LocatedBlock targetBlock = getBlockAt(target, true);
+  LocatedBlock targetBlock = getBlockAt(target);
+
+  // update current position
+  this.pos = target;
+  this.blockEnd = targetBlock.getStartOffset() +
+targetBlock.getBlockSize() - 1;
+  this.currentLocatedBlock = targetBlock;
+
   assert (target==pos) : "Wrong postion " + pos + " expect " + target;
   long offsetIntoBlock = target - targetBlock.getStartOffset();
 
@@ -979,7 +972,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 }
 deadNodes.clear(); //2nd option is to remove only nodes[blockId]
 openInfo();
-block = getBlockAt(block.getStartOffset(), false);
+block = getBlockAt(block.getStartOffset());
 failures++;
 continue;
   }
@@ -1056,7 +1049,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   byte[] buf, int offset,
   Map<ExtendedBlock, Set<DatanodeInfo>> 

[24/50] [abbrv] hadoop git commit: HADOOP-11569. Provide Merge API for MapFile to merge multiple similar MapFiles to one MapFile. Contributed by Vinayakumar B.

2015-03-02 Thread zhz
HADOOP-11569. Provide Merge API for MapFile to merge multiple similar MapFiles 
to one MapFile. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/645ebb96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/645ebb96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/645ebb96

Branch: refs/heads/HDFS-7285
Commit: 645ebb965b88cb3018fb1588268cfaf8db837431
Parents: cc02446
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Fri Feb 27 17:46:07 2015 +0900
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:53 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../main/java/org/apache/hadoop/io/MapFile.java | 143 +++
 .../java/org/apache/hadoop/io/TestMapFile.java  |  56 
 3 files changed, 202 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/645ebb96/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1d9a6d4..6d4da77 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -445,6 +445,9 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11510. Expose truncate API via FileContext. (yliu)
 
+HADOOP-11569. Provide Merge API for MapFile to merge multiple similar 
MapFiles
+to one MapFile. (Vinayakumar B via ozawa)
+
   IMPROVEMENTS
 
 HADOOP-11483. HardLink.java should use the jdk7 createLink method 
(aajisaka)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/645ebb96/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
index 84c9dcc..ee76458 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
@@ -25,6 +25,7 @@ import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -824,6 +825,148 @@ public class MapFile {
 return cnt;
   }
 
+  /**
+   * Class to merge multiple MapFiles of same Key and Value types to one 
MapFile
+   */
+  public static class Merger {
+private Configuration conf;
+private WritableComparator comparator = null;
+private Reader[] inReaders;
+private Writer outWriter;
+private Class<Writable> valueClass = null;
+private Class<WritableComparable> keyClass = null;
+
+public Merger(Configuration conf) throws IOException {
+  this.conf = conf;
+}
+
+/**
+ * Merge multiple MapFiles to one Mapfile
+ *
+ * @param inMapFiles
+ * @param outMapFile
+ * @throws IOException
+ */
+public void merge(Path[] inMapFiles, boolean deleteInputs,
+Path outMapFile) throws IOException {
+  try {
+open(inMapFiles, outMapFile);
+mergePass();
+  } finally {
+close();
+  }
+  if (deleteInputs) {
+for (int i = 0; i < inMapFiles.length; i++) {
+  Path path = inMapFiles[i];
+  delete(path.getFileSystem(conf), path.toString());
+}
+  }
+}
+
+/*
+ * Open all input files for reading and verify the key and value types. And
+ * open Output file for writing
+ */
+@SuppressWarnings("unchecked")
+private void open(Path[] inMapFiles, Path outMapFile) throws IOException {
+  inReaders = new Reader[inMapFiles.length];
+  for (int i = 0; i < inMapFiles.length; i++) {
+Reader reader = new Reader(inMapFiles[i], conf);
+if (keyClass == null || valueClass == null) {
+  keyClass = (Class<WritableComparable>) reader.getKeyClass();
+  valueClass = (Class<Writable>) reader.getValueClass();
+} else if (keyClass != reader.getKeyClass()
+|| valueClass != reader.getValueClass()) {
+  throw new HadoopIllegalArgumentException(
+  "Input files cannot be merged as they"
+  + " have different Key and Value classes");
+}
+inReaders[i] = reader;
+  }
+
+  if (comparator == null) {
+Class<? extends WritableComparable> cls;
+cls = keyClass.asSubclass(WritableComparable.class);
+
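The hunk above is truncated by the archive, but the public surface it adds is visible earlier in the diff: a `Merger(Configuration)` constructor and a `merge(Path[] inMapFiles, boolean deleteInputs, Path outMapFile)` method. A minimal usage sketch under those assumptions (the paths are invented):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;

public class MapFileMergeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two existing MapFiles that share the same key and value classes.
    Path[] inputs = { new Path("map1"), new Path("map2") };
    // Merge into one MapFile; 'false' keeps the input files in place.
    new MapFile.Merger(conf).merge(inputs, false, new Path("merged"));
  }
}
```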

[03/50] [abbrv] hadoop git commit: HADOOP-11632. Cleanup Find.java to remove SupressWarnings annotations. Contributed by Akira AJISAKA.

2015-03-02 Thread zhz
HADOOP-11632. Cleanup Find.java to remove SupressWarnings annotations. 
Contributed by Akira AJISAKA.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afc81888
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afc81888
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afc81888

Branch: refs/heads/HDFS-7285
Commit: afc8188847a702d7e256b28efa63fa47a2a8
Parents: cadfb71
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Feb 25 16:25:04 2015 +0900
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:50 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../org/apache/hadoop/fs/shell/find/Find.java   | 29 
 2 files changed, 20 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afc81888/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a5a11b9..988eed0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -628,6 +628,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11495. Convert site documentation from apt to markdown
 (Masatake Iwasaki via aw)
 
+HADOOP-11632. Cleanup Find.java to remove SupressWarnings annotations.
+(Akira Ajisaka via ozawa)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afc81888/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Find.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Find.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Find.java
index 05cd818..70a8c79 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Find.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Find.java
@@ -25,6 +25,7 @@ import java.util.Deque;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -63,19 +64,25 @@ public class Find extends FsCommand {
   private static final String OPTION_FOLLOW_ARG_LINK = "H";
 
   /** List of expressions recognized by this command. */
-  @SuppressWarnings("rawtypes")
-  private static final Class[] EXPRESSIONS;
+  private static final Set<Class<? extends Expression>> EXPRESSIONS =
+  new HashSet<>();
+
+  private static void addExpression(Class<?> clazz) {
+EXPRESSIONS.add(clazz.asSubclass(Expression.class));
+  }
 
   static {
 // Initialize the static variables.
-EXPRESSIONS = new Class[] {
-// Operator Expressions
-And.class,
-// Action Expressions
-Print.class,
-// Navigation Expressions
-// Matcher Expressions
-Name.class };
+// Operator Expressions
+addExpression(And.class);
+
+// Action Expressions
+addExpression(Print.class);
+
+// Navigation Expressions
+// Matcher Expressions
+addExpression(Name.class);
+
 DESCRIPTION = buildDescription(ExpressionFactory.getExpressionFactory());
 
 // Register the expressions with the expression factory.
@@ -92,7 +99,6 @@ public class Find extends FsCommand {
   private HashSet<Path> stopPaths = new HashSet<Path>();
 
   /** Register the expressions with the expression factory. */
-  @SuppressWarnings("unchecked")
   private static void registerExpressions(ExpressionFactory factory) {
 for (Class<? extends Expression> exprClass : EXPRESSIONS) {
   factory.registerExpression(exprClass);
@@ -100,7 +106,6 @@ public class Find extends FsCommand {
   }
 
   /** Build the description used by the help command. */
-  @SuppressWarnings("unchecked")
   private static String buildDescription(ExpressionFactory factory) {
 ArrayList<Expression> operators = new ArrayList<Expression>();
 ArrayList<Expression> primaries = new ArrayList<Expression>();



[09/50] [abbrv] hadoop git commit: YARN-3256. TestClientToAMTokens#testClientTokenRace is not running against all Schedulers even when using ParameterizedSchedulerTestBase. Contributed by Anubhav Dhoo

2015-03-02 Thread zhz
YARN-3256. TestClientToAMTokens#testClientTokenRace is not running against
all Schedulers even when using ParameterizedSchedulerTestBase. Contributed
by Anubhav Dhoot.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/982165ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/982165ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/982165ca

Branch: refs/heads/HDFS-7285
Commit: 982165cadc65f1a3819622a2e38deceae4b20097
Parents: 199a82d
Author: Devaraj K deva...@apache.org
Authored: Thu Feb 26 15:45:41 2015 +0530
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:51 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt  | 4 
 .../server/resourcemanager/security/TestClientToAMTokens.java| 3 ---
 2 files changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/982165ca/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e5148eb..ac3cbb2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -658,6 +658,10 @@ Release 2.7.0 - UNRELEASED
 YARN-3239. WebAppProxy does not support a final tracking url which has
 query fragments and params (Jian He via jlowe)
 
+YARN-3256. TestClientToAMTokens#testClientTokenRace is not running against 
+all Schedulers even when using ParameterizedSchedulerTestBase. 
+(Anubhav Dhoot via devaraj)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/982165ca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java
index 78bc728..499b4d1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java
@@ -33,7 +33,6 @@ import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
 import java.util.Timer;
 import java.util.TimerTask;
 
@@ -43,7 +42,6 @@ import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.Server;
@@ -421,7 +419,6 @@ public class TestClientToAMTokens extends 
ParameterizedSchedulerTestBase {
   @Test(timeout=2)
   public void testClientTokenRace() throws Exception {
 
-final Configuration conf = new Configuration();
 conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
   "kerberos");
 UserGroupInformation.setConfiguration(conf);
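The one-line removal above fixes a classic shadowing bug: the test built a fresh local `Configuration` instead of using the `conf` initialized by `ParameterizedSchedulerTestBase`, so the parameterized scheduler setting was silently dropped. A generic sketch of the pattern (names invented; only the shape matches the test):

```java
// Hypothetical illustration of the shadowing bug the patch removes.
class ParameterizedBase {
  protected String conf = "scheduler=fair"; // set up per test parameter
}

class ShadowingExample extends ParameterizedBase {
  void testSomething() {
    String conf = "";         // BUG: shadows the field, losing the parameter
    System.out.println(conf); // prints "", not "scheduler=fair"
  }
}
```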



[25/50] [abbrv] hadoop git commit: YARN-2820. Retry in FileSystemRMStateStore when FS's operations fail due to IOException. Contributed by Zhihai Xu.

2015-03-02 Thread zhz
YARN-2820. Retry in FileSystemRMStateStore when FS's operations fail due to 
IOException. Contributed by Zhihai Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e95f987
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e95f987
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e95f987

Branch: refs/heads/HDFS-7285
Commit: 4e95f9870ed33fe3cd11e73981b24f0dc651bd77
Parents: 6eb3424
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Sat Feb 28 00:56:44 2015 +0900
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:53 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |   9 +
 .../src/main/resources/yarn-default.xml |  15 +
 .../recovery/FileSystemRMStateStore.java| 303 ++-
 .../recovery/TestFSRMStateStore.java|   5 +
 5 files changed, 265 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e95f987/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 40f187b..38dd9fa 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -333,6 +333,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3255. RM, NM, JobHistoryServer, and WebAppProxyServer's main()
 should support generic options. (shv)
 
+YARN-2820. Retry in FileSystemRMStateStore when FS's operations fail 
+due to IOException. (Zhihai Xu via ozawa)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e95f987/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 05c6cbf..ff06eea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -508,6 +508,15 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_FS_RM_STATE_STORE_RETRY_POLICY_SPEC =
   "2000, 500";
 
+  public static final String FS_RM_STATE_STORE_NUM_RETRIES =
+  RM_PREFIX + "fs.state-store.num-retries";
+  public static final int DEFAULT_FS_RM_STATE_STORE_NUM_RETRIES = 0;
+
+  public static final String FS_RM_STATE_STORE_RETRY_INTERVAL_MS =
+  RM_PREFIX + "fs.state-store.retry-interval-ms";
+  public static final long DEFAULT_FS_RM_STATE_STORE_RETRY_INTERVAL_MS =
+  1000L;
+
   public static final String RM_LEVELDB_STORE_PATH = RM_PREFIX
   + "leveldb-state-store.path";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e95f987/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index a7958a5..df730d5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -420,6 +420,21 @@
  </property>
 
  <property>
+    <description>the number of retries to recover from IOException in
+    FileSystemRMStateStore.
+    </description>
+    <name>yarn.resourcemanager.fs.state-store.num-retries</name>
+    <value>0</value>
+  </property>
+
+  <property>
+    <description>Retry interval in milliseconds in FileSystemRMStateStore.
+    </description>
+    <name>yarn.resourcemanager.fs.state-store.retry-interval-ms</name>
+    <value>1000</value>
+  </property>
+
+  <property>
    <description>Local path where the RM state will be stored when using
    org.apache.hadoop.yarn.server.resourcemanager.recovery.LeveldbRMStateStore
    as the value for yarn.resourcemanager.store.class</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e95f987/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
--
diff --git 
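The FileSystemRMStateStore hunk itself is truncated above, but the two new settings plainly parameterize a bounded retry loop. A sketch of that pattern using the constants added in this commit (the `FsOp` interface and wrapper class are invented; the real store logic is more involved):

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class FsStoreRetrySketch {
  interface FsOp { void run() throws IOException; } // hypothetical

  static void runWithRetry(Configuration conf, FsOp op)
      throws IOException, InterruptedException {
    int maxRetries = conf.getInt(
        YarnConfiguration.FS_RM_STATE_STORE_NUM_RETRIES,
        YarnConfiguration.DEFAULT_FS_RM_STATE_STORE_NUM_RETRIES);
    long intervalMs = conf.getLong(
        YarnConfiguration.FS_RM_STATE_STORE_RETRY_INTERVAL_MS,
        YarnConfiguration.DEFAULT_FS_RM_STATE_STORE_RETRY_INTERVAL_MS);
    for (int retry = 0; ; retry++) {
      try {
        op.run();               // the FS operation to protect
        return;
      } catch (IOException e) {
        if (retry >= maxRetries) {
          throw e;              // retries exhausted, surface the failure
        }
        Thread.sleep(intervalMs);
      }
    }
  }
}
```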

[29/50] [abbrv] hadoop git commit: YARN-3168. Convert site documentation from apt to markdown (Gururaj Shetty via aw)

2015-03-02 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/06aca7c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
new file mode 100644
index 000..5e4df9f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/WritingYarnApplications.md
@@ -0,0 +1,591 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+Hadoop: Writing YARN Applications
+=================================
+
+* [Purpose](#Purpose)
+* [Concepts and Flow](#Concepts_and_Flow)
+* [Interfaces](#Interfaces)
+* [Writing a Simple Yarn Application](#Writing_a_Simple_Yarn_Application)
+* [Writing a simple Client](#Writing_a_simple_Client)
+* [Writing an ApplicationMaster (AM)](#Writing_an_ApplicationMaster_AM)
+* [FAQ](#FAQ)
+* [How can I distribute my application's jars to all of the nodes in the 
YARN cluster that need 
it?](#How_can_I_distribute_my_applications_jars_to_all_of_the_nodes_in_the_YARN_cluster_that_need_it)
+* [How do I get the ApplicationMaster's 
ApplicationAttemptId?](#How_do_I_get_the_ApplicationMasters_ApplicationAttemptId)
+* [Why my container is killed by the 
NodeManager?](#Why_my_container_is_killed_by_the_NodeManager)
+* [How do I include native libraries?](#How_do_I_include_native_libraries)
+* [Useful Links](#Useful_Links)
+* [Sample Code](#Sample_Code)
+
+Purpose
+---
+
+This document describes, at a high-level, the way to implement new 
Applications for YARN.
+
+Concepts and Flow
+-----------------
+
+The general concept is that an *application submission client* submits an 
*application* to the YARN *ResourceManager* (RM). This can be done through 
setting up a `YarnClient` object. After `YarnClient` is started, the client can 
then set up application context, prepare the very first container of the 
application that contains the *ApplicationMaster* (AM), and then submit the 
application. You need to provide information such as the details about the 
local files/jars that need to be available for your application to run, the 
actual command that needs to be executed (with the necessary command line 
arguments), any OS environment settings (optional), etc. Effectively, you need 
to describe the Unix process(es) that needs to be launched for your 
ApplicationMaster.
+
+The YARN ResourceManager will then launch the ApplicationMaster (as specified) 
on an allocated container. The ApplicationMaster communicates with YARN 
cluster, and handles application execution. It performs operations in an 
asynchronous fashion. During application launch time, the main tasks of the 
ApplicationMaster are: a) communicating with the ResourceManager to negotiate 
and allocate resources for future containers, and b) after container 
allocation, communicating YARN *NodeManager*s (NMs) to launch application 
containers on them. Task a) can be performed asynchronously through an 
`AMRMClientAsync` object, with event handling methods specified in a 
`AMRMClientAsync.CallbackHandler` type of event handler. The event handler 
needs to be set to the client explicitly. Task b) can be performed by launching 
a runnable object that then launches containers when there are containers 
allocated. As part of launching this container, the AM has to specify the 
`ContainerLaunchContext` that has
  the launch information such as command line specification, environment, etc.
+
+During the execution of an application, the ApplicationMaster communicates 
NodeManagers through `NMClientAsync` object. All container events are handled 
by `NMClientAsync.CallbackHandler`, associated with `NMClientAsync`. A typical 
callback handler handles client start, stop, status update and error. 
ApplicationMaster also reports execution progress to ResourceManager by 
handling the `getProgress()` method of `AMRMClientAsync.CallbackHandler`.
+
+Other than asynchronous clients, there are synchronous versions for certain 
workflows (`AMRMClient` and `NMClient`). The asynchronous clients are 
recommended because of (subjectively) simpler usages, and this article will 
mainly cover the asynchronous clients. Please refer to `AMRMClient` and 
`NMClient` for more information on 

[31/50] [abbrv] hadoop git commit: YARN-3168. Convert site documentation from apt to markdown (Gururaj Shetty via aw)

2015-03-02 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/06aca7c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
new file mode 100644
index 000..b1591bb
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -0,0 +1,2640 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+ResourceManager REST API's.
+===
+
+* [Overview](#Overview)
+* [Cluster Information API](#Cluster_Information_API)
+* [Cluster Metrics API](#Cluster_Metrics_API)
+* [Cluster Scheduler API](#Cluster_Scheduler_API)
+* [Cluster Applications API](#Cluster_Applications_API)
+* [Cluster Application Statistics API](#Cluster_Application_Statistics_API)
+* [Cluster Application API](#Cluster_Application_API)
+* [Cluster Application Attempts API](#Cluster_Application_Attempts_API)
+* [Cluster Nodes API](#Cluster_Nodes_API)
+* [Cluster Node API](#Cluster_Node_API)
+* [Cluster Writeable APIs](#Cluster_Writeable_APIs)
+* [Cluster New Application API](#Cluster_New_Application_API)
+* [Cluster Applications API(Submit 
Application)](#Cluster_Applications_APISubmit_Application)
+* [Cluster Application State API](#Cluster_Application_State_API)
+* [Cluster Application Queue API](#Cluster_Application_Queue_API)
+* [Cluster Delegation Tokens API](#Cluster_Delegation_Tokens_API)
+
+Overview
+--------
+
+The ResourceManager REST API's allow the user to get information about the 
cluster - status on the cluster, metrics on the cluster, scheduler information, 
information about nodes in the cluster, and information about applications on 
the cluster.
+
+Cluster Information API
+---
+
+The cluster information resource provides overall information about the 
cluster.
+
+### URI
+
+Both of the following URI's give you the cluster information.
+
+  * http://<rm http address:port>/ws/v1/cluster
+  * http://<rm http address:port>/ws/v1/cluster/info
+
+### HTTP Operations Supported
+
+  * GET
+
+### Query Parameters Supported
+
+  None
+
+### Elements of the *clusterInfo* object
+
+| Item | Data Type | Description |
+|: |: |: |
+| id | long | The cluster id |
+| startedOn | long | The time the cluster started (in ms since epoch) |
+| state | string | The ResourceManager state - valid values are: NOTINITED, 
INITED, STARTED, STOPPED |
+| haState | string | The ResourceManager HA state - valid values are: 
INITIALIZING, ACTIVE, STANDBY, STOPPED |
+| resourceManagerVersion | string | Version of the ResourceManager |
+| resourceManagerBuildVersion | string | ResourceManager build string with 
build version, user, and checksum |
+| resourceManagerVersionBuiltOn | string | Timestamp when ResourceManager was 
built (in ms since epoch) |
+| hadoopVersion | string | Version of hadoop common |
+| hadoopBuildVersion | string | Hadoop common build string with build version, 
user, and checksum |
+| hadoopVersionBuiltOn | string | Timestamp when hadoop common was built(in ms 
since epoch) |
+
+### Response Examples
+
+**JSON response**
+
+HTTP Request:
+
+  GET http://<rm http address:port>/ws/v1/cluster/info
+
+Response Header:
+
+  HTTP/1.1 200 OK
+  Content-Type: application/json
+  Transfer-Encoding: chunked
+  Server: Jetty(6.1.26)
+
+Response Body:
+
+```json
+{
+  "clusterInfo":
+  {
+    "id":1324053971963,
+    "startedOn":1324053971963,
+    "state":"STARTED",
+    "resourceManagerVersion":"0.23.1-SNAPSHOT",
+    "resourceManagerBuildVersion":"0.23.1-SNAPSHOT from 1214049 by user1 source checksum 050cd664439d931c8743a6428fd6a693",
+    "resourceManagerVersionBuiltOn":"Tue Dec 13 22:12:48 CST 2011",
+    "hadoopVersion":"0.23.1-SNAPSHOT",
+    "hadoopBuildVersion":"0.23.1-SNAPSHOT from 1214049 by user1 source checksum 11458df3bb77342dca5f917198fad328",
+    "hadoopVersionBuiltOn":"Tue Dec 13 22:12:26 CST 2011"
+  }
+}
+```
+
+**XML response**
+
+HTTP Request:
+
+  Accept: application/xml
+  GET http://<rm http address:port>/ws/v1/cluster/info
+
+Response Header:
+
+  HTTP/1.1 200 OK
+  Content-Type: application/xml
+  Content-Length: 712
+  Server: Jetty(6.1.26)
+
+Response Body:
+
+```xml
+<?xml 

[46/50] [abbrv] hadoop git commit: HADOOP-10774. Update KerberosTestUtils for hadoop-auth tests when using IBM Java (sangamesh via aw)

2015-03-02 Thread zhz
HADOOP-10774. Update KerberosTestUtils for hadoop-auth tests when using IBM 
Java (sangamesh via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/260883bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/260883bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/260883bf

Branch: refs/heads/HDFS-7285
Commit: 260883bf402cf8ad9b999a6077ba56e75820738f
Parents: d6c1b62
Author: Allen Wittenauer a...@apache.org
Authored: Sat Feb 28 23:22:06 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:55 2015 -0800

--
 .../authentication/KerberosTestUtils.java   | 40 ++--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 2 files changed, 32 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/260883bf/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
index 7629a30..8fc08e2 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
@@ -32,12 +32,14 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.Callable;
 
+import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
+
 /**
  * Test helper class for Java Kerberos setup.
  */
 public class KerberosTestUtils {
   private static String keytabFile = new File(System.getProperty("test.dir",
   "target"),
-  UUID.randomUUID().toString()).toString();
+  UUID.randomUUID().toString()).getAbsolutePath();
 
   public static String getRealm() {
 return EXAMPLE.COM;
@@ -65,18 +67,34 @@ public class KerberosTestUtils {
 @Override
 public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
   Map<String, String> options = new HashMap<String, String>();
-  options.put("keyTab", KerberosTestUtils.getKeytabFile());
-  options.put("principal", principal);
-  options.put("useKeyTab", "true");
-  options.put("storeKey", "true");
-  options.put("doNotPrompt", "true");
-  options.put("useTicketCache", "true");
-  options.put("renewTGT", "true");
-  options.put("refreshKrb5Config", "true");
-  options.put("isInitiator", "true");
+  if (IBM_JAVA) {
+    options.put("useKeytab", KerberosTestUtils.getKeytabFile().startsWith("file://") ?
+        KerberosTestUtils.getKeytabFile() : "file://" + KerberosTestUtils.getKeytabFile());
+    options.put("principal", principal);
+    options.put("refreshKrb5Config", "true");
+    options.put("credsType", "both");
+  } else {
+    options.put("keyTab", KerberosTestUtils.getKeytabFile());
+    options.put("principal", principal);
+    options.put("useKeyTab", "true");
+    options.put("storeKey", "true");
+    options.put("doNotPrompt", "true");
+    options.put("useTicketCache", "true");
+    options.put("renewTGT", "true");
+    options.put("refreshKrb5Config", "true");
+    options.put("isInitiator", "true");
+  }
   String ticketCache = System.getenv("KRB5CCNAME");
   if (ticketCache != null) {
-    options.put("ticketCache", ticketCache);
+    if (IBM_JAVA) {
+      // IBM JAVA only respect system property and not env variable
+      // The first value searched when useDefaultCcache is used.
+      System.setProperty("KRB5CCNAME", ticketCache);
+      options.put("useDefaultCcache", "true");
+      options.put("renewTGT", "true");
+    } else {
+      options.put("ticketCache", ticketCache);
+    }
   }
   options.put("debug", "true");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/260883bf/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 74bf558..3c4dc99 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -403,6 +403,9 @@ Trunk (Unreleased)
 
 HADOOP-11637. bash location hard-coded in shell scripts (aw)
 
+HADOOP-10774. Update KerberosTestUtils for hadoop-auth tests when using
+IBM Java (sangamesh via aw)
+
   OPTIMIZATIONS
 
 HADOOP-7761. Improve the performance of raw comparisons. (todd)



[48/50] [abbrv] hadoop git commit: HDFS-7439. Add BlockOpResponseProto's message to the exception messages. Contributed by Takanobu Asanuma

2015-03-02 Thread zhz
HDFS-7439. Add BlockOpResponseProto's message to the exception messages.  
Contributed by Takanobu Asanuma


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e4a23f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e4a23f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e4a23f3

Branch: refs/heads/HDFS-7285
Commit: 0e4a23f308c95a99106336f4df7e3e7990dd6ef3
Parents: 72f2c12
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Mon Mar 2 15:03:58 2015 +0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:56 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 26 ++--
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 15 ---
 .../apache/hadoop/hdfs/RemoteBlockReader2.java  | 24 ++
 .../datatransfer/DataTransferProtoUtil.java | 26 
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  9 +++
 .../hdfs/server/datanode/DataXceiver.java   | 14 +++
 7 files changed, 55 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e4a23f3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ce35ea2..5ca16af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -694,6 +694,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-5853. Add hadoop.user.group.metrics.percentiles.intervals to
 hdfs-default.xml. (aajisaka)
 
+HDFS-7439. Add BlockOpResponseProto's message to the exception messages.
+(Takanobu Asanuma via szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e4a23f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 792c2dd..abcd847 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -174,6 +174,7 @@ import 
org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
@@ -2260,15 +2261,9 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   final BlockOpResponseProto reply =
 BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
 
-  if (reply.getStatus() != Status.SUCCESS) {
-    if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
-      throw new InvalidBlockTokenException();
-    } else {
-      throw new IOException("Bad response " + reply + " for block "
-          + block + " from datanode " + datanodes[j]);
-    }
-  }
-
+  String logInfo = "for block " + block + " from datanode " + datanodes[j];
+  DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
+
   OpBlockChecksumResponseProto checksumData =
 reply.getChecksumResponse();
 
@@ -2425,16 +2420,9 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   0, 1, true, CachingStrategy.newDefaultStrategy());
   final BlockOpResponseProto reply =
   BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
-  
-  if (reply.getStatus() != Status.SUCCESS) {
-    if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
-      throw new InvalidBlockTokenException();
-    } else {
-      throw new IOException("Bad response " + reply + " trying to read "
-          + lb.getBlock() + " from datanode " + dn);
-    }
-  }
-
+  String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
+  DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
+
   return 
PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
 } finally {
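From the two call sites replaced above, the new helper centralizes the SUCCESS/ERROR_ACCESS_TOKEN check and, per the commit's purpose, folds the response's message into the exception text. A sketch inferred from the removed code (the real method in DataTransferProtoUtil may differ in wording; the protobuf and exception types come from the HDFS data-transfer packages):

```java
// Inferred sketch; see DataTransferProtoUtil.checkBlockOpStatus for the
// actual implementation introduced by HDFS-7439.
static void checkBlockOpStatus(BlockOpResponseProto response, String logInfo)
    throws IOException {
  if (response.getStatus() != Status.SUCCESS) {
    if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException("Got access token error, "
          + "status message " + response.getMessage() + ", " + logInfo);
    } else {
      throw new IOException("Got error, status message "
          + response.getMessage() + ", " + logInfo);
    }
  }
}
```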
   

[37/50] [abbrv] hadoop git commit: YARN-3168. Convert site documentation from apt to markdown (Gururaj Shetty via aw)

2015-03-02 Thread zhz
YARN-3168. Convert site documentation from apt to markdown (Gururaj Shetty via 
aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06aca7c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06aca7c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06aca7c6

Branch: refs/heads/HDFS-7285
Commit: 06aca7c61d037c506ad78420a0b88e562b1e4c2b
Parents: eaccaba
Author: Allen Wittenauer a...@apache.org
Authored: Fri Feb 27 20:39:44 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:54 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |3 +
 .../src/site/apt/CapacityScheduler.apt.vm   |  368 ---
 .../src/site/apt/DockerContainerExecutor.apt.vm |  204 --
 .../src/site/apt/FairScheduler.apt.vm   |  483 ---
 .../src/site/apt/NodeManager.apt.vm |   64 -
 .../src/site/apt/NodeManagerCgroups.apt.vm  |   77 -
 .../src/site/apt/NodeManagerRest.apt.vm |  645 
 .../src/site/apt/NodeManagerRestart.apt.vm  |   86 -
 .../src/site/apt/ResourceManagerHA.apt.vm   |  233 --
 .../src/site/apt/ResourceManagerRest.apt.vm | 3104 --
 .../src/site/apt/ResourceManagerRestart.apt.vm  |  298 --
 .../src/site/apt/SecureContainer.apt.vm |  176 -
 .../src/site/apt/TimelineServer.apt.vm  |  260 --
 .../src/site/apt/WebApplicationProxy.apt.vm |   49 -
 .../src/site/apt/WebServicesIntro.apt.vm|  593 
 .../src/site/apt/WritingYarnApplications.apt.vm |  757 -
 .../hadoop-yarn-site/src/site/apt/YARN.apt.vm   |   77 -
 .../src/site/apt/YarnCommands.apt.vm|  369 ---
 .../hadoop-yarn-site/src/site/apt/index.apt.vm  |   82 -
 .../src/site/markdown/CapacityScheduler.md  |  186 ++
 .../site/markdown/DockerContainerExecutor.md.vm |  154 +
 .../src/site/markdown/FairScheduler.md  |  233 ++
 .../src/site/markdown/NodeManager.md|   57 +
 .../src/site/markdown/NodeManagerCgroups.md |   57 +
 .../src/site/markdown/NodeManagerRest.md|  543 +++
 .../src/site/markdown/NodeManagerRestart.md |   53 +
 .../src/site/markdown/ResourceManagerHA.md  |  140 +
 .../src/site/markdown/ResourceManagerRest.md| 2640 +++
 .../src/site/markdown/ResourceManagerRestart.md |  181 +
 .../src/site/markdown/SecureContainer.md|  135 +
 .../src/site/markdown/TimelineServer.md |  231 ++
 .../src/site/markdown/WebApplicationProxy.md|   24 +
 .../src/site/markdown/WebServicesIntro.md   |  569 
 .../site/markdown/WritingYarnApplications.md|  591 
 .../hadoop-yarn-site/src/site/markdown/YARN.md  |   42 +
 .../src/site/markdown/YarnCommands.md   |  272 ++
 .../hadoop-yarn-site/src/site/markdown/index.md |   75 +
 37 files changed, 6186 insertions(+), 7925 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06aca7c6/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e7af84b..02b1831 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -20,6 +20,9 @@ Trunk - Unreleased
 YARN-2980. Move health check script related functionality to hadoop-common
 (Varun Saxena via aw)
 
+YARN-3168. Convert site documentation from apt to markdown (Gururaj Shetty
+via aw)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06aca7c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm
deleted file mode 100644
index 8528c1a..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CapacityScheduler.apt.vm
+++ /dev/null
@@ -1,368 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  Hadoop Map Reduce Next Generation-${project.version} - Capacity Scheduler
-  ---
-  ---
-  ${maven.build.timestamp}
-
-Hadoop MapReduce Next Generation - Capacity 

[45/50] [abbrv] hadoop git commit: HADOOP-11615. Update ServiceLevelAuth.md for YARN. Contributed by Brahma Reddy Battula.

2015-03-02 Thread zhz
HADOOP-11615. Update ServiceLevelAuth.md for YARN. Contributed by Brahma Reddy 
Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72f2c126
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72f2c126
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72f2c126

Branch: refs/heads/HDFS-7285
Commit: 72f2c1269bd0c89d16aef199c6eb2a45ad6a2c52
Parents: 0cc5192
Author: Akira Ajisaka aajis...@apache.org
Authored: Sun Mar 1 22:16:06 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:55 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt|  3 +++
 .../src/site/markdown/ServiceLevelAuth.md  | 17 -
 2 files changed, 11 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72f2c126/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b1a7a7d..4c0c375 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1025,6 +1025,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11634. Description of webhdfs' principal/keytab should switch places
 each other. (Brahma Reddy Battula via ozawa)
 
+HADOOP-11615. Update ServiceLevelAuth.md for YARN.
+(Brahma Reddy Battula via aajisaka)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72f2c126/hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md
index ae41b47..e0017d4 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/ServiceLevelAuth.md
@@ -68,10 +68,9 @@ This section lists the various Hadoop services and their 
configuration knobs:
 | security.datanode.protocol.acl | ACL for DatanodeProtocol, which is used by 
datanodes to communicate with the namenode. |
 | security.inter.datanode.protocol.acl | ACL for InterDatanodeProtocol, the 
inter-datanode protocol for updating generation timestamp. |
 | security.namenode.protocol.acl | ACL for NamenodeProtocol, the protocol used 
by the secondary namenode to communicate with the namenode. |
-| security.inter.tracker.protocol.acl | ACL for InterTrackerProtocol, used by 
the tasktrackers to communicate with the jobtracker. |
-| security.job.submission.protocol.acl | ACL for JobSubmissionProtocol, used 
by job clients to communciate with the jobtracker for job submission, querying 
job status etc. |
-| security.task.umbilical.protocol.acl | ACL for TaskUmbilicalProtocol, used 
by the map and reduce tasks to communicate with the parent tasktracker. |
-| security.refresh.policy.protocol.acl | ACL for 
RefreshAuthorizationPolicyProtocol, used by the dfsadmin and mradmin commands 
to refresh the security policy in-effect. |
+| security.job.client.protocol.acl | ACL for JobSubmissionProtocol, used by 
job clients to communciate with the resourcemanager for job submission, 
querying job status etc. |
+| security.job.task.protocol.acl | ACL for TaskUmbilicalProtocol, used by the 
map and reduce tasks to communicate with the parent nodemanager. |
+| security.refresh.policy.protocol.acl | ACL for 
RefreshAuthorizationPolicyProtocol, used by the dfsadmin and rmadmin commands 
to refresh the security policy in-effect. |
 | security.ha.service.protocol.acl | ACL for HAService protocol used by 
HAAdmin to manage the active and stand-by states of namenode. |
 
 ### Access Control Lists
@@ -98,15 +97,15 @@ If access control list is not defined for a service, the 
value of `security.serv
 
 ### Refreshing Service Level Authorization Configuration
 
-The service-level authorization configuration for the NameNode and JobTracker 
can be changed without restarting either of the Hadoop master daemons. The 
cluster administrator can change `$HADOOP_CONF_DIR/hadoop-policy.xml` on the 
master nodes and instruct the NameNode and JobTracker to reload their 
respective configurations via the `-refreshServiceAcl` switch to `dfsadmin` and 
`mradmin` commands respectively.
+The service-level authorization configuration for the NameNode and 
ResourceManager can be changed without restarting either of the Hadoop master 
daemons. The cluster administrator can change 
`$HADOOP_CONF_DIR/hadoop-policy.xml` on the master nodes and instruct the 
NameNode and ResourceManager to reload their respective 

[43/50] [abbrv] hadoop git commit: HADOOP-11634. Description of webhdfs' principal/keytab should switch places each other. Contributed by Brahma Reddy Battula.

2015-03-02 Thread zhz
HADOOP-11634. Description of webhdfs' principal/keytab should switch places 
each other. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/731a463e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/731a463e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/731a463e

Branch: refs/heads/HDFS-7285
Commit: 731a463ed13339e71becba7fa6a1110a6ab33704
Parents: b1ddc71
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon Mar 2 04:18:07 2015 +0900
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:55 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../hadoop-common/src/site/markdown/SecureMode.md| 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/731a463e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3c4dc99..f1d48bc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1019,6 +1019,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-9922. hadoop windows native build will fail in 32 bit machine.
 (Kiran Kumar M R via cnauroth)
 
+HADOOP-11634. Description of webhdfs' principal/keytab should switch places
+each other. (Brahma Reddy Battula via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/731a463e/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
index 0004d25..cb27e29 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
@@ -289,8 +289,8 @@ The following properties should be in the `core-site.xml` 
of all the nodes in th
 
 | Parameter | Value | Notes |
 |: |: |: |
-| `dfs.web.authentication.kerberos.principal` | http/\_h...@realm.tld | 
Kerberos keytab file for the WebHDFS. |
-| `dfs.web.authentication.kerberos.keytab` | 
*/etc/security/keytab/http.service.keytab* | Kerberos principal name for 
WebHDFS. |
+| `dfs.web.authentication.kerberos.principal` | http/\_h...@realm.tld | 
Kerberos principal name for the WebHDFS. |
+| `dfs.web.authentication.kerberos.keytab` | 
*/etc/security/keytab/http.service.keytab* | Kerberos keytab file for WebHDFS. |
 
 ### ResourceManager
 



[33/50] [abbrv] hadoop git commit: YARN-3168. Convert site documentation from apt to markdown (Gururaj Shetty via aw)

2015-03-02 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/06aca7c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WritingYarnApplications.apt.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WritingYarnApplications.apt.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WritingYarnApplications.apt.vm
deleted file mode 100644
index 57a47fd..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WritingYarnApplications.apt.vm
+++ /dev/null
@@ -1,757 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  Hadoop Map Reduce Next Generation-${project.version} - Writing YARN
-  Applications
-  ---
-  ---
-  ${maven.build.timestamp}
-
-Hadoop MapReduce Next Generation - Writing YARN Applications
-
-%{toc|section=1|fromDepth=0}
-
-* Purpose
-
-  This document describes, at a high-level, the way to implement new
-  Applications for YARN.
-
-* Concepts and Flow
-
-  The general concept is that an application submission client submits an
-  application to the YARN ResourceManager (RM). This can be done through
-  setting up a YarnClient object. After YarnClient is started, the
-  client can then set up application context, prepare the very first container 
of
-  the application that contains the ApplicationMaster (AM), and then submit
-  the application. You need to provide information such as the details about 
the
-  local files/jars that need to be available for your application to run, the
-  actual command that needs to be executed (with the necessary command line
-  arguments), any OS environment settings (optional), etc. Effectively, you
-  need to describe the Unix process(es) that needs to be launched for your
-  ApplicationMaster.
-
-  The YARN ResourceManager will then launch the ApplicationMaster (as
-  specified) on an allocated container. The ApplicationMaster communicates with
-  YARN cluster, and handles application execution. It performs operations in an
-  asynchronous fashion. During application launch time, the main tasks of the
-  ApplicationMaster are: a) communicating with the ResourceManager to negotiate
-  and allocate resources for future containers, and b) after container
-  allocation, communicating with YARN NodeManagers (NMs) to launch application
-  containers on them. Task a) can be performed asynchronously through an
-  AMRMClientAsync object, with event handling methods specified in a
-  AMRMClientAsync.CallbackHandler type of event handler. The event 
handler
-  needs to be set to the client explicitly. Task b) can be performed by 
launching
-  a runnable object that then launches containers when there are containers
-  allocated. As part of launching this container, the AM has to
-  specify the ContainerLaunchContext that has the launch information 
such as
-  command line specification, environment, etc.
-
-  During the execution of an application, the ApplicationMaster communicates
-  with NodeManagers through the NMClientAsync object. All container events are
-  handled by NMClientAsync.CallbackHandler, associated with
-  NMClientAsync. A typical callback handler handles client start, stop,
-  status update and error. ApplicationMaster also reports execution progress to
-  ResourceManager by handling the getProgress() method of
-  AMRMClientAsync.CallbackHandler.
-  
-  Other than asynchronous clients, there are synchronous versions for certain
-  workflows (AMRMClient and NMClient). The asynchronous clients are
-  recommended because of (subjectively) simpler usages, and this article
-  will mainly cover the asynchronous clients. Please refer to AMRMClient
-  and NMClient for more information on synchronous clients.
-
-* Interfaces
-
-  The interfaces you'd most likely be concerned with are:
-
-  * Client\<--\>ResourceManager\
-    By using YarnClient objects.
-
-  * ApplicationMaster\<--\>ResourceManager\
-    By using AMRMClientAsync objects, handling events asynchronously by
-    AMRMClientAsync.CallbackHandler
-
-  * ApplicationMaster\<--\>NodeManager\
-    Launch containers. Communicate with NodeManagers
-    by using NMClientAsync objects, handling container events by
-    NMClientAsync.CallbackHandler
-
-  []
-
-  Note
-  
-* The three main protocols for YARN application (ApplicationClientProtocol,
-  ApplicationMasterProtocol and ContainerManagementProtocol) are still
-  preserved. The 3 
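
A minimal sketch of the asynchronous AM-RM flow the deleted page describes
(the handler class, host name, and heartbeat interval below are illustrative
assumptions, not part of the removed document):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
    import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;

    // Inside the ApplicationMaster's startup path:
    Configuration conf = new Configuration();
    AMRMClientAsync.CallbackHandler handler = new MyCallbackHandler(); // hypothetical class
    AMRMClientAsync<ContainerRequest> amRMClient =
        AMRMClientAsync.createAMRMClientAsync(1000, handler); // 1s heartbeat interval
    amRMClient.init(conf);
    amRMClient.start();
    amRMClient.registerApplicationMaster("am-host.example.com", 0, ""); // placeholders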

[17/50] [abbrv] hadoop git commit: YARN-3217. Remove httpclient dependency from hadoop-yarn-server-web-proxy. Contributed by Brahma Reddy Battula.

2015-03-02 Thread zhz
YARN-3217. Remove httpclient dependency from hadoop-yarn-server-web-proxy. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41d99524
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41d99524
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41d99524

Branch: refs/heads/HDFS-7285
Commit: 41d995245ea2c28b68a96ba60dde79279721348c
Parents: 982165c
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Fri Feb 27 00:22:46 2015 +0900
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:52 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../hadoop-yarn-server-web-proxy/pom.xml|  4 --
 .../server/webproxy/WebAppProxyServlet.java | 46 ++--
 3 files changed, 26 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41d99524/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ac3cbb2..a635592 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -327,6 +327,9 @@ Release 2.7.0 - UNRELEASED
 YARN-2797. Add -help to yarn logs and nodes CLI command. 
 (Jagadesh Kiran N via devaraj)
 
+YARN-3217. Remove httpclient dependency from hadoop-yarn-server-web-proxy.
+(Brahma Reddy Battula via ozawa).
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41d99524/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
index fdba1fe..9801064 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
@@ -79,10 +79,6 @@
     </dependency>
 
     <dependency>
-      <groupId>commons-httpclient</groupId>
-      <artifactId>commons-httpclient</artifactId>
-    </dependency>
-    <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41d99524/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
index 47f7769..fd98c80 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
@@ -40,13 +40,6 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.core.UriBuilder;
 
-import org.apache.commons.httpclient.Header;
-import org.apache.commons.httpclient.HostConfiguration;
-import org.apache.commons.httpclient.HttpClient;
-import org.apache.commons.httpclient.HttpMethod;
-import org.apache.commons.httpclient.cookie.CookiePolicy;
-import org.apache.commons.httpclient.methods.GetMethod;
-import org.apache.commons.httpclient.params.HttpClientParams;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -59,8 +52,15 @@ import org.apache.hadoop.yarn.util.TrackingUriPlugin;
 import org.apache.hadoop.yarn.webapp.MimeType;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.apache.http.Header;
+import org.apache.http.HttpResponse;
 import org.apache.http.NameValuePair;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.params.ClientPNames;
+import org.apache.http.client.params.CookiePolicy;
 import org.apache.http.client.utils.URLEncodedUtils;
+import org.apache.http.conn.params.ConnRoutePNames;
+import org.apache.http.impl.client.DefaultHttpClient;
 import org.slf4j.Logger;
 

[23/50] [abbrv] hadoop git commit: HDFS-7308. Change the packet chunk size computation in DFSOutputStream in order to enforce packet size <= 64kB. Contributed by Takuya Fukudome

2015-03-02 Thread zhz
HDFS-7308. Change the packet chunk size computation in DFSOutputStream in order 
to enforce packet size <= 64kB.  Contributed by Takuya Fukudome


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6eb34243
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6eb34243
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6eb34243

Branch: refs/heads/HDFS-7285
Commit: 6eb3424388a2beeb290dfd670b5f6cf6b720080f
Parents: 54bcb5f
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Fri Feb 27 23:45:37 2015 +0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:53 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  3 +-
 .../apache/hadoop/hdfs/TestDFSOutputStream.java | 31 
 3 files changed, 36 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eb34243/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8556afd..b2422d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -682,6 +682,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7819. Log WARN message for the blocks which are not in Block ID based
 layout (Rakesh R via Colin P. McCabe)
 
+    HDFS-7308. Change the packet chunk size computation in DFSOutputStream in
+    order to enforce packet size <= 64kB.  (Takuya Fukudome via szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eb34243/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 9d7dca9..b3e8c97 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -1851,8 +1851,9 @@ public class DFSOutputStream extends FSOutputSummer
   }
 
   private void computePacketChunkSize(int psize, int csize) {
+final int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
 final int chunkSize = csize + getChecksumSize();
-chunksPerPacket = Math.max(psize/chunkSize, 1);
+chunksPerPacket = Math.max(bodySize/chunkSize, 1);
 packetSize = chunkSize*chunksPerPacket;
 if (DFSClient.LOG.isDebugEnabled()) {
       DFSClient.LOG.debug("computePacketChunkSize: src=" + src +
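
A worked instance of the new computation, assuming the usual defaults (64 KB
write packet size, 512-byte chunks, 4-byte CRC32 checksums) and assuming
PKT_MAX_HEADER_LEN is 33 in this release:

    final int psize = 64 * 1024;                                   // 65536
    final int bodySize = psize - 33;                               // 65503
    final int chunkSize = 512 + 4;                                 // data + checksum
    final int chunksPerPacket = Math.max(bodySize / chunkSize, 1); // 126
    final int packetSize = chunkSize * chunksPerPacket;            // 65016, under 64 KB
    // The old code used psize / chunkSize = 127 chunks, giving 65532 bytes of
    // body; adding the header pushed the packet past the 64 KB limit.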

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6eb34243/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
index 678a3b8..7269e39 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.conf.Configuration;
@@ -66,6 +68,35 @@ public class TestDFSOutputStream {
 dos.close();
   }
 
+  /**
+   * The computePacketChunkSize() method of DFSOutputStream should set the
+   * actual packet size <= 64kB. See HDFS-7308 for details.
+   */
+  @Test
+  public void testComputePacketChunkSize()
+      throws Exception {
+    DistributedFileSystem fs = cluster.getFileSystem();
+    FSDataOutputStream os = fs.create(new Path("/test"));
+    DFSOutputStream dos = (DFSOutputStream) Whitebox.getInternalState(os,
+        "wrappedStream");
+
+    final int packetSize = 64*1024;
+    final int bytesPerChecksum = 512;
+
+    Method method = dos.getClass().getDeclaredMethod("computePacketChunkSize",
+        int.class, int.class);
+    method.setAccessible(true);
+    method.invoke(dos, packetSize, bytesPerChecksum);
+
+    Field field = dos.getClass().getDeclaredField("packetSize");
+    field.setAccessible(true);
+
+    Assert.assertTrue((Integer) field.get(dos) + 33 < packetSize);
+    // If PKT_MAX_HEADER_LEN is 257, actual packet size 

[19/50] [abbrv] hadoop git commit: HDFS-7819. Log WARN message for the blocks which are not in Block ID based layout (Rakesh R via Colin P. McCabe)

2015-03-02 Thread zhz
HDFS-7819. Log WARN message for the blocks which are not in Block ID based 
layout (Rakesh R via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da85e17c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da85e17c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da85e17c

Branch: refs/heads/HDFS-7285
Commit: da85e17c772dfd2348b8f0e93583f7e7ac4d4e16
Parents: db0b6e6
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Feb 26 11:58:29 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:52 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../hdfs/server/datanode/DirectoryScanner.java  | 26 +---
 2 files changed, 25 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da85e17c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e09714f..54b4057 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -679,6 +679,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7832. Show 'Last Modified' in Namenode's 'Browse Filesystem'
 (vinayakumarb)
 
+HDFS-7819. Log WARN message for the blocks which are not in Block ID based
+layout (Rakesh R via Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da85e17c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 71f976b..09c2914 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -597,14 +597,15 @@ public class DirectoryScanner implements Runnable {
   for (String bpid : bpList) {
         LinkedList<ScanInfo> report = new LinkedList<ScanInfo>();
 File bpFinalizedDir = volume.getFinalizedDir(bpid);
-result.put(bpid, compileReport(volume, bpFinalizedDir, report));
+result.put(bpid,
+compileReport(volume, bpFinalizedDir, bpFinalizedDir, report));
   }
   return result;
 }
 
 /** Compile list {@link ScanInfo} for the blocks in the directory dir */
-    private LinkedList<ScanInfo> compileReport(FsVolumeSpi vol, File dir,
-        LinkedList<ScanInfo> report) {
+    private LinkedList<ScanInfo> compileReport(FsVolumeSpi vol,
+        File bpFinalizedDir, File dir, LinkedList<ScanInfo> report) {
   File[] files;
   try {
 files = FileUtil.listFiles(dir);
@@ -622,12 +623,14 @@ public class DirectoryScanner implements Runnable {
*/
       for (int i = 0; i < files.length; i++) {
 if (files[i].isDirectory()) {
-  compileReport(vol, files[i], report);
+  compileReport(vol, bpFinalizedDir, files[i], report);
   continue;
 }
 if (!Block.isBlockFilename(files[i])) {
           if (isBlockMetaFile("blk_", files[i].getName())) {
 long blockId = Block.getBlockId(files[i].getName());
+verifyFileLocation(files[i].getParentFile(), bpFinalizedDir,
+blockId);
 report.add(new ScanInfo(blockId, null, files[i], vol));
   }
   continue;
@@ -646,9 +649,24 @@ public class DirectoryScanner implements Runnable {
 break;
   }
 }
+verifyFileLocation(blockFile.getParentFile(), bpFinalizedDir,
+blockId);
 report.add(new ScanInfo(blockId, blockFile, metaFile, vol));
   }
   return report;
 }
+
+/**
+ * Verify whether the actual directory location of block file has the
+ * expected directory path computed using its block ID.
+ */
+private void verifyFileLocation(File actualBlockDir,
+File bpFinalizedDir, long blockId) {
+  File blockDir = DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId);
+  if (actualBlockDir.compareTo(blockDir) != 0) {
+        LOG.warn("Block: " + blockId
+            + " has to be upgraded to block ID-based layout");
+  }
+}
   }
 }
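
For context, a rough sketch of how an expected directory is derived from a
block ID under the ID-based layout; the shift/mask values below are
assumptions from the HDFS-6482 layout description, not part of this patch:

    long blockId = 1073741825L;                      // example ID
    int d1 = (int) ((blockId >> 16) & 0xFF);         // assumed mask
    int d2 = (int) ((blockId >> 8) & 0xFF);          // assumed mask
    File expected = new File(bpFinalizedDir,
        "subdir" + d1 + File.separator + "subdir" + d2);
    // verifyFileLocation() warns when a block's actual parent differs from this.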



[16/50] [abbrv] hadoop git commit: HADOOP-11637. bash location hard-coded in shell scripts (aw)

2015-03-02 Thread zhz
HADOOP-11637. bash location hard-coded in shell scripts (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db0b6e60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db0b6e60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db0b6e60

Branch: refs/heads/HDFS-7285
Commit: db0b6e60d0e20ff6207359ed454d98403f36dada
Parents: 41d9952
Author: Allen Wittenauer a...@apache.org
Authored: Thu Feb 26 09:29:16 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:52 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++
 .../hadoop-common/src/main/bin/hadoop-functions.sh  | 2 +-
 .../hadoop-common/src/site/markdown/RackAwareness.md| 5 +++--
 hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh   | 2 +-
 hadoop-common-project/hadoop-kms/src/main/libexec/kms-config.sh | 2 +-
 hadoop-common-project/hadoop-kms/src/main/sbin/kms.sh   | 2 +-
 .../hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh  | 2 +-
 .../hadoop-hdfs-httpfs/src/main/libexec/httpfs-config.sh| 2 +-
 hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh  | 2 +-
 hadoop-tools/hadoop-sls/src/main/bin/rumen2sls.sh   | 2 +-
 hadoop-tools/hadoop-sls/src/main/bin/slsrun.sh  | 2 +-
 11 files changed, 14 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db0b6e60/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 39062a8..ca27463 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -404,6 +404,8 @@ Trunk (Unreleased)
 
+    HADOOP-11625. Minor fixes to command manual & SLA doc (aw)
 
+HADOOP-11637. bash location hard-coded in shell scripts (aw)
+
   OPTIMIZATIONS
 
 HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db0b6e60/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index cec6b2c..bccbe25 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
 # this work for additional information regarding copyright ownership.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db0b6e60/hadoop-common-project/hadoop-common/src/site/markdown/RackAwareness.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/RackAwareness.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/RackAwareness.md
index c5ab19a..09f5610 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/RackAwareness.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/RackAwareness.md
@@ -105,7 +105,7 @@ bash Example
 
 
 ```bash
-#!/bin/bash
+#!/usr/bin/env bash
 # Here's a bash example to show just how simple these scripts can be
 # Assuming we have a flat network with everything on a single switch, we can fake a rack topology.
 # This could occur in a lab environment where we have limited nodes, like 2-8 physical machines on an unmanaged switch.
@@ -133,4 +133,5 @@ bash Example
 #fails to split on four dots, it will still print '/rack-' last field value
 
 echo $@ | xargs -n 1 | awk -F '.' '{print "/rack-"$NF}'
-```
\ No newline at end of file
+```
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db0b6e60/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
--
diff --git a/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh 
b/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
index de9554f..41449ef 100644
--- a/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
+++ b/hadoop-common-project/hadoop-kms/src/main/conf/kms-env.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.


[11/50] [abbrv] hadoop git commit: HDFS-7537. Add UNDER MIN REPL'D BLOCKS count to fsck. Contributed by GAO Rui

2015-03-02 Thread zhz
HDFS-7537. Add UNDER MIN REPL'D BLOCKS count to fsck.  Contributed by GAO Rui


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0127820a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0127820a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0127820a

Branch: refs/heads/HDFS-7285
Commit: 0127820a0dc6f0d4f741e276a1048117f0e78fc5
Parents: 21e9e91
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu Feb 26 11:45:56 2015 +0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:51 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/namenode/NamenodeFsck.java  | 36 +++--
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 81 +++-
 3 files changed, 111 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0127820a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6dc7a0f..4523bf4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -673,6 +673,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7495. Remove updatePosition argument from DFSInputStream#getBlockAt()
 (cmccabe)
 
+HDFS-7537. Add UNDER MIN REPL'D BLOCKS count to fsck.  (GAO Rui via
+szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0127820a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index f36b773..3c7918f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -513,6 +513,9 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   res.totalReplicas += liveReplicas;
   short targetFileReplication = file.getReplication();
   res.numExpectedReplicas += targetFileReplication;
+      if (liveReplicas < minReplication) {
+        res.numUnderMinReplicatedBlocks++;
+      }
       if (liveReplicas > targetFileReplication) {
 res.excessiveReplicas += (liveReplicas - targetFileReplication);
 res.numOverReplicatedBlocks += 1;
@@ -859,6 +862,7 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 long corruptBlocks = 0L;
 long excessiveReplicas = 0L;
 long missingReplicas = 0L;
+long numUnderMinReplicatedBlocks=0L;
 long numOverReplicatedBlocks = 0L;
 long numUnderReplicatedBlocks = 0L;
 long numMisReplicatedBlocks = 0L;  // blocks that do not satisfy block 
placement policy
@@ -875,10 +879,13 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 long totalReplicas = 0L;
 
 final short replication;
+final int minReplication;
 
 Result(Configuration conf) {
   this.replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
 
DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+  this.minReplication = 
(short)conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
+
DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
 }
 
 /**
@@ -926,15 +933,28 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
         res.append(" (Total open file blocks (not validated): ").append(
             totalOpenFilesBlocks).append(")");
   }
-      if (corruptFiles > 0) {
-        res.append("\n  ").append(
-            "\n  CORRUPT FILES:\t").append(corruptFiles);
-        if (missingSize > 0) {
-          res.append("\n  MISSING BLOCKS:\t").append(missingIds.size()).append(
-              "\n  MISSING SIZE:\t\t").append(missingSize).append(" B");
+      if (corruptFiles > 0 || numUnderMinReplicatedBlocks > 0) {
+        res.append("\n  ");
+        if (numUnderMinReplicatedBlocks > 0) {
+          res.append("\n  UNDER MIN REPL'D BLOCKS:\t")
+              .append(numUnderMinReplicatedBlocks);
+          if (totalBlocks > 0) {
+            res.append(" (").append(
+                ((float) (numUnderMinReplicatedBlocks * 100) / (float) totalBlocks))
+                .append(" %)");
+          }
+          res.append("\n  
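
Pieced together from the string literals visible in the hunk above, the new
summary would print something like the following for one under-min-replicated
block out of four total (100 * 1 / 4 = 25.0 %); the exact surrounding lines
are truncated in this digest:

      UNDER MIN REPL'D BLOCKS:      1 (25.0 %)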

[14/50] [abbrv] hadoop git commit: MAPREDUCE-5612. Add javadoc for TaskCompletionEvent.Status. Contributed by Chris Palmer.

2015-03-02 Thread zhz
MAPREDUCE-5612. Add javadoc for TaskCompletionEvent.Status. Contributed by 
Chris Palmer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d90a92f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d90a92f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d90a92f

Branch: refs/heads/HDFS-7285
Commit: 3d90a92fb331897b845c6639d851d42cb13105f1
Parents: 2827ce2
Author: Akira Ajisaka aajis...@apache.org
Authored: Thu Feb 26 15:05:17 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:52 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../hadoop/mapred/TaskCompletionEvent.java  | 32 +++-
 .../hadoop/mapreduce/TaskCompletionEvent.java   | 32 +++-
 3 files changed, 65 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d90a92f/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 6cb1754..f509d4e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -308,6 +308,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6264. Remove httpclient dependency from hadoop-mapreduce-client.
 (Brahma Reddy Battula via aajisaka)
 
+MAPREDUCE-5612. Add javadoc for TaskCompletionEvent.Status.
+(Chris Palmer via aajisaka)
+
   OPTIMIZATIONS
 
 MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d90a92f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskCompletionEvent.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskCompletionEvent.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskCompletionEvent.java
index dc4d82e..2bb55a2 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskCompletionEvent.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskCompletionEvent.java
@@ -32,7 +32,37 @@ public class TaskCompletionEvent
 extends org.apache.hadoop.mapreduce.TaskCompletionEvent {
   @InterfaceAudience.Public
   @InterfaceStability.Stable
-  static public enum Status {FAILED, KILLED, SUCCEEDED, OBSOLETE, TIPFAILED};
+  /**
+   *  Task Completion Statuses
+   */
+  static public enum Status {
+/**
+ * Task Event Attempt failed but there are attempts remaining.
+ */
+FAILED,
+/**
+ * Task Event was killed.
+ */
+KILLED,
+/**
+ * Task Event was successful.
+ */
+SUCCEEDED,
+/**
+ * Used to Override a previously successful event status.
+ * Example:  Map attempt runs and a SUCCEEDED event is sent. Later a task
+ * is retroactively failed due to excessive fetch failure during shuffle
+ * phase. When the retroactive attempt failure occurs, an OBSOLETE event is
+ * sent for the map attempt indicating the prior event is no longer valid.
+ */
+OBSOLETE,
+/**
+ * Task Event attempt failed and no further attempts exist.
+ * reached MAX attempts. When a reducer receives a TIPFAILED event it
+ * gives up trying to shuffle data from that map task.
+ */
+TIPFAILED
+  }
   
   public static final TaskCompletionEvent[] EMPTY_ARRAY = 
new TaskCompletionEvent[0];
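
A short sketch of how a client observes these statuses through the old mapred
API; the jobClient and jobConf objects are assumed to be set up elsewhere:

    RunningJob running = jobClient.submitJob(jobConf);
    TaskCompletionEvent[] events = running.getTaskCompletionEvents(0);
    for (TaskCompletionEvent event : events) {
      if (event.getTaskStatus() == TaskCompletionEvent.Status.OBSOLETE) {
        // A previously SUCCEEDED map attempt was retroactively invalidated,
        // e.g. after excessive fetch failures during shuffle.
      }
    }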

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d90a92f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCompletionEvent.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCompletionEvent.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCompletionEvent.java
index 31643a9..21c3823 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCompletionEvent.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/TaskCompletionEvent.java
@@ -36,7 +36,37 @@ import org.apache.hadoop.io.WritableUtils;
 public class TaskCompletionEvent 

[10/50] [abbrv] hadoop git commit: HADOOP-11620. Add support for load balancing across a group of KMS for HA. Contributed by Arun Suresh.

2015-03-02 Thread zhz
HADOOP-11620. Add support for load balancing across a group of KMS for HA. 
Contributed by Arun Suresh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f356cfa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f356cfa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f356cfa

Branch: refs/heads/HDFS-7285
Commit: 9f356cfaf589ff55842c180be0adee948d8aa060
Parents: 0127820
Author: Andrew Wang w...@apache.org
Authored: Wed Feb 25 21:15:44 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:51 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../crypto/key/kms/KMSClientProvider.java   |  84 -
 .../key/kms/LoadBalancingKMSClientProvider.java | 347 +++
 .../kms/TestLoadBalancingKMSClientProvider.java | 166 +
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 114 +++---
 5 files changed, 654 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f356cfa/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0d452f7..39062a8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -648,6 +648,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11506. Configuration variable expansion regex expensive for long
 values. (Gera Shegalov via gera)
 
+HADOOP-11620. Add support for load balancing across a group of KMS for HA.
+(Arun Suresh via wang)
+
   BUG FIXES
 
 HADOOP-11512. Use getTrimmedStrings when reading serialization keys

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f356cfa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index 97ab253..223e69a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -52,6 +52,7 @@ import java.io.Writer;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
 import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -74,6 +75,7 @@ import 
org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 
 /**
  * KMS client <code>KeyProvider</code> implementation.
@@ -221,14 +223,71 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
*/
   public static class Factory extends KeyProviderFactory {
 
+/**
+ * This provider expects URIs in the following form :
+ * kms://<PROTO>@<AUTHORITY>/<PATH>
+ *
+ * where :
+ * - PROTO = http or https
+ * - AUTHORITY = HOSTS[:PORT]
+ * - HOSTS = HOSTNAME[;HOSTS]
+ * - HOSTNAME = string
+ * - PORT = integer
+ *
+ * If multiple hosts are provided, the Factory will create a
+ * {@link LoadBalancingKMSClientProvider} that round-robins requests
+ * across the provided list of hosts.
+ */
 @Override
-public KeyProvider createProvider(URI providerName, Configuration conf)
+public KeyProvider createProvider(URI providerUri, Configuration conf)
 throws IOException {
-  if (SCHEME_NAME.equals(providerName.getScheme())) {
-return new KMSClientProvider(providerName, conf);
+  if (SCHEME_NAME.equals(providerUri.getScheme())) {
+URL origUrl = new URL(extractKMSPath(providerUri).toString());
+String authority = origUrl.getAuthority();
+// check for ';' which delimits the backup hosts
+if (Strings.isNullOrEmpty(authority)) {
+          throw new IOException(
+              "No valid authority in kms uri [" + origUrl + "]");
+}
+// Check if port is present in authority
+// In the current scheme, all hosts have to run on the same port
+int port = -1;
+String hostsPart = authority;
+        if (authority.contains(":")) {
+          String[] t = authority.split(":");
+  try {
+port = Integer.parseInt(t[1]);
+  } catch (Exception e) 
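
A hedged example of the multi-host form the javadoc above describes; the host
names are illustrative, and the provider-path key name is the generic
KeyProviderFactory one, so check it against your release:

    Configuration conf = new Configuration();
    // Two KMS instances behind one URI; note all hosts must share the port.
    conf.set("hadoop.security.key.provider.path",
        "kms://https@kms01.example.com;kms02.example.com:9600/kms");
    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
    // With more than one host this resolves to a LoadBalancingKMSClientProvider
    // that round-robins requests across kms01 and kms02.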

[08/50] [abbrv] hadoop git commit: HDFS-7832. Show 'Last Modified' in Namenode's 'Browse Filesystem' (Contributed by Vinayakumar B)

2015-03-02 Thread zhz
HDFS-7832. Show 'Last Modified' in Namenode's 'Browse Filesystem' (Contributed 
by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/199a82d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/199a82d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/199a82d4

Branch: refs/heads/HDFS-7285
Commit: 199a82d4a68ebfacab4d2e1b263d332b8f2156dc
Parents: 9f356cf
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Feb 26 14:36:09 2015 +0530
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:51 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html | 2 ++
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.js   | 9 -
 3 files changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/199a82d4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4523bf4..e09714f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -676,6 +676,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7537. Add UNDER MIN REPL'D BLOCKS count to fsck.  (GAO Rui via
 szetszwo)
 
+HDFS-7832. Show 'Last Modified' in Namenode's 'Browse Filesystem'
+(vinayakumarb)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/199a82d4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index e1fdfa3..7b34044 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -112,6 +112,7 @@
                 <th>Owner</th>
                 <th>Group</th>
                 <th>Size</th>
+                <th>Last Modified</th>
                 <th>Replication</th>
                 <th>Block Size</th>
                 <th>Name</th>
@@ -124,6 +125,7 @@
                 <td>{owner}</td>
                 <td>{group}</td>
                 <td>{length|fmt_bytes}</td>
+                <td>{#helper_date_tostring value="{modificationTime}"/}</td>
                 <td>{replication}</td>
                 <td>{blockSize|fmt_bytes}</td>
                 <td><a style="cursor:pointer" inode-type="{type}" class="explorer-browse-links" inode-path="{pathSuffix}">{pathSuffix}</a></td>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/199a82d4/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 87d47fa..0a53dcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -143,6 +143,12 @@
   }
 
   function browse_directory(dir) {
+var HELPERS = {
+  'helper_date_tostring' : function (chunk, ctx, bodies, params) {
+var value = dust.helpers.tap(params.value, chunk, ctx);
+return chunk.write('' + new Date(Number(value)).toLocaleString());
+  }
+};
 var url = '/webhdfs/v1' + dir + '?op=LISTSTATUS';
 $.get(url, function(data) {
       var d = get_response(data, "FileStatuses");
@@ -154,7 +160,8 @@
   current_directory = dir;
   $('#directory').val(dir);
   window.location.hash = dir;
-  dust.render('explorer', d, function(err, out) {
+  var base = dust.makeBase(HELPERS);
+  dust.render('explorer', base.push(d), function(err, out) {
 $('#panel').html(out);
 
 $('.explorer-browse-links').click(function() {



[20/50] [abbrv] hadoop git commit: HDFS-7774. Unresolved symbols error while compiling HDFS on Windows 7/32 bit. Contributed by Kiran Kumar M R.

2015-03-02 Thread zhz
HDFS-7774. Unresolved symbols error while compiling HDFS on Windows 7/32 bit. 
Contributed by Kiran Kumar M R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a90367c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a90367c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a90367c

Branch: refs/heads/HDFS-7285
Commit: 1a90367c3a6cd708b5f19e71859be864ba95e6db
Parents: 9b0eda1
Author: cnauroth cnaur...@apache.org
Authored: Thu Feb 26 13:37:46 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:52 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 5 -
 .../hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c  | 2 +-
 .../main/native/libhdfs/os/windows/thread_local_storage.c| 8 
 4 files changed, 16 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a90367c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 54b4057..ae83898 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1040,6 +1040,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7843. A truncated file is corrupted after rollback from a rolling
 upgrade.  (szetszwo)
 
+HDFS-7774. Unresolved symbols error while compiling HDFS on Windows 7/32 
bit.
+(Kiran Kumar M R via cnauroth)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a90367c/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 5efce5c..2d402a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -440,10 +440,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;
             </goals>
             <configuration>
               <target>
+                <condition property="generator" value="Visual Studio 10" else="Visual Studio 10 Win64">
+                  <equals arg1="Win32" arg2="${env.PLATFORM}" />
+                </condition>
                 <mkdir dir="${project.build.directory}/native"/>
                 <exec executable="cmake" dir="${project.build.directory}/native"
                     failonerror="true">
-                  <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse} -G 'Visual Studio 10 Win64'"/>
+                  <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse} -G '${generator}'"/>
                 </exec>
                 <exec executable="msbuild" dir="${project.build.directory}/native"
                     failonerror="true">

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a90367c/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c
index 90450d8..f5cc2a7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c
@@ -28,7 +28,7 @@
  * @param toRun thread to run
  * @return DWORD result of running thread (always 0)
  */
-static DWORD runThread(LPVOID toRun) {
+static DWORD WINAPI runThread(LPVOID toRun) {
   const thread *t = toRun;
   t->start(t->arg);
   return 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a90367c/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c
index 70ad152..4c415e1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c
@@ -96,13 

[15/50] [abbrv] hadoop git commit: MAPREDUCE-6223. TestJobConf#testNegativeValueForTaskVmem failures. (Varun Saxena via kasha)

2015-03-02 Thread zhz
MAPREDUCE-6223. TestJobConf#testNegativeValueForTaskVmem failures. (Varun 
Saxena via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2827ce2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2827ce2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2827ce2b

Branch: refs/heads/HDFS-7285
Commit: 2827ce2b5361e22e5fae729c6cb5f7f89f864002
Parents: 1a90367
Author: Karthik Kambatla ka...@apache.org
Authored: Thu Feb 26 14:24:19 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:52 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../java/org/apache/hadoop/mapred/JobConf.java  |  6 ++---
 .../org/apache/hadoop/conf/TestJobConf.java | 23 ++--
 3 files changed, 17 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2827ce2b/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index d2c4ab8..6cb1754 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -390,6 +390,9 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-2815. JavaDoc does not generate correctly for
 MultithreadedMapRunner. (Chris Palmer via aajisaka)
 
+MAPREDUCE-6223. TestJobConf#testNegativeValueForTaskVmem failures. 
+(Varun Saxena via kasha)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2827ce2b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index 98a643f..315c829 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -1826,8 +1826,7 @@ public class JobConf extends Configuration {
   public long getMemoryForMapTask() {
 long value = getDeprecatedMemoryValue();
     if (value < 0) {
-  return getLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY,
-  JobContext.DEFAULT_MAP_MEMORY_MB);
+  return getMemoryRequired(TaskType.MAP);
 }
 return value;
   }
@@ -1853,8 +1852,7 @@ public class JobConf extends Configuration {
   public long getMemoryForReduceTask() {
 long value = getDeprecatedMemoryValue();
     if (value < 0) {
-  return getLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY,
-  JobContext.DEFAULT_REDUCE_MEMORY_MB);
+  return getMemoryRequired(TaskType.REDUCE);
 }
 return value;
   }
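
The effect of the change, sketched; DEFAULT_MAP_MEMORY_MB is assumed to be
1024 MB in this release:

    JobConf conf = new JobConf();
    conf.set(MRJobConfig.MAP_MEMORY_MB, "-1");
    // Previously this returned -1; with the fix, a negative or unset value
    // falls back to MRJobConfig.DEFAULT_MAP_MEMORY_MB.
    long mapMemoryMb = conf.getMemoryForMapTask();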

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2827ce2b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java
index e380d92..f67ba1f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/conf/TestJobConf.java
@@ -19,9 +19,7 @@ package org.apache.hadoop.conf;
 
 import org.junit.Assert;
 import org.junit.Test;
-
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobContext;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 
 public class TestJobConf {
@@ -87,8 +85,10 @@ public class TestJobConf {
     configuration.set("mapred.task.maxvmem" , String.valueOf(-1));
     configuration.set(MRJobConfig.MAP_MEMORY_MB,"-1");
     configuration.set(MRJobConfig.REDUCE_MEMORY_MB,"-1");
-Assert.assertEquals(configuration.getMemoryForMapTask(),-1);
-Assert.assertEquals(configuration.getMemoryForReduceTask(),-1);
+Assert.assertEquals(configuration.getMemoryForMapTask(),
+MRJobConfig.DEFAULT_MAP_MEMORY_MB);
+Assert.assertEquals(configuration.getMemoryForReduceTask(),
+

[42/50] [abbrv] hadoop git commit: HDFS-5853. Add hadoop.user.group.metrics.percentiles.intervals to hdfs-default.xml (aajisaka)

2015-03-02 Thread zhz
HDFS-5853. Add hadoop.user.group.metrics.percentiles.intervals to 
hdfs-default.xml (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1ddc715
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1ddc715
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1ddc715

Branch: refs/heads/HDFS-7285
Commit: b1ddc7157d74161cee342405822bb93932a22cb4
Parents: 133d04c
Author: Akira Ajisaka aajis...@apache.org
Authored: Sun Mar 1 01:16:36 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:55 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  | 11 +++
 2 files changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1ddc715/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 16fe394..ce35ea2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -691,6 +691,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7685. Document dfs.namenode.heartbeat.recheck-interval in
 hdfs-default.xml. (Kai Sasaki via aajisaka)
 
+HDFS-5853. Add hadoop.user.group.metrics.percentiles.intervals to
+hdfs-default.xml. (aajisaka)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1ddc715/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 66fe86c..7eacfc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1518,6 +1518,17 @@
 </property>
 
 <property>
+  <name>hadoop.user.group.metrics.percentiles.intervals</name>
+  <value></value>
+  <description>
+    A comma-separated list of the granularity in seconds for the metrics
+    which describe the 50/75/90/95/99th percentile latency for group resolution
+    in milliseconds.
+    By default, percentile latency metrics are disabled.
+  </description>
+</property>
+
+<property>
   <name>dfs.encrypt.data.transfer</name>
   <value>false</value>
   <description>



[50/50] [abbrv] hadoop git commit: HDFS-7789. DFSck should resolve the path to support cross-FS symlinks. (gera)

2015-03-02 Thread zhz
HDFS-7789. DFSck should resolve the path to support cross-FS symlinks. (gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7deb79f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7deb79f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7deb79f9

Branch: refs/heads/HDFS-7285
Commit: 7deb79f95ef9a0efb983bde557a0ff0158b11d0d
Parents: 0e4a23f
Author: Gera Shegalov g...@apache.org
Authored: Thu Feb 12 04:32:43 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:56 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/tools/DFSck.java | 31 +---
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 14 ++---
 .../namenode/TestFsckWithMultipleNameNodes.java | 20 +
 4 files changed, 53 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7deb79f9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5ca16af..d5208da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -697,6 +697,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7439. Add BlockOpResponseProto's message to the exception messages.
 (Takanobu Asanuma via szetszwo)
 
+HDFS-7789. DFSck should resolve the path to support cross-FS symlinks.
+(gera)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7deb79f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index ec83a90..dc6d9d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -225,6 +225,14 @@ public class DFSck extends Configured implements Tool {
 return errCode;
   }
   
+
+  private Path getResolvedPath(String dir) throws IOException {
+Configuration conf = getConf();
+Path dirPath = new Path(dir);
+FileSystem fs = dirPath.getFileSystem(conf);
+return fs.resolvePath(dirPath);
+  }
+
   /**
* Derive the namenode http address from the current file system,
* either default or as set by -fs in the generic options.
@@ -236,19 +244,12 @@ public class DFSck extends Configured implements Tool {
 Configuration conf = getConf();
 
 //get the filesystem object to verify it is an HDFS system
-final FileSystem fs;
-try {
-  fs = target.getFileSystem(conf);
-} catch (IOException ioe) {
-      System.err.println("FileSystem is inaccessible due to:\n"
-          + StringUtils.stringifyException(ioe));
-  return null;
-}
+final FileSystem fs = target.getFileSystem(conf);
 if (!(fs instanceof DistributedFileSystem)) {
       System.err.println("FileSystem is " + fs.getUri());
   return null;
 }
-
+
 return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf,
 DFSUtil.getHttpClientScheme(conf));
   }
@@ -303,8 +304,16 @@ public class DFSck extends Configured implements Tool {
       dir = "/";
 }
 
-final Path dirpath = new Path(dir);
-final URI namenodeAddress = getCurrentNamenodeAddress(dirpath);
+Path dirpath = null;
+URI namenodeAddress = null;
+try {
+  dirpath = getResolvedPath(dir);
+  namenodeAddress = getCurrentNamenodeAddress(dirpath);
+} catch (IOException ioe) {
+      System.err.println("FileSystem is inaccessible due to:\n"
+          + StringUtils.stringifyException(ioe));
+}
+
 if (namenodeAddress == null) {
   //Error message already output in {@link #getCurrentNamenodeAddress()}
       System.err.println("DFSck exiting.");
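
A minimal sketch of the resolution step the patch adds, with a hypothetical
ViewFs path standing in for a cross-FS symlink:

    Configuration conf = new Configuration();
    Path target = new Path("viewfs://cluster/user/data"); // hypothetical path
    FileSystem fs = target.getFileSystem(conf);
    // resolvePath() follows symlinks, possibly across filesystems, so fsck can
    // then locate the NameNode that actually owns the resolved path.
    Path resolved = fs.resolvePath(target);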

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7deb79f9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 33de692..0b23b84 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ 

[49/50] [abbrv] hadoop git commit: HADOOP-11658. Externalize io.compression.codecs property. Contributed by Kai Zheng.

2015-03-02 Thread zhz
HADOOP-11658. Externalize io.compression.codecs property. Contributed by Kai 
Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/22e6b2d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/22e6b2d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/22e6b2d3

Branch: refs/heads/HDFS-7285
Commit: 22e6b2d3ea7a0abb630324e74a74abb533a96ef0
Parents: 7deb79f
Author: Akira Ajisaka aajis...@apache.org
Authored: Mon Mar 2 01:09:54 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:56 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../hadoop/fs/CommonConfigurationKeys.java  | 17 +++-
 .../io/compress/CompressionCodecFactory.java| 21 +---
 .../hadoop/io/compress/TestCodecFactory.java|  3 ++-
 4 files changed, 31 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/22e6b2d3/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4c0c375..b8ed286 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -642,6 +642,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-10976. moving the source code of hadoop-tools docs to the
 directory under hadoop-tools (Masatake Iwasaki via aw)
 
+HADOOP-11658. Externalize io.compression.codecs property.
+(Kai Zheng via aajisaka)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22e6b2d3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 442dc7d..7575496 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -91,17 +91,24 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";
   public static final String IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY = 
"identity-provider.impl";
 
+  /** This is for specifying the implementation for the mappings from
+   * hostnames to the racks they belong to
+   */
+  public static final String  NET_TOPOLOGY_CONFIGURED_NODE_MAPPING_KEY =
+  "net.topology.configured.node.mapping";
+
+  /**
+   * Supported compression codec classes
+   */
+  public static final String IO_COMPRESSION_CODECS_KEY = 
"io.compression.codecs";
+
   /** Internal buffer size for Lzo compressor/decompressors */
   public static final String  IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY =
     "io.compression.codec.lzo.buffersize";
+
   /** Default value for IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY */
   public static final int IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT =
 64*1024;
-  /** This is for specifying the implementation for the mappings from
-   * hostnames to the racks they belong to
-   */
-  public static final String  NET_TOPOLOGY_CONFIGURED_NODE_MAPPING_KEY =
-    "net.topology.configured.node.mapping";
 
   /** Internal buffer size for Snappy compressor/decompressors */
   public static final String IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/22e6b2d3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
index eb35759..7476a15 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java
@@ -24,6 +24,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
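
With the key now hosted in CommonConfigurationKeys, callers can reference the
constant instead of the raw io.compression.codecs string. A small usage sketch
(codec and path are illustrative only):

  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.IO_COMPRESSION_CODECS_KEY,
      GzipCodec.class.getName());
  CompressionCodecFactory factory = new CompressionCodecFactory(conf);
  // Selects a codec by file suffix; returns null if none matches.
  CompressionCodec codec = factory.getCodec(new Path("logs/app.log.gz"));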
 

[40/50] [abbrv] hadoop git commit: MAPREDUCE-5653. DistCp does not honour config-overrides for mapreduce.[map, reduce].memory.mb (Ratandeep Ratti via aw)

2015-03-02 Thread zhz
MAPREDUCE-5653. DistCp does not honour config-overrides for 
mapreduce.[map,reduce].memory.mb (Ratandeep Ratti via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6c1b627
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6c1b627
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6c1b627

Branch: refs/heads/HDFS-7285
Commit: d6c1b627d30bc18fedbbb5c0fa4ba98e6043
Parents: 7797249
Author: Allen Wittenauer a...@apache.org
Authored: Sat Feb 28 22:53:38 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:54 2015 -0800

--
 hadoop-mapreduce-project/CHANGES.txt  |  3 +++
 .../hadoop-distcp/src/main/resources/distcp-default.xml   | 10 --
 2 files changed, 3 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6c1b627/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index f509d4e..ccd24a6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -6,6 +6,9 @@ Trunk (Unreleased)
 MAPREDUCE-5785. Derive heap size or mapreduce.*.memory.mb automatically.
 (Gera Shegalov and Karthik Kambatla via gera)
 
+MAPREDUCE-5653. DistCp does not honour config-overrides for
+mapreduce.[map,reduce].memory.mb (Ratandeep Ratti via aw)
+
   NEW FEATURES
 
 MAPREDUCE-778. Rumen Anonymizer. (Amar Kamat and Chris Douglas via amarrk)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6c1b627/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml
--
diff --git a/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml 
b/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml
index f50..6e1154e 100644
--- a/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml
+++ b/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml
@@ -32,16 +32,6 @@
 </property>
 
 <property>
-<name>mapred.job.map.memory.mb</name>
-<value>1024</value>
-</property>
-
-<property>
-<name>mapred.job.reduce.memory.mb</name>
-<value>1024</value>
-</property>
-
-<property>
 <name>mapred.reducer.new-api</name>
 <value>true</value>
 </property>
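
With the hard-coded 1024 MB values gone from distcp-default.xml, memory
settings passed at submission time are no longer shadowed. For example
(cluster addresses and the 2048 MB figure are illustrative only):

  hadoop distcp -Dmapreduce.map.memory.mb=2048 \
      hdfs://nn1:8020/src hdfs://nn2:8020/dst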



[36/50] [abbrv] hadoop git commit: YARN-3168. Convert site documentation from apt to markdown (Gururaj Shetty via aw)

2015-03-02 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/06aca7c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/NodeManagerRest.apt.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/NodeManagerRest.apt.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/NodeManagerRest.apt.vm
deleted file mode 100644
index 36b8621..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/NodeManagerRest.apt.vm
+++ /dev/null
@@ -1,645 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  NodeManager REST API's.
-  ---
-  ---
-  ${maven.build.timestamp}
-
-NodeManager REST API's.
-
-%{toc|section=1|fromDepth=0|toDepth=2}
-
-* Overview
-
-  The NodeManager REST API's allow the user to get status on the node and 
information about applications and containers running on that node. 
-  
-* NodeManager Information API
-
-  The node information resource provides overall information about that 
particular node.
-
-** URI
-
-  Both of the following URI's give you the node information.
-
---
-  * http://<nm http address:port>/ws/v1/node
-  * http://<nm http address:port>/ws/v1/node/info
---
-
-** HTTP Operations Supported
-
---
-  * GET
---
-
-** Query Parameters Supported
-
---
-  None
---
-
-** Elements of the nodeInfo object
-
-*---+--+---+
-|| Item || Data Type   || Description   |
-*---+--+---+
-| id| long | The NodeManager id |
-*---+--+---+
-| nodeHostName | string  | The host name of the NodeManager |
-*---+--+---+
-| totalPmemAllocatedContainersMB | long | The amount of physical 
memory allocated for use by containers in MB |
-*---+--+---+
-| totalVmemAllocatedContainersMB | long | The amount of virtual memory 
allocated for use by containers in MB |
-*---+--+---+
-| totalVCoresAllocatedContainers | long | The number of virtual cores 
allocated for use by containers |
-*---+--+---+
-| lastNodeUpdateTime | long | The last timestamp at which the health 
report was received (in ms since epoch)|
-*---+--+---+
-| healthReport | string  | The diagnostic health report of the node |
-*---+--+---+
-| nodeHealthy | boolean | true/false indicator of if the node is healthy|
-*---+--+---+
-| nodeManagerVersion | string  | Version of the NodeManager |
-*---+--+---+
-| nodeManagerBuildVersion | string  | NodeManager build string with build 
version, user, and checksum |
-*---+--+---+
-| nodeManagerVersionBuiltOn | string  | Timestamp when NodeManager was 
built(in ms since epoch) |
-*---+--+---+
-| hadoopVersion | string  | Version of hadoop common |
-*---+--+---+
-| hadoopBuildVersion | string  | Hadoop common build string with build 
version, user, and checksum |
-*---+--+---+
-| hadoopVersionBuiltOn | string  | Timestamp when hadoop common was built(in 
ms since epoch) |
-*---+--+---+
-
-** Response Examples
-
-  JSON response
-
-  HTTP Request:
-
---
-  GET http://<nm http address:port>/ws/v1/node/info
---
-
-  Response Header:
-
-+---+
-  HTTP/1.1 200 OK
-  Content-Type: application/json
-  Transfer-Encoding: chunked
-  Server: Jetty(6.1.26)
-+---+
-
-  Response Body:
-
-+---+
-{
-   "nodeInfo" : {
-  "hadoopVersionBuiltOn" : "Mon Jan  9 14:58:42 UTC 2012",
-  "nodeManagerBuildVersion" : "0.23.1-SNAPSHOT from 1228355 by user1 
source checksum 20647f76c36430e888cc7204826a445c",
-  "lastNodeUpdateTime" : 132666126,
-  "totalVmemAllocatedContainersMB" : 17203,
-  "totalVCoresAllocatedContainers" : 8,
-  "nodeHealthy 

[35/50] [abbrv] hadoop git commit: YARN-3168. Convert site documentation from apt to markdown (Gururaj Shetty via aw)

2015-03-02 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/06aca7c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
deleted file mode 100644
index 69728fb..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm
+++ /dev/null
@@ -1,3104 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  ResourceManager REST API's.
-  ---
-  ---
-  ${maven.build.timestamp}
-
-ResourceManager REST API's.
-
-%{toc|section=1|fromDepth=0|toDepth=2}
-
-* Overview
-
-  The ResourceManager REST API's allow the user to get information about the 
cluster - status on the cluster, metrics on the cluster, scheduler information, 
information about nodes in the cluster, and information about applications on 
the cluster.
-  
-* Cluster Information API
-
-  The cluster information resource provides overall information about the 
cluster. 
-
-** URI
-
-  Both of the following URI's give you the cluster information.
-
---
-  * http://<rm http address:port>/ws/v1/cluster
-  * http://<rm http address:port>/ws/v1/cluster/info
---
-
-** HTTP Operations Supported
-
---
-  * GET
---
-
-** Query Parameters Supported
-
---
-  None
---
-
-** Elements of the clusterInfo object
-
-*---+--+---+
-|| Item || Data Type   || Description   |
-*---+--+---+
-| id| long | The cluster id |
-*---+--+---+
-| startedOn | long | The time the cluster started (in ms since 
epoch)|
-*---+--+---+
-| state | string | The ResourceManager state - valid values are: 
NOTINITED, INITED, STARTED, STOPPED|
-*---+--+---+
-| haState   | string | The ResourceManager HA state - valid values are: 
INITIALIZING, ACTIVE, STANDBY, STOPPED|
-*---+--+---+
-| resourceManagerVersion | string  | Version of the ResourceManager |
-*---+--+---+
-| resourceManagerBuildVersion | string  | ResourceManager build string with 
build version, user, and checksum |
-*---+--+---+
-| resourceManagerVersionBuiltOn | string  | Timestamp when ResourceManager was 
built (in ms since epoch)|
-*---+--+---+
-| hadoopVersion | string  | Version of hadoop common |
-*---+--+---+
-| hadoopBuildVersion | string  | Hadoop common build string with build 
version, user, and checksum |
-*---+--+---+
-| hadoopVersionBuiltOn | string  | Timestamp when hadoop common was built(in 
ms since epoch)|
-*---+--+---+
-
-** Response Examples
-
-  JSON response
-
-  HTTP Request:
-
---
-  GET http://<rm http address:port>/ws/v1/cluster/info
---
-
-  Response Header:
-
-+---+
-  HTTP/1.1 200 OK
-  Content-Type: application/json
-  Transfer-Encoding: chunked
-  Server: Jetty(6.1.26)
-+---+
-
-  Response Body:
-
-+---+
-{
-  "clusterInfo":
-  {
-    "id":1324053971963,
-    "startedOn":1324053971963,
-    "state":"STARTED",
-    "resourceManagerVersion":"0.23.1-SNAPSHOT",
-    "resourceManagerBuildVersion":"0.23.1-SNAPSHOT from 1214049 by user1 
source checksum 050cd664439d931c8743a6428fd6a693",
-    "resourceManagerVersionBuiltOn":"Tue Dec 13 22:12:48 CST 2011",
-    "hadoopVersion":"0.23.1-SNAPSHOT",
-    "hadoopBuildVersion":"0.23.1-SNAPSHOT from 1214049 by user1 source 
checksum 11458df3bb77342dca5f917198fad328",
-    "hadoopVersionBuiltOn":"Tue Dec 13 22:12:26 CST 2011"
-  }
-}
-+---+
-
-  XML response
-
-  HTTP Request:
-
--
-  Accept: application/xml
-  GET http://<rm http address:port>/ws/v1/cluster/info
--
-
-  Response Header:
-
-+---+
-  HTTP/1.1 200 OK
-  Content-Type: application/xml
-  Content-Length: 712
-  Server: Jetty(6.1.26)
-+---+
-
-  Response 

[34/50] [abbrv] hadoop git commit: YARN-3168. Convert site documentation from apt to markdown (Gururaj Shetty via aw)

2015-03-02 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/06aca7c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRestart.apt.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRestart.apt.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRestart.apt.vm
deleted file mode 100644
index a08c19d..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRestart.apt.vm
+++ /dev/null
@@ -1,298 +0,0 @@
-~~ Licensed under the Apache License, Version 2.0 (the "License");
-~~ you may not use this file except in compliance with the License.
-~~ You may obtain a copy of the License at
-~~
-~~   http://www.apache.org/licenses/LICENSE-2.0
-~~
-~~ Unless required by applicable law or agreed to in writing, software
-~~ distributed under the License is distributed on an "AS IS" BASIS,
-~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-~~ See the License for the specific language governing permissions and
-~~ limitations under the License. See accompanying LICENSE file.
-
-  ---
-  ResourceManager Restart
-  ---
-  ---
-  ${maven.build.timestamp}
-
-ResourceManager Restart
-
-%{toc|section=1|fromDepth=0}
-
-* {Overview}
-
-  ResourceManager is the central authority that manages resources and schedules
-  applications running atop of YARN. Hence, it is potentially a single point of
-  failure in an Apache YARN cluster.
-
-  This document gives an overview of ResourceManager Restart, a feature that
-  enhances ResourceManager to keep functioning across restarts and also makes
-  ResourceManager down-time invisible to end-users.
-
-  ResourceManager Restart feature is divided into two phases:
-
-  ResourceManager Restart Phase 1 (Non-work-preserving RM restart):
-  Enhance RM to persist application/attempt state
-  and other credentials information in a pluggable state-store. RM will reload
-  this information from state-store upon restart and re-kick the previously
-  running applications. Users are not required to re-submit the applications.
-
-  ResourceManager Restart Phase 2 (Work-preserving RM restart):
-  Focus on re-constructing the running state of ResourceManager by combining
-  the container statuses from NodeManagers and container requests from 
ApplicationMasters
-  upon restart. The key difference from phase 1 is that previously running 
applications
-  will not be killed after RM restarts, and so applications won't lose their work
-  because of RM outage.
-
-* {Feature}
-
-** Phase 1: Non-work-preserving RM restart
-
-  As of Hadoop 2.4.0 release, only ResourceManager Restart Phase 1 is 
implemented which
-  is described below.
-
-  The overall concept is that RM will persist the application metadata
-  (i.e. ApplicationSubmissionContext) in
-  a pluggable state-store when client submits an application and also saves 
the final status
-  of the application such as the completion state (failed, killed, finished) 
-  and diagnostics when the application completes. Besides, RM also saves
-  the credentials like security keys, tokens to work in a secure environment.
-  Any time RM shuts down, as long as the required information (i.e. application 
metadata
-  and the accompanying credentials if running in a secure environment) is 
available
-  in the state-store, when RM restarts, it can pick up the application metadata
-  from the state-store and re-submit the application. RM won't re-submit the
-  applications if they were already completed (i.e. failed, killed, finished)
-  before RM went down.
-
-  NodeManagers and clients during the down-time of RM will keep polling RM 
until 
-  RM comes up. When RM becomes alive, it will send a re-sync command to
-  all the NodeManagers and ApplicationMasters it was talking to via heartbeats.
-  As of Hadoop 2.4.0 release, the behaviors for NodeManagers and 
ApplicationMasters to handle this command
-  are: NMs will kill all their managed containers and re-register with RM. From 
the
-  RM's perspective, these re-registered NodeManagers are similar to the newly 
joining NMs. 
-  AMs(e.g. MapReduce AM) are expected to shutdown when they receive the 
re-sync command.
-  After RM restarts and loads all the application metadata, credentials from 
state-store
-  and populates them into memory, it will create a new
-  attempt (i.e. ApplicationMaster) for each application that was not yet 
completed
-  and re-kick that application as usual. As described before, the previously 
running
-  applications' work is lost in this manner since they are essentially killed 
by
-  RM via the re-sync command on restart.
-
-** Phase 2: Work-preserving RM restart
-
-  As of Hadoop 2.6.0, we further enhanced RM restart feature to address the 
problem 
-  to not kill any applications running on YARN cluster if RM restarts.
-
-  Beyond all the groundwork that has been done in Phase 1 
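
The excerpt above is descriptive; operationally, RM restart is switched on
through yarn-site.xml. A minimal sketch (standard RM recovery properties;
ZKRMStateStore is only one of the pluggable state-stores, and the ZooKeeper
quorum shown is a placeholder):

  <property>
    <name>yarn.resourcemanager.recovery.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>zk1:2181,zk2:2181,zk3:2181</value>
  </property>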

[39/50] [abbrv] hadoop git commit: move HADOOP-10976 to 2.7

2015-03-02 Thread zhz
move HADOOP-10976 to 2.7


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77972492
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77972492
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77972492

Branch: refs/heads/HDFS-7285
Commit: 779724926e7e4b08c89b79684065b66623dd5a97
Parents: e70ce6f
Author: Akira Ajisaka aajis...@apache.org
Authored: Sat Feb 28 17:15:13 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:54 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77972492/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6d4da77..74bf558 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -163,9 +163,6 @@ Trunk (Unreleased)
 HADOOP-11346. Rewrite sls/rumen to use new shell framework (John Smith
 via aw)
 
-HADOOP-10976. moving the source code of hadoop-tools docs to the
-directory under hadoop-tools (Masatake Iwasaki via aw)
-
 HADOOP-7713. dfs -count -q should label output column (Jonathan Allen
 via aw)
 
@@ -636,6 +633,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11632. Cleanup Find.java to remove SupressWarnings annotations.
 (Akira Ajisaka via ozawa)
 
+HADOOP-10976. moving the source code of hadoop-tools docs to the
+directory under hadoop-tools (Masatake Iwasaki via aw)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.



[26/50] [abbrv] hadoop git commit: HDFS-6753. Initialize checkDisk when DirectoryScanner not able to get files list for scanning (Contributed by J.Andreina)

2015-03-02 Thread zhz
HDFS-6753. Initialize checkDisk when DirectoryScanner not able to get files 
list for scanning (Contributed by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54bcb5f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54bcb5f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54bcb5f7

Branch: refs/heads/HDFS-7285
Commit: 54bcb5f7e3fa8c1e4f3f325a4c3b6a24f61e4687
Parents: 04f0dae
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Feb 27 16:36:28 2015 +0530
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:53 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../apache/hadoop/hdfs/server/datanode/DataNode.java|  2 +-
 .../hadoop/hdfs/server/datanode/DirectoryScanner.java   | 12 +---
 .../hdfs/server/datanode/TestDirectoryScanner.java  |  9 ++---
 4 files changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54bcb5f7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ba553dc..8556afd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1040,6 +1040,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7774. Unresolved symbols error while compiling HDFS on Windows 7/32 
bit.
 (Kiran Kumar M R via cnauroth)
 
+HDFS-6753. Initialize checkDisk when DirectoryScanner not able to get
+files list for scanning (J.Andreina via vinayakumarb)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54bcb5f7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index f233e02..92ddb7b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -815,7 +815,7 @@ public class DataNode extends ReconfigurableBase
  reason = "verifcation is not supported by SimulatedFSDataset";
 } 
 if (reason == null) {
-  directoryScanner = new DirectoryScanner(data, conf);
+  directoryScanner = new DirectoryScanner(this, data, conf);
   directoryScanner.start();
 } else {
   LOG.info("Periodic Directory Tree Verification scan is disabled because " 
 +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54bcb5f7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 09c2914..c7ee21e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -63,6 +63,7 @@ public class DirectoryScanner implements Runnable {
   private final long scanPeriodMsecs;
   private volatile boolean shouldRun = false;
   private boolean retainDiffs = false;
+  private final DataNode datanode;
 
   final ScanInfoPerBlockPool diffs = new ScanInfoPerBlockPool();
   final Map<String, Stats> stats = new HashMap<String, Stats>();
@@ -308,7 +309,8 @@ public class DirectoryScanner implements Runnable {
 }
   }
 
-  DirectoryScanner(FsDatasetSpi<?> dataset, Configuration conf) {
+  DirectoryScanner(DataNode datanode, FsDatasetSpi<?> dataset, Configuration 
conf) {
+this.datanode = datanode;
 this.dataset = dataset;
 int interval = 
conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
 DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
@@ -547,7 +549,7 @@ public class DirectoryScanner implements Runnable {
 for (int i = 0; i < volumes.size(); i++) {
   if (isValid(dataset, volumes.get(i))) {
 ReportCompiler reportCompiler =
-  new ReportCompiler(volumes.get(i));
+  new ReportCompiler(datanode,volumes.get(i));
 Future<ScanInfoPerBlockPool> result = 
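
The DataNode handle is threaded through so that a failed volume listing can
trigger a disk check rather than be silently skipped, which is the point of
this JIRA. A hedged sketch of that pattern inside the report compiler (the
disk-check method name is an assumption, not the committed code):

  File[] files;
  try {
    files = FileUtil.listFiles(dir);   // throws IOException on an unreadable dir
  } catch (IOException ioe) {
    LOG.warn("Exception occured while compiling report: ", ioe);
    // Hypothetical: ask the DataNode to re-check its volumes.
    datanode.checkDiskErrorAsync();
  }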
   

[22/50] [abbrv] hadoop git commit: Revert HDFS-7769. TestHDFSCLI should not create files in hdfs project root dir.

2015-03-02 Thread zhz
Revert HDFS-7769. TestHDFSCLI should not create files in hdfs project root 
dir.

This reverts commit 7c6b6547eeed110e1a842e503bfd33afe04fa814.

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04f0dae7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04f0dae7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04f0dae7

Branch: refs/heads/HDFS-7285
Commit: 04f0dae7c982c3af7e96811a92a1cb6aeda74f9f
Parents: 645ebb9
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Fri Feb 27 18:25:32 2015 +0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:53 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 ---
 .../hadoop-hdfs/src/test/resources/testHDFSConf.xml  | 4 ++--
 2 files changed, 2 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04f0dae7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ae83898..ba553dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -975,9 +975,6 @@ Release 2.7.0 - UNRELEASED
 HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause
 DataNode to register successfully with only one NameNode.(vinayakumarb)
 
-HDFS-7769. TestHDFSCLI should not create files in hdfs project root dir.
-(szetszwo)
-
 HDFS-7753. Fix Multithreaded correctness Warnings in BackupImage.
 (Rakesh R and shv)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04f0dae7/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 2d3de1f..e59b05a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -16483,8 +16483,8 @@
 <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
 <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes 
/user/USERNAME/dir1</command>
 <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes 
/user/USERNAME/dir1</command>
-<command>-fs NAMENODE -getmerge /user/USERNAME/dir1 
CLITEST_DATA/file</command>
-<command>-cat CLITEST_DATA/file</command>
+<command>-fs NAMENODE -getmerge /user/USERNAME/dir1 data</command>
+<command>-cat data</command>
   </test-commands>
   <cleanup-commands>
 <command>-fs NAMENODE -rm -r /user/USERNAME</command>



[21/50] [abbrv] hadoop git commit: YARN-3255. RM, NM, JobHistoryServer, and WebAppProxyServer's main() should support generic options. Contributed by Konstantin Shvachko.

2015-03-02 Thread zhz
YARN-3255. RM, NM, JobHistoryServer, and WebAppProxyServer's main() should 
support generic options. Contributed by Konstantin Shvachko.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc024460
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc024460
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc024460

Branch: refs/heads/HDFS-7285
Commit: cc024460e00f99a5a5254ab7220d2efdf4797bac
Parents: 3d90a92
Author: Konstantin V Shvachko s...@apache.org
Authored: Thu Feb 26 17:12:19 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:53 2015 -0800

--
 .../java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java | 2 ++
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/yarn/server/nodemanager/NodeManager.java   | 4 +++-
 .../hadoop/yarn/server/resourcemanager/ResourceManager.java  | 3 +++
 .../apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java| 2 ++
 5 files changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc024460/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
index 6d58040..252ac55 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
@@ -216,6 +217,7 @@ public class JobHistoryServer extends CompositeService {
   new CompositeServiceShutdownHook(jobHistoryServer),
   SHUTDOWN_HOOK_PRIORITY);
   YarnConfiguration conf = new YarnConfiguration(new JobConf());
+  new GenericOptionsParser(conf, args);
   jobHistoryServer.init(conf);
   jobHistoryServer.start();
 } catch (Throwable t) {
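
The added GenericOptionsParser call means the standard generic options
(-D, -conf, -fs, ...) now reach the server's Configuration before init().
In isolation, the parser simply folds recognized options into the conf it is
given (the option name and value below are purely illustrative):

  Configuration conf = new YarnConfiguration(new JobConf());
  // Mutates conf in place; the constructor throws IOException on bad args.
  new GenericOptionsParser(conf,
      new String[] { "-D", "mapreduce.jobhistory.webapp.address=0.0.0.0:19888" });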

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc024460/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a635592..40f187b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -330,6 +330,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3217. Remove httpclient dependency from hadoop-yarn-server-web-proxy.
 (Brahma Reddy Battula via ozawa).
 
+YARN-3255. RM, NM, JobHistoryServer, and WebAppProxyServer's main()
+should support generic options. (shv)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc024460/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 7584138..a4be120 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.util.GenericOptionsParser;
 import 

[13/50] [abbrv] hadoop git commit: HDFS-7843. A truncated file is corrupted after rollback from a rolling upgrade.

2015-03-02 Thread zhz
HDFS-7843. A truncated file is corrupted after rollback from a rolling upgrade.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/746bc377
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/746bc377
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/746bc377

Branch: refs/heads/HDFS-7285
Commit: 746bc377b7252d344b5d83f3ae862b717085db4d
Parents: 7911e1d
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu Feb 26 10:14:40 2015 +0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:51 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/namenode/FSNamesystem.java  |  3 ++
 .../apache/hadoop/hdfs/TestRollingUpgrade.java  | 48 ++--
 3 files changed, 40 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/746bc377/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e0f9267..f8b0c37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1026,6 +1026,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7763. fix zkfc hung issue due to not catching exception in a corner
 case. (Liang Xie via wang)
 
+HDFS-7843. A truncated file is corrupted after rollback from a rolling
+upgrade.  (szetszwo)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/746bc377/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index deaf90c..63ffd81 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2089,6 +2089,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 if(!isUpgradeFinalized()) {
   return true;
 }
+if (isRollingUpgrade()) {
+  return true;
+}
 return file.isBlockInLatestSnapshot(blk);
   }
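
For context on the hunk above: this predicate decides whether a block being
removed must be retained on disk. While a rolling upgrade is in progress, the
pre-upgrade image may still reference the old block of a truncated file, so
returning true preserves the block for a possible rollback.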
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/746bc377/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
index 8e7b4b1..9746049 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java
@@ -23,9 +23,11 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
@@ -36,6 +38,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
+import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Assert;
@@ -260,42 +263,50 @@ public class TestRollingUpgrade {
 final Configuration conf = new HdfsConfiguration();
 MiniDFSCluster cluster = null;
 try {
-  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
   cluster.waitActive();
 
   final Path foo = new Path(/foo);
   final Path bar = new Path(/bar);
   cluster.getFileSystem().mkdirs(foo);
 
-  startRollingUpgrade(foo, bar, cluster);
+  

[41/50] [abbrv] hadoop git commit: HDFS-7685. Document dfs.namenode.heartbeat.recheck-interval in hdfs-default.xml. Contributed by Kai Sasaki.

2015-03-02 Thread zhz
HDFS-7685. Document dfs.namenode.heartbeat.recheck-interval in 
hdfs-default.xml. Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/70c1fa0b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/70c1fa0b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/70c1fa0b

Branch: refs/heads/HDFS-7285
Commit: 70c1fa0b18639c2aab57efdbd427901c95f4a728
Parents: 4e95f98
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri Feb 27 12:17:34 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:54 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  | 11 +++
 2 files changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/70c1fa0b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b2422d6..b4b0087 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -685,6 +685,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7308. Change the packet chunk size computation in DFSOutputStream in
 order to enforce packet size <= 64kB.  (Takuya Fukudome via szetszwo)
 
+HDFS-7685. Document dfs.namenode.heartbeat.recheck-interval in
+hdfs-default.xml. (Kai Sasaki via aajisaka)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70c1fa0b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 85d2273..66fe86c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -145,6 +145,17 @@
 </property>
 
 <property>
+  <name>dfs.namenode.heartbeat.recheck-interval</name>
+  <value>300000</value>
+  <description>
+    This time decides the interval to check for expired datanodes.
+    With this value and dfs.heartbeat.interval, the interval of
+    deciding the datanode is stale or not is also calculated.
+    The unit of this configuration is millisecond.
+  </description>
+</property>
+
+<property>
   <name>dfs.http.policy</name>
   <value>HTTP_ONLY</value>
   <description>Decide if HTTPS(SSL) is supported on HDFS
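
The derived interval the description alludes to is the dead-node timeout,
which the NameNode computes roughly as sketched below (default values assumed:
recheck-interval 300000 ms, dfs.heartbeat.interval 3 s):

  long heartbeatRecheckInterval = 300000L;  // dfs.namenode.heartbeat.recheck-interval
  long heartbeatIntervalSeconds = 3L;       // dfs.heartbeat.interval
  long heartbeatExpireInterval =
      2 * heartbeatRecheckInterval + 10 * 1000 * heartbeatIntervalSeconds;
  // => 630000 ms: a DataNode silent for ~10.5 minutes is declared dead.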



[04/50] [abbrv] hadoop git commit: YARN-3247. TestQueueMappings should use CapacityScheduler explicitly. Contributed by Zhihai Xu.

2015-03-02 Thread zhz
YARN-3247. TestQueueMappings should use CapacityScheduler explicitly. 
Contributed by Zhihai Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cadfb71d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cadfb71d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cadfb71d

Branch: refs/heads/HDFS-7285
Commit: cadfb71d7492f056a9318069865ab4fcd97d92f8
Parents: ef3702e
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Feb 25 10:34:28 2015 +0900
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:50 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../resourcemanager/scheduler/capacity/TestQueueMappings.java | 2 ++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadfb71d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 39e2dc0..2d11ed7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -652,6 +652,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3238. Connection timeouts to nodemanagers are retried at
 multiple levels (Jason Lowe via xgong)
 
+YARN-3247. TestQueueMappings should use CapacityScheduler explicitly.
+(Zhihai Xu via ozawa)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadfb71d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java
index 2317fab..005f40b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java
@@ -135,6 +135,8 @@ public class TestQueueMappings {
 checkQMapping("a", Q1, cs);
 
 csConf = new CapacitySchedulerConfiguration();
+csConf.set(YarnConfiguration.RM_SCHEDULER,
+CapacityScheduler.class.getName());
 setupQueueConfiguration(csConf);
 conf = new YarnConfiguration(csConf);
 



[07/50] [abbrv] hadoop git commit: YARN-3239. WebAppProxy does not support a final tracking url which has query fragments and params. Contributed by Jian He

2015-03-02 Thread zhz
YARN-3239. WebAppProxy does not support a final tracking url which has query 
fragments and params. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b09124d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b09124d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b09124d

Branch: refs/heads/HDFS-7285
Commit: 4b09124d9d2b1b8bc7bd68c11490a5729d1e7bec
Parents: afc8188
Author: Jason Lowe jl...@apache.org
Authored: Wed Feb 25 16:14:34 2015 +
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:50 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../server/webproxy/WebAppProxyServlet.java | 21 +-
 .../server/webproxy/TestWebAppProxyServlet.java | 23 +++-
 3 files changed, 40 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b09124d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2d11ed7..e5148eb 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -655,6 +655,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3247. TestQueueMappings should use CapacityScheduler explicitly.
 (Zhihai Xu via ozawa)
 
+YARN-3239. WebAppProxy does not support a final tracking url which has
+query fragments and params (Jian He via jlowe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b09124d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
index 6c0391f..47f7769 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
@@ -20,9 +20,9 @@ package org.apache.hadoop.yarn.server.webproxy;
 
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.ObjectInputStream;
 import java.io.OutputStream;
 import java.io.PrintWriter;
-import java.io.ObjectInputStream;
 import java.net.InetAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -38,6 +38,7 @@ import javax.servlet.http.Cookie;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.UriBuilder;
 
 import org.apache.commons.httpclient.Header;
 import org.apache.commons.httpclient.HostConfiguration;
@@ -58,6 +59,8 @@ import org.apache.hadoop.yarn.util.TrackingUriPlugin;
 import org.apache.hadoop.yarn.webapp.MimeType;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+import org.apache.http.NameValuePair;
+import org.apache.http.client.utils.URLEncodedUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -325,11 +328,17 @@ public class WebAppProxyServlet extends HttpServlet {
 req.getQueryString(), true), runningUser, id);
 return;
   }
-  URI toFetch = new URI(trackingUri.getScheme(), 
-  trackingUri.getAuthority(),
-  StringHelper.ujoin(trackingUri.getPath(), rest), 
req.getQueryString(),
-  null);
-  
+
+  // Append the user-provided path and query parameter to the original
+  // tracking url.
+  List<NameValuePair> queryPairs =
+  URLEncodedUtils.parse(req.getQueryString(), null);
+  UriBuilder builder = UriBuilder.fromUri(trackingUri);
+  for (NameValuePair pair : queryPairs) {
+builder.queryParam(pair.getName(), pair.getValue());
+  }
+  URI toFetch = builder.path(rest).build();
+
   LOG.info("{} is accessing unchecked {}"
   + " which is the app master GUI of {} owned by {}",
   remoteUser, toFetch, appId, runningUser);
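
Unlike the old four-argument URI constructor, UriBuilder keeps the query
parameters and fragment already on the tracking URI and appends the client's
parameters on top. A toy illustration (URL entirely hypothetical):

  URI trackingUri = URI.create("http://am-host:8088/app?attempt=1#counters");
  UriBuilder builder = UriBuilder.fromUri(trackingUri);
  builder.queryParam("user", "alice");          // client-supplied parameter
  URI toFetch = builder.path("tasks").build();
  // -> http://am-host:8088/app/tasks?attempt=1&user=alice#counters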

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b09124d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java

[18/50] [abbrv] hadoop git commit: HADOOP-9922. hadoop windows native build will fail in 32 bit machine. Contributed by Kiran Kumar M R.

2015-03-02 Thread zhz
HADOOP-9922. hadoop windows native build will fail in 32 bit machine. 
Contributed by Kiran Kumar M R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b0eda11
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b0eda11
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b0eda11

Branch: refs/heads/HDFS-7285
Commit: 9b0eda11cfa0539ee7b7bef52427342a96e75ec9
Parents: da85e17
Author: cnauroth cnaur...@apache.org
Authored: Thu Feb 26 12:41:33 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:52 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../hadoop-common/src/main/native/native.sln|  8 +--
 .../src/main/native/native.vcxproj  | 40 
 .../src/main/winutils/include/winutils.h|  5 +-
 .../src/main/winutils/libwinutils.c |  2 +-
 .../src/main/winutils/libwinutils.vcxproj   | 64 +++-
 .../hadoop-common/src/main/winutils/service.c   |  8 +--
 .../hadoop-common/src/main/winutils/task.c  |  2 +-
 .../src/main/winutils/winutils.sln  | 10 +++
 .../src/main/winutils/winutils.vcxproj  | 61 ++-
 10 files changed, 189 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0eda11/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index ca27463..1d9a6d4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1010,6 +1010,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11629. WASB filesystem should not start BandwidthGaugeUpdater if
 fs.azure.skip.metrics set to true. (Shanyu Zhao via cnauroth)
 
+HADOOP-9922. hadoop windows native build will fail in 32 bit machine.
+(Kiran Kumar M R via cnauroth)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0eda11/hadoop-common-project/hadoop-common/src/main/native/native.sln
--
diff --git a/hadoop-common-project/hadoop-common/src/main/native/native.sln 
b/hadoop-common-project/hadoop-common/src/main/native/native.sln
index 40a7821..54bc17e 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/native.sln
+++ b/hadoop-common-project/hadoop-common/src/main/native/native.sln
@@ -31,14 +31,14 @@ Global
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|Mixed 
Platforms.ActiveCfg = Release|x64
{4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|Mixed 
Platforms.Build.0 = Release|x64
-   {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|Win32.ActiveCfg = 
Release|x64
-   {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|Win32.Build.0 = 
Release|x64
+   {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|Win32.ActiveCfg = 
Release|Win32
+   {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|Win32.Build.0 = 
Release|Win32
{4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|x64.ActiveCfg = 
Release|x64
{4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Debug|x64.Build.0 = 
Release|x64
{4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|Mixed 
Platforms.ActiveCfg = Release|x64
{4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|Mixed 
Platforms.Build.0 = Release|x64
-   {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|Win32.ActiveCfg 
= Release|x64
-   {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|Win32.Build.0 = 
Release|x64
+   {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|Win32.ActiveCfg 
= Release|Win32
+   {4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|Win32.Build.0 = 
Release|Win32
{4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|x64.ActiveCfg = 
Release|x64
{4C0C12D2-3CB0-47F8-BCD0-55BD5732DFA7}.Release|x64.Build.0 = 
Release|x64
EndGlobalSection

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b0eda11/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
--
diff --git a/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj 
b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
index 2d60e56..0912c6a 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
+++ b/hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
@@ -19,6 +19,10 @@
 
 <Project DefaultTargets="CheckRequireSnappy;Build" ToolsVersion="4.0" 

[12/50] [abbrv] hadoop git commit: HDFS-7460. Rewrite httpfs to use new shell framework (John Smith via aw)

2015-03-02 Thread zhz
HDFS-7460. Rewrite httpfs to use new shell framework (John Smith via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21e9e91c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21e9e91c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21e9e91c

Branch: refs/heads/HDFS-7285
Commit: 21e9e91c8ab1160a9a756b5f155c62492661bc78
Parents: 746bc37
Author: Allen Wittenauer a...@apache.org
Authored: Wed Feb 25 18:57:41 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:51 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  |   2 +-
 .../src/main/conf/httpfs-env.sh |  51 +++--
 .../src/main/libexec/httpfs-config.sh   | 208 ---
 .../hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh  | 116 +++
 .../src/main/tomcat/ssl-server.xml  | 135 
 .../src/main/tomcat/ssl-server.xml.conf | 135 
 .../src/site/apt/ServerSetup.apt.vm | 159 --
 .../src/site/apt/UsingHttpTools.apt.vm  |  87 
 .../src/site/apt/index.apt.vm   |  83 
 .../src/site/markdown/ServerSetup.md.vm | 121 +++
 .../src/site/markdown/UsingHttpTools.md |  62 ++
 .../src/site/markdown/index.md  |  52 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 13 files changed, 533 insertions(+), 680 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21e9e91c/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index 4c42ef9..ddc6033 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -524,7 +524,7 @@
 <copy file="${basedir}/src/main/tomcat/server.xml"
   toDir="${httpfs.tomcat.dist.dir}/conf"/>
 <delete 
file="${httpfs.tomcat.dist.dir}/conf/ssl-server.xml"/>
-<copy file="${basedir}/src/main/tomcat/ssl-server.xml"
+<copy file="${basedir}/src/main/tomcat/ssl-server.xml.conf"
   toDir="${httpfs.tomcat.dist.dir}/conf"/>
 <delete 
file="${httpfs.tomcat.dist.dir}/conf/logging.properties"/>
 <copy file="${basedir}/src/main/tomcat/logging.properties"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21e9e91c/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh
index a2701d4..0e8cc40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/conf/httpfs-env.sh
@@ -14,40 +14,59 @@
 #
 
 # Set httpfs specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs HttpFS
-# Java System properties for HttpFS should be specified in this variable
 #
-# export CATALINA_OPTS=
-
-# HttpFS logs directory
+# hadoop-env.sh is read prior to this file.
 #
-# export HTTPFS_LOG=${HTTPFS_HOME}/logs
 
-# HttpFS temporary directory
+# HTTPFS temporary directory
 #
-# export HTTPFS_TEMP=${HTTPFS_HOME}/temp
+# export HTTPFS_TEMP=${HADOOP_PREFIX}/temp
 
-# The HTTP port used by HttpFS
+# The HTTP port used by HTTPFS
 #
 # export HTTPFS_HTTP_PORT=14000
 
-# The Admin port used by HttpFS
+# The Admin port used by HTTPFS
 #
-# export HTTPFS_ADMIN_PORT=`expr ${HTTPFS_HTTP_PORT} + 1`
+# export HTTPFS_ADMIN_PORT=$((HTTPFS_HTTP_PORT + 1))
 
-# The hostname HttpFS server runs on
+# The maximum number of Tomcat handler threads
 #
-# export HTTPFS_HTTP_HOSTNAME=`hostname -f`
+# export HTTPFS_MAX_THREADS=1000
 
-# Indicates if HttpFS is using SSL
+# The hostname HttpFS server runs on
 #
-# export HTTPFS_SSL_ENABLED=false
+# export HTTPFS_HTTP_HOSTNAME=$(hostname -f)
 
 # The location of the SSL keystore if using SSL
 #
 # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore
 
+#
 # The password of the SSL keystore if using SSL
 #
 # export HTTPFS_SSL_KEYSTORE_PASS=password
+
+##
+## Tomcat specific settings
+##
+#
+# Location of tomcat
+#
+# export HTTPFS_CATALINA_HOME=${HADOOP_PREFIX}/share/hadoop/httpfs/tomcat
+
+# Java System properties for HTTPFS should be specified in this variable.
+# The java.library.path and hadoop.home.dir properties are automatically
+# configured.  In order to supplement java.library.path,
+# one should add to the JAVA_LIBRARY_PATH env var.
+#
+# export CATALINA_OPTS=
+
+# PID file
+#
+# export 

[44/50] [abbrv] hadoop git commit: HADOOP-11657. Align the output of `hadoop fs -du` to be more Unix-like. (aajisaka)

2015-03-02 Thread zhz
HADOOP-11657. Align the output of `hadoop fs -du` to be more Unix-like. 
(aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cc51925
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cc51925
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cc51925

Branch: refs/heads/HDFS-7285
Commit: 0cc5192520bb4ed9551888fb24206b6a29e2db6a
Parents: 731a463
Author: Akira Ajisaka aajis...@apache.org
Authored: Sun Mar 1 21:09:15 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:55 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../org/apache/hadoop/fs/shell/FsUsage.java | 12 ++--
 .../org/apache/hadoop/hdfs/TestDFSShell.java| 29 
 3 files changed, 42 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cc51925/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index f1d48bc..b1a7a7d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -13,6 +13,9 @@ Trunk (Unreleased)
 
 HADOOP-10950. rework heap management vars (John Smith via aw)
 
+HADOOP-11657. Align the output of `hadoop fs -du` to be more Unix-like.
+(aajisaka)
+
   NEW FEATURES
 
 HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via 
aw)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cc51925/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
index 5c1dbf0..765b181 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
@@ -132,15 +132,23 @@ class FsUsage extends FsCommand {
 }
 
 @Override
-protected void processPathArgument(PathData item) throws IOException {
+protected void processArguments(LinkedList<PathData> args)
+throws IOException {
   usagesTable = new TableBuilder(3);
+  super.processArguments(args);
+  if (!usagesTable.isEmpty()) {
+usagesTable.printToStream(out);
+  }
+}
+
+@Override
+protected void processPathArgument(PathData item) throws IOException {
   // go one level deep on dirs from cmdline unless in summary mode
   if (!summary && item.stat.isDirectory()) {
 recursePath(item);
   } else {
 super.processPathArgument(item);
   }
-  usagesTable.printToStream(out);
 }
 
 @Override
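
The point of the FsUsage change above: usagesTable used to be created and
flushed once per path argument, so two paths produced two independently
sized tables; now a single table collects every row and is printed once in
processArguments, letting the size columns line up across all arguments. A
minimal stand-alone sketch of that idea (hypothetical names, not Hadoop's
actual TableBuilder API):

import java.util.ArrayList;
import java.util.List;

public class DuAlignSketch {
  public static void main(String[] args) {
    // Rows as produced by `hadoop fs -du` for two separate paths:
    // length, disk-space-consumed, path.
    List<String[]> rows = new ArrayList<>();
    rows.add(new String[] {"1", "3", "/test/dir/file3"});
    rows.add(new String[] {"25", "75", "/test/dir/file2"});

    // Column widths can only be fixed once every row is known, which is
    // why the patch defers printing until all arguments are processed.
    int w0 = 0, w1 = 0;
    for (String[] r : rows) {
      w0 = Math.max(w0, r[0].length());
      w1 = Math.max(w1, r[1].length());
    }
    for (String[] r : rows) {
      // Right-justify the numeric columns, Unix du style.
      System.out.printf("%" + w0 + "s  %" + w1 + "s  %s%n",
          r[0], r[1], r[2]);
    }
  }
}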

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cc51925/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index ee04076..0a88208 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -95,6 +95,14 @@ public class TestDFSShell {
 return f;
   }
 
+  static Path writeByte(FileSystem fs, Path f) throws IOException {
+DataOutputStream out = fs.create(f);
+out.writeByte(1);
+out.close();
+assertTrue(fs.exists(f));
+return f;
+  }
+
   static Path mkdir(FileSystem fs, Path p) throws IOException {
 assertTrue(fs.mkdirs(p));
 assertTrue(fs.exists(p));
@@ -272,6 +280,27 @@ public class TestDFSShell {
   Long combinedDiskUsed = myFileDiskUsed + myFile2DiskUsed;
   assertThat(returnString, containsString(combinedLength.toString()));
   assertThat(returnString, containsString(combinedDiskUsed.toString()));
+
+  // Check if output is rendered properly with multiple input paths
+  Path myFile3 = new Path("/test/dir/file3");
+  writeByte(fs, myFile3);
+  assertTrue(fs.exists(myFile3));
+  args = new String[3];
+  args[0] = "-du";
+  args[1] = "/test/dir/file3";
+  args[2] = "/test/dir/file2";
+  val = -1;
+  try {
+val = shell.run(args);
+  } catch (Exception e) {
+System.err.println("Exception raised from DFSShell.run " +
+e.getLocalizedMessage());
+  }
+  

[28/50] [abbrv] hadoop git commit: YARN-3262. Surface application outstanding resource requests table in RM web UI. (Jian He via wangda)

2015-03-02 Thread zhz
YARN-3262. Surface application outstanding resource requests table in RM web 
UI. (Jian He via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eaccaba3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eaccaba3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eaccaba3

Branch: refs/heads/HDFS-7285
Commit: eaccaba3121c6fcebc8b4ab94ec48ee1085a3fb5
Parents: 97c2c59
Author: Wangda Tan wan...@apache.org
Authored: Fri Feb 27 16:13:32 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:54 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../records/impl/pb/ResourceRequestPBImpl.java  |  4 +-
 .../scheduler/AbstractYarnScheduler.java|  9 
 .../scheduler/AppSchedulingInfo.java| 33 +++---
 .../scheduler/SchedulerApplicationAttempt.java  |  6 ++-
 .../server/resourcemanager/webapp/AppBlock.java | 46 +++-
 .../server/resourcemanager/webapp/AppPage.java  |  4 ++
 .../resourcemanager/webapp/AppsBlock.java   |  5 ++-
 .../webapp/FairSchedulerAppsBlock.java  |  5 ++-
 .../resourcemanager/webapp/RMWebServices.java   |  6 +--
 .../resourcemanager/webapp/dao/AppInfo.java | 17 +++-
 .../webapp/TestRMWebAppFairScheduler.java   | 10 -
 .../webapp/TestRMWebServicesApps.java   |  3 +-
 13 files changed, 118 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eaccaba3/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 38dd9fa..e7af84b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -336,6 +336,9 @@ Release 2.7.0 - UNRELEASED
 YARN-2820. Retry in FileSystemRMStateStore when FS's operations fail 
 due to IOException. (Zhihai Xu via ozawa)
 
+YARN-3262. Surface application outstanding resource requests table 
+in RM web UI. (Jian He via wangda)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eaccaba3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
index 0c8491f..27fb5ae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceRequestPBImpl.java
@@ -140,13 +140,13 @@ public class ResourceRequestPBImpl extends  
ResourceRequest {
 this.capability = capability;
   }
   @Override
-  public int getNumContainers() {
+  public synchronized int getNumContainers() {
 ResourceRequestProtoOrBuilder p = viaProto ? proto : builder;
 return (p.getNumContainers());
   }
 
   @Override
-  public void setNumContainers(int numContainers) {
+  public synchronized void setNumContainers(int numContainers) {
 maybeInitBuilder();
 builder.setNumContainers((numContainers));
   }
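
The synchronized keywords are the substance of the ResourceRequestPBImpl
change: every PBImpl record flips between an immutable proto and a mutable
builder, and with YARN-3262 the RM web UI starts reading requests that the
scheduler may still be mutating. A much-simplified sketch of that pattern
(stand-in fields, not the real protobuf types) and the visibility race that
unsynchronized access would allow:

public class NumContainersSketch {
  private int protoValue;          // stands in for the immutable proto
  private Integer builderValue;    // stands in for the mutable builder
  private boolean viaProto = true;

  private void maybeInitBuilder() {
    if (viaProto || builderValue == null) {
      builderValue = protoValue;   // copy proto state into the builder
      viaProto = false;
    }
  }

  // Without synchronized, a concurrent reader may observe
  // viaProto == false while builderValue is still null and fail.
  public synchronized int getNumContainers() {
    return viaProto ? protoValue : builderValue;
  }

  public synchronized void setNumContainers(int n) {
    maybeInitBuilder();
    builderValue = n;
  }
}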

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eaccaba3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index 04b3452..968a767 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -658,4 +658,13 @@ public abstract class AbstractYarnScheduler
   maxAllocWriteLock.unlock();
 }
   }
+
+  public 

[47/50] [abbrv] hadoop git commit: HDFS-4681. TestBlocksWithNotEnoughRacks#testCorruptBlockRereplicatedAcrossRacks fails using IBM java (Ayappan via aw)

2015-03-02 Thread zhz
HDFS-4681. TestBlocksWithNotEnoughRacks#testCorruptBlockRereplicatedAcrossRacks 
fails using IBM java (Ayappan via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/133d04cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/133d04cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/133d04cb

Branch: refs/heads/HDFS-7285
Commit: 133d04cb8972fa3e1e7a7babdc41c21b7340e34c
Parents: 260883b
Author: Allen Wittenauer a...@apache.org
Authored: Sat Feb 28 23:32:09 2015 -0800
Committer: Zhe Zhang zhezh...@cloudera.com
Committed: Mon Mar 2 09:13:55 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../test/java/org/apache/hadoop/hdfs/DFSTestUtil.java   | 12 
 .../java/org/apache/hadoop/hdfs/MiniDFSCluster.java | 10 ++
 .../blockmanagement/TestBlocksWithNotEnoughRacks.java   |  7 ---
 4 files changed, 29 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/133d04cb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2a8da43..16fe394 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -306,6 +306,9 @@ Trunk (Unreleased)
 HDFS-7803. Wrong command mentioned in HDFSHighAvailabilityWithQJM
 documentation (Arshad Mohammad via aw)
 
+HDFS-4681. 
TestBlocksWithNotEnoughRacks#testCorruptBlockRereplicatedAcrossRacks 
+fails using IBM java (Ayappan via aw)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/133d04cb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 5f05d94..c3dac35 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -252,6 +252,12 @@ public class DFSTestUtil {
   public void createFiles(FileSystem fs, String topdir) throws IOException {
 createFiles(fs, topdir, (short)3);
   }
+
+  public static byte[] readFileAsBytes(FileSystem fs, Path fileName) throws 
IOException {
+ByteArrayOutputStream os = new ByteArrayOutputStream();
+IOUtils.copyBytes(fs.open(fileName), os, 1024, true);
+return os.toByteArray();
+  }
   
   /** create nFiles with random names and directory hierarchies
*  with random (but reproducible) data in them.
@@ -724,6 +730,12 @@ public class DFSTestUtil {
 return b.toString();
   }
 
+  public static byte[] readFileAsBytes(File f) throws IOException {
+ByteArrayOutputStream os = new ByteArrayOutputStream();
+IOUtils.copyBytes(new FileInputStream(f), os, 1024, true);
+return os.toByteArray();
+  }
+
   /* Write the given string to the given file */
   public static void writeFile(FileSystem fs, Path p, String s) 
   throws IOException {
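
The new helpers return raw bytes rather than decoding file content to a
String, so comparisons no longer depend on the platform default charset,
which is where IBM's JDK diverges from Oracle's. A hedged usage sketch
(the class and method names here are illustrative; only readFileAsBytes
comes from the diff above):

import java.io.File;
import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;

class ReadAsBytesSketch {
  // Compare content seen through HDFS with a block file on local disk.
  static boolean sameContent(FileSystem fs, Path path, File blockFile)
      throws IOException {
    byte[] viaFs   = DFSTestUtil.readFileAsBytes(fs, path);
    byte[] viaDisk = DFSTestUtil.readFileAsBytes(blockFile);
    // Byte-wise equality is charset-independent, unlike comparing
    // Strings decoded with the JVM's default encoding.
    return Arrays.equals(viaFs, viaDisk);
  }
}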

http://git-wip-us.apache.org/repos/asf/hadoop/blob/133d04cb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 5297ba2..2c1d07e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -1869,6 +1869,16 @@ public class MiniDFSCluster {
 return null;
   }
 
+  public byte[] readBlockOnDataNodeAsBytes(int i, ExtendedBlock block)
+  throws IOException {
+assert (i >= 0 && i < dataNodes.size()) : "Invalid datanode " + i;
+File blockFile = getBlockFile(i, block);
+if (blockFile != null && blockFile.exists()) {
+  return DFSTestUtil.readFileAsBytes(blockFile);
+}
+return null;
+  }
+
   /**
* Corrupt a block on a particular datanode.
*
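
A hedged sketch of how a rack-awareness test might use the new
MiniDFSCluster helper to verify that a re-replicated block is intact;
cluster, block, the datanode indices, and the JUnit assertions
(org.junit.Assert) are assumptions taken from the test's context:

// Read the same block's on-disk file from two datanodes and compare.
byte[] original = cluster.readBlockOnDataNodeAsBytes(0, block);
byte[] replica  = cluster.readBlockOnDataNodeAsBytes(1, block);
// The helper returns null when the datanode holds no local file for
// the block, so check presence before comparing content.
assertNotNull(original);
assertNotNull(replica);
assertArrayEquals(original, replica);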

http://git-wip-us.apache.org/repos/asf/hadoop/blob/133d04cb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
--
diff --git