hadoop git commit: MAPREDUCE-5708. Duplicate String.format in YarnOutputFiles.getSpillFileForWrite. Contributed by Konstantin Weitz.

2015-05-14 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4bbcffa51 -> 454236ec1


MAPREDUCE-5708. Duplicate String.format in
YarnOutputFiles.getSpillFileForWrite. Contributed by Konstantin Weitz.

(cherry picked from commit 05ff54c66c49301c4ec2549704d9d459e784572c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/454236ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/454236ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/454236ec

Branch: refs/heads/branch-2
Commit: 454236ec19c29e9366882f501f67169b51424b3d
Parents: 4bbcffa
Author: Devaraj K deva...@apache.org
Authored: Thu May 14 22:09:54 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Thu May 14 22:11:15 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java  | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/454236ec/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 2885c0e..fd4cd72 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -164,6 +164,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6366. mapreduce.terasort.final.sync configuration in TeraSort
 doesn't work. (Takuya Fukudome via ozawa)
 
+MAPREDUCE-5708. Duplicate String.format in 
YarnOutputFiles.getSpillFileForWrite.
+(Konstantin Weitz via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/454236ec/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
index e08e093..e099b8f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
@@ -157,8 +157,8 @@ public class YarnOutputFiles extends MapOutputFile {
   public Path getSpillFileForWrite(int spillNumber, long size)
   throws IOException {
 return lDirAlloc.getLocalPathForWrite(
-String.format(String.format(SPILL_FILE_PATTERN,
-conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber)), size, conf);
+String.format(SPILL_FILE_PATTERN,
+conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber), size, conf);
   }
 
   /**



hadoop git commit: YARN-3505. Node's Log Aggregation Report with SUCCEED should not cached in RMApps. Contributed by Xuan Gong. (cherry picked from commit 15ccd967ee3e7046a50522089f67ba01f36ec76a)

2015-05-14 Thread junping_du
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 454236ec1 -> bc13c7d84


YARN-3505. Node's Log Aggregation Report with SUCCEED should not cached in 
RMApps. Contributed by Xuan Gong.
(cherry picked from commit 15ccd967ee3e7046a50522089f67ba01f36ec76a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc13c7d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc13c7d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc13c7d8

Branch: refs/heads/branch-2
Commit: bc13c7d84bf4693c976ecdff7d69686705bf906c
Parents: 454236e
Author: Junping Du junping...@apache.org
Authored: Thu May 14 10:57:36 2015 -0700
Committer: Junping Du junping...@apache.org
Committed: Thu May 14 10:59:48 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/api/records/LogAggregationStatus.java  |   2 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 +
 .../src/main/proto/yarn_protos.proto|   1 +
 .../src/main/resources/yarn-default.xml |   8 +
 .../protocolrecords/LogAggregationReport.java   |  16 +-
 .../protocolrecords/NodeHeartbeatRequest.java   |   7 +-
 .../impl/pb/LogAggregationReportPBImpl.java |  40 
 .../impl/pb/NodeHeartbeatRequestPBImpl.java |  82 +++
 .../hadoop/yarn/server/webapp/AppBlock.java |  19 +-
 .../yarn_server_common_service_protos.proto |  14 +-
 .../nodemanager/NodeStatusUpdaterImpl.java  |  46 +---
 .../logaggregation/AppLogAggregatorImpl.java|  19 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java | 228 +++
 .../resourcemanager/rmnode/RMNodeImpl.java  |  13 +-
 .../rmnode/RMNodeStatusEvent.java   |  11 +-
 .../resourcemanager/webapp/RMAppBlock.java  |  11 +-
 .../webapp/RMAppLogAggregationStatusBlock.java  |  37 ++-
 .../TestRMAppLogAggregationStatus.java  | 181 +++
 19 files changed, 469 insertions(+), 279 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc13c7d8/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 58e51c3..c642b12 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -63,6 +63,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3448. Added a rolling time-to-live LevelDB timeline store 
implementation.
 (Jonathan Eagles via zjshen)
 
+YARN-3505. Node's Log Aggregation Report with SUCCEED should not cached in 
+RMApps. (Xuan Gong via junping_du)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc13c7d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationStatus.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationStatus.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationStatus.java
index da1230c..1e10972 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationStatus.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationStatus.java
@@ -34,6 +34,8 @@ public enum LogAggregationStatus {
   /** Log Aggregation is Running. */
   RUNNING,
 
+  /** Log Aggregation is Running, but has failures in previous cycles. */
+  RUNNING_WITH_FAILURE,
   /**
* Log Aggregation is Succeeded. All of the logs have been aggregated
* successfully.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc13c7d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index d677c59..7a05d13 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -718,6 +718,16 @@ public class YarnConfiguration extends Configuration {
   + proxy-user-privileges.enabled;
   public static boolean DEFAULT_RM_PROXY_USER_PRIVILEGES_ENABLED = false;
 
+  /**
+   * How many diagnostics/failure messages can be saved in RM 

hadoop git commit: MAPREDUCE-5708. Duplicate String.format in YarnOutputFiles.getSpillFileForWrite. Contributed by Konstantin Weitz.

2015-05-14 Thread devaraj
Repository: hadoop
Updated Branches:
  refs/heads/trunk def9136e0 -> 05ff54c66


MAPREDUCE-5708. Duplicate String.format in
YarnOutputFiles.getSpillFileForWrite. Contributed by Konstantin Weitz.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05ff54c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05ff54c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05ff54c6

Branch: refs/heads/trunk
Commit: 05ff54c66c49301c4ec2549704d9d459e784572c
Parents: def9136
Author: Devaraj K deva...@apache.org
Authored: Thu May 14 22:09:54 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Thu May 14 22:09:54 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java  | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05ff54c6/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 7fe8483..bc1f427 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -422,6 +422,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6366. mapreduce.terasort.final.sync configuration in TeraSort
 doesn't work. (Takuya Fukudome via ozawa)
 
+MAPREDUCE-5708. Duplicate String.format in 
YarnOutputFiles.getSpillFileForWrite.
+(Konstantin Weitz via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05ff54c6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
index e08e093..e099b8f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
@@ -157,8 +157,8 @@ public class YarnOutputFiles extends MapOutputFile {
   public Path getSpillFileForWrite(int spillNumber, long size)
   throws IOException {
 return lDirAlloc.getLocalPathForWrite(
-String.format(String.format(SPILL_FILE_PATTERN,
-conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber)), size, conf);
+String.format(SPILL_FILE_PATTERN,
+conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber), size, conf);
   }
 
   /**



[25/37] hadoop git commit: Update fix version for YARN-3457 and YARN-3537.

2015-05-14 Thread jitendra
Update fix version for YARN-3457 and YARN-3537.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7de6198
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7de6198
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7de6198

Branch: refs/heads/HDFS-7240
Commit: f7de6198da030cb9cd62dcfd3c378dbbf857e017
Parents: 7f19e7a
Author: Jason Lowe jl...@apache.org
Authored: Wed May 13 20:52:17 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Wed May 13 20:53:16 2015 +

--
 hadoop-yarn-project/CHANGES.txt | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7de6198/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 38f5e81..146690b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -291,9 +291,6 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3110. Few issues in ApplicationHistory web ui. (Naganarasimha G R via 
xgong)
 
-YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore 
called.
-(Bibin A Chundatt via ozawa)
-
 YARN-3459. Fix failiure of TestLog4jWarningErrorMetricsAppender.
 (Varun Vasudev via wangda)
 
@@ -320,9 +317,6 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3444. Fix typo capabililty. (Gabor Liptak via aajisaka)
 
-YARN-3537. NPE when NodeManager.serviceInit fails and stopRecoveryStore
-invoked (Brahma Reddy Battula via jlowe)
-
 YARN-3530. ATS throws exception on trying to filter results without 
otherinfo.
 (zhijie shen via xgong)
 
@@ -488,6 +482,12 @@ Release 2.7.1 - UNRELEASED
 YARN-3626. On Windows localized resources are not moved to the front
 of the classpath when they should be. (Craig Welch via xgong)
 
+YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore 
called.
+(Bibin A Chundatt via ozawa)
+
+YARN-3537. NPE when NodeManager.serviceInit fails and stopRecoveryStore
+invoked (Brahma Reddy Battula via jlowe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



[10/37] hadoop git commit: YARN-3613. TestContainerManagerSecurity should init and start Yarn cluster in setup instead of individual methods. (nijel via kasha)

2015-05-14 Thread jitendra
YARN-3613. TestContainerManagerSecurity should init and start Yarn cluster in 
setup instead of individual methods. (nijel via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe0df596
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe0df596
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe0df596

Branch: refs/heads/HDFS-7240
Commit: fe0df596271340788095cb43a1944e19ac4c2cf7
Parents: 2f4b6d1
Author: Karthik Kambatla ka...@apache.org
Authored: Tue May 12 10:45:33 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Tue May 12 10:45:33 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../server/TestContainerManagerSecurity.java| 46 +---
 2 files changed, 15 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe0df596/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 131161f..5a858cf 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -230,6 +230,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3513. Remove unused variables in ContainersMonitorImpl and add debug
 log for overall resource usage by all containers. (Naganarasimha G R via 
devaraj)
 
+YARN-3613. TestContainerManagerSecurity should init and start Yarn cluster 
in 
+setup instead of individual methods. (nijel via kasha)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe0df596/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index f0dcb56..59bb6aa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -82,8 +82,6 @@ import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
-import com.google.common.io.ByteArrayDataInput;
-import com.google.common.io.ByteStreams;
 
 @RunWith(Parameterized.class)
 public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
@@ -105,10 +103,20 @@ public class TestContainerManagerSecurity extends 
KerberosSecurityTestcase {
 testRootDir.mkdirs();
 httpSpnegoKeytabFile.deleteOnExit();
 getKdc().createPrincipal(httpSpnegoKeytabFile, httpSpnegoPrincipal);
+
+yarnCluster =
+new MiniYARNCluster(TestContainerManagerSecurity.class.getName(), 1, 1,
+1);
+yarnCluster.init(conf);
+yarnCluster.start();
   }
  
   @After
   public void tearDown() {
+if (yarnCluster != null) {
+  yarnCluster.stop();
+  yarnCluster = null;
+}
 testRootDir.delete();
   }
 
@@ -144,11 +152,6 @@ public class TestContainerManagerSecurity extends 
KerberosSecurityTestcase {
   
   @Test (timeout = 12)
   public void testContainerManager() throws Exception {
-try {
-  yarnCluster = new MiniYARNCluster(TestContainerManagerSecurity.class
-  .getName(), 1, 1, 1);
-  yarnCluster.init(conf);
-  yarnCluster.start();
   
   // TestNMTokens.
   testNMTokens(conf);
@@ -156,36 +159,11 @@ public class TestContainerManagerSecurity extends 
KerberosSecurityTestcase {
   // Testing for container token tampering
   testContainerToken(conf);
   
-} catch (Exception e) {
-  e.printStackTrace();
-  throw e;
-} finally {
-  if (yarnCluster != null) {
-yarnCluster.stop();
-yarnCluster = null;
-  }
-}
-  }
-
-  @Test (timeout = 12)
-  public void testContainerManagerWithEpoch() throws Exception {
-try {
-  yarnCluster = new MiniYARNCluster(TestContainerManagerSecurity.class
-  .getName(), 1, 1, 1);
-  yarnCluster.init(conf);
-  yarnCluster.start();
-
-  // Testing for container token tampering
+  // Testing for container token tampering with epoch
   testContainerTokenWithEpoch(conf);
 
-} finally {
-  if (yarnCluster != null) 

[37/37] hadoop git commit: YARN-3505. Node's Log Aggregation Report with SUCCEED should not cached in RMApps. Contributed by Xuan Gong.

2015-05-14 Thread jitendra
YARN-3505. Node's Log Aggregation Report with SUCCEED should not cached in 
RMApps. Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15ccd967
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15ccd967
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15ccd967

Branch: refs/heads/HDFS-7240
Commit: 15ccd967ee3e7046a50522089f67ba01f36ec76a
Parents: 05ff54c
Author: Junping Du junping...@apache.org
Authored: Thu May 14 10:57:36 2015 -0700
Committer: Junping Du junping...@apache.org
Committed: Thu May 14 10:58:12 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/api/records/LogAggregationStatus.java  |   2 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 +
 .../src/main/proto/yarn_protos.proto|   1 +
 .../src/main/resources/yarn-default.xml |   8 +
 .../protocolrecords/LogAggregationReport.java   |  16 +-
 .../protocolrecords/NodeHeartbeatRequest.java   |   7 +-
 .../impl/pb/LogAggregationReportPBImpl.java |  40 
 .../impl/pb/NodeHeartbeatRequestPBImpl.java |  82 +++
 .../hadoop/yarn/server/webapp/AppBlock.java |  19 +-
 .../yarn_server_common_service_protos.proto |  14 +-
 .../nodemanager/NodeStatusUpdaterImpl.java  |  46 +---
 .../logaggregation/AppLogAggregatorImpl.java|  19 +-
 .../server/resourcemanager/rmapp/RMAppImpl.java | 228 +++
 .../resourcemanager/rmnode/RMNodeImpl.java  |  13 +-
 .../rmnode/RMNodeStatusEvent.java   |  11 +-
 .../resourcemanager/webapp/RMAppBlock.java  |  11 +-
 .../webapp/RMAppLogAggregationStatusBlock.java  |  37 ++-
 .../TestRMAppLogAggregationStatus.java  | 181 +++
 19 files changed, 469 insertions(+), 279 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15ccd967/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0346c54..e0f2c52 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -111,6 +111,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3448. Added a rolling time-to-live LevelDB timeline store 
implementation.
 (Jonathan Eagles via zjshen)
 
+YARN-3505. Node's Log Aggregation Report with SUCCEED should not cached in 
+RMApps. (Xuan Gong via junping_du)
+
   IMPROVEMENTS
 
 YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15ccd967/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationStatus.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationStatus.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationStatus.java
index da1230c..1e10972 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationStatus.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationStatus.java
@@ -34,6 +34,8 @@ public enum LogAggregationStatus {
   /** Log Aggregation is Running. */
   RUNNING,
 
+  /** Log Aggregation is Running, but has failures in previous cycles. */
+  RUNNING_WITH_FAILURE,
   /**
* Log Aggregation is Succeeded. All of the logs have been aggregated
* successfully.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15ccd967/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 94f3e60..52fff14 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -718,6 +718,16 @@ public class YarnConfiguration extends Configuration {
   + proxy-user-privileges.enabled;
   public static boolean DEFAULT_RM_PROXY_USER_PRIVILEGES_ENABLED = false;
 
+  /**
+   * How many diagnostics/failure messages can be saved in RM for
+   * log aggregation. It also defines the number of diagnostics/failure
+   * messages can be shown in log aggregation web ui.
+   */
+  public 

[27/37] hadoop git commit: HDFS-8380. Always call addStoredBlock on blocks which have been shifted from one storage to another (cmccabe)

2015-05-14 Thread jitendra
HDFS-8380. Always call addStoredBlock on blocks which have been shifted from 
one storage to another (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/281d47a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/281d47a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/281d47a9

Branch: refs/heads/HDFS-7240
Commit: 281d47a96937bc329b1b4051ffcb8f5fcac98354
Parents: 711d77c
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed May 13 14:29:05 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Wed May 13 14:29:05 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../blockmanagement/BlockInfoContiguous.java|  18 ---
 .../server/blockmanagement/BlockManager.java|  12 +-
 .../TestNameNodePrunesMissingStorages.java  | 118 ++-
 4 files changed, 129 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/281d47a9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f4e40b7..932b500 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -749,6 +749,8 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8358. TestTraceAdmin fails (Masatake Iwasaki via kihwal)
 
+HDFS-8380. Always call addStoredBlock on blocks which have been shifted
+from one storage to another (cmccabe)
 
 Release 2.7.1 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/281d47a9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index 1ba3536..769046b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -231,24 +231,6 @@ public class BlockInfoContiguous extends Block
   }
 
   /**
-   * Find specified DatanodeDescriptor.
-   * @return index or -1 if not found.
-   */
-  boolean findDatanode(DatanodeDescriptor dn) {
-int len = getCapacity();
-for(int idx = 0; idx  len; idx++) {
-  DatanodeDescriptor cur = getDatanode(idx);
-  if(cur == dn) {
-return true;
-  }
-  if(cur == null) {
-break;
-  }
-}
-return false;
-  }
-
-  /**
* Find specified DatanodeStorageInfo.
* @return DatanodeStorageInfo or null if not found.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/281d47a9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ab2607b..6d5808e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1813,6 +1813,9 @@ public class BlockManager {
   if (storageInfo.getBlockReportCount() == 0) {
 // The first block report can be processed a lot more efficiently than
 // ordinary block reports.  This shortens restart times.
+LOG.info(Processing first storage report for  +
+storageInfo.getStorageID() +  from datanode  +
+nodeID.getDatanodeUuid());
 processFirstBlockReport(storageInfo, newReport);
   } else {
 invalidatedBlocks = processReport(storageInfo, newReport);
@@ -2068,7 +2071,12 @@ public class BlockManager {
 
 for (BlockReportReplica iblk : report) {
   ReplicaState reportedState = iblk.getState();
-  
+
+  if (LOG.isDebugEnabled()) {
+LOG.debug(Initial report of block  + iblk.getBlockName()
++  on  + storageInfo.getDatanodeDescriptor() +  size  +
+iblk.getNumBytes() +  replicaState =  + reportedState);
+  }
   if (shouldPostponeBlocksFromFuture &&
   namesystem.isGenStampInFuture(iblk)) {
 queueReportedBlock(storageInfo, iblk, 

[01/37] hadoop git commit: YARN-3489. RMServerUtils.validateResourceRequests should only obtain queue info once. (Varun Saxena via wangda)

2015-05-14 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 444836b3d -> 15ccd967e


YARN-3489. RMServerUtils.validateResourceRequests should only obtain queue info 
once. (Varun Saxena via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d6f67412
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d6f67412
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d6f67412

Branch: refs/heads/HDFS-7240
Commit: d6f6741296639a73f5306e3ebefec84a40ca03e5
Parents: 444836b
Author: Wangda Tan wan...@apache.org
Authored: Mon May 11 17:31:15 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Mon May 11 17:31:15 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../server/resourcemanager/RMServerUtils.java   | 10 ++-
 .../scheduler/SchedulerUtils.java   | 30 +++-
 3 files changed, 35 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6f67412/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 80cdf35..b5cb0a5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -405,6 +405,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3243. CapacityScheduler should pass headroom from parent to children
 to make sure ParentQueue obey its capacity limits. (Wangda Tan via jianhe)
 
+YARN-3489. RMServerUtils.validateResourceRequests should only obtain queue 
+info once. (Varun Saxena via wangda)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6f67412/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index 4669a28..4d2e41c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -35,6 +35,7 @@ import 
org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -94,9 +95,16 @@ public class RMServerUtils {
   Resource maximumResource, String queueName, YarnScheduler scheduler,
   RMContext rmContext)
   throws InvalidResourceRequestException {
+// Get queue from scheduler
+QueueInfo queueInfo = null;
+try {
+  queueInfo = scheduler.getQueueInfo(queueName, false, false);
+} catch (IOException e) {
+}
+
 for (ResourceRequest resReq : ask) {
   SchedulerUtils.normalizeAndvalidateRequest(resReq, maximumResource,
-  queueName, scheduler, rmContext);
+  queueName, scheduler, rmContext, queueInfo);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d6f67412/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
index 0ef5c1e..8047d0b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
+++ 

[09/37] hadoop git commit: HADOOP-11962. Sasl message with MD5 challenge text shouldn't be LOG out even in debug level. Contributed by Junping Du.

2015-05-14 Thread jitendra
HADOOP-11962. Sasl message with MD5 challenge text shouldn't be LOG out even in 
debug level. Contributed by Junping Du.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f4b6d11
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f4b6d11
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f4b6d11

Branch: refs/heads/HDFS-7240
Commit: 2f4b6d1157f280c8a6e1b2e7217fd2ec16991985
Parents: 5c2f05c
Author: Haohui Mai whe...@apache.org
Authored: Tue May 12 10:30:32 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Tue May 12 10:30:32 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java | 3 ---
 .../src/main/java/org/apache/hadoop/security/SaslRpcClient.java   | 3 ---
 .../java/org/apache/hadoop/security/UserGroupInformation.java | 3 ---
 4 files changed, 3 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f4b6d11/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 47731fb..a15444e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -668,6 +668,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11947. test-patch should return early from determine-issue when
 run in jenkins mode. (Sean Busbey via aw)
 
+HADOOP-11962. Sasl message with MD5 challenge text shouldn't be LOG out
+even in debug level. (Junping Du via wheat9)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f4b6d11/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 5f1809a..ac32ac9 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1488,9 +1488,6 @@ public abstract class Server {
 }
 
 private void doSaslReply(Message message) throws IOException {
-  if (LOG.isDebugEnabled()) {
-    LOG.debug("Sending sasl message " + message);
-  }
   setupResponse(saslResponse, saslCall,
   RpcStatusProto.SUCCESS, null,
   new RpcResponseWrapper(message), null, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f4b6d11/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
index 4a1a397..7d3afa8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
@@ -385,9 +385,6 @@ public class SaslRpcClient {
   }
   RpcSaslProto saslMessage =
   RpcSaslProto.parseFrom(responseWrapper.getMessageBytes());
-  if (LOG.isDebugEnabled()) {
-    LOG.debug("Received SASL message " + saslMessage);
-  }
   // handle sasl negotiation process
   RpcSaslProto.Builder response = null;
   switch (saslMessage.getState()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f4b6d11/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
index 4b0b5f3..be3d60d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
@@ -865,9 +865,6 @@ public class UserGroupInformation {
 .getPrivateCredentials(KerberosTicket.class);
 for (KerberosTicket ticket : tickets) {
   if (SecurityUtil.isOriginalTGT(ticket)) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Found tgt " + ticket);
-}
 return ticket;

[18/37] hadoop git commit: HDFS-8143. Mover should exit after some retry when failed to move blocks. Contributed by surendra singh lilhore

2015-05-14 Thread jitendra
HDFS-8143. Mover should exit after some retry when failed to move blocks.  
Contributed by surendra singh lilhore


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cdec12d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cdec12d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cdec12d1

Branch: refs/heads/HDFS-7240
Commit: cdec12d1b84d444e13bf997c817643ec24aaa832
Parents: 065d8f2
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Wed May 13 11:57:49 2015 -0700
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Wed May 13 11:57:49 2015 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +
 .../apache/hadoop/hdfs/server/mover/Mover.java  | 30 ---
 .../hadoop/hdfs/server/mover/TestMover.java | 39 +++-
 3 files changed, 65 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdec12d1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f8e9f3a..ae056fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -353,6 +353,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final longDFS_MOVER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;
  public static final String  DFS_MOVER_MOVERTHREADS_KEY = 
"dfs.mover.moverThreads";
  public static final int DFS_MOVER_MOVERTHREADS_DEFAULT = 1000;
+  public static final String  DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY = 
"dfs.mover.retry.max.attempts";
+  public static final int DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT = 10;
 
  public static final String  DFS_DATANODE_ADDRESS_KEY = 
"dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 50010;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdec12d1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index b32220c..0710f3e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -58,6 +58,7 @@ import java.io.InputStreamReader;
 import java.net.URI;
 import java.text.DateFormat;
 import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
 
 @InterfaceAudience.Private
 public class Mover {
@@ -107,10 +108,12 @@ public class Mover {
   private final Dispatcher dispatcher;
   private final StorageMap storages;
  private final List<Path> targetPaths;
+  private final int retryMaxAttempts;
+  private final AtomicInteger retryCount;
 
   private final BlockStoragePolicy[] blockStoragePolicies;
 
-  Mover(NameNodeConnector nnc, Configuration conf) {
+  Mover(NameNodeConnector nnc, Configuration conf, AtomicInteger retryCount) {
 final long movedWinWidth = conf.getLong(
 DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_KEY,
 DFSConfigKeys.DFS_MOVER_MOVEDWINWIDTH_DEFAULT);
@@ -120,7 +123,10 @@ public class Mover {
 final int maxConcurrentMovesPerNode = conf.getInt(
 DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
 DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
-
+this.retryMaxAttempts = conf.getInt(
+DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY,
+DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT);
+this.retryCount = retryCount;
this.dispatcher = new Dispatcher(nnc, Collections.<String> emptySet(),
Collections.<String> emptySet(), movedWinWidth, moverThreads, 0,
maxConcurrentMovesPerNode, conf);
@@ -255,14 +261,27 @@ public class Mover {
  * @return whether there is still remaining migration work for the next
  * round
  */
-private boolean processNamespace() {
+private boolean processNamespace() throws IOException {
   getSnapshottableDirs();
   boolean hasRemaining = false;
   for (Path target : targetPaths) {
 hasRemaining |= processPath(target.toUri().getPath());
   }
   // wait for pending move to finish and retry the failed migration
-  hasRemaining |= 

[29/37] hadoop git commit: HADOOP-11361. Fix a race condition in MetricsSourceAdapter.updateJmxCache. Contributed by Brahma Reddy Battula.

2015-05-14 Thread jitendra
HADOOP-11361. Fix a race condition in MetricsSourceAdapter.updateJmxCache. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4356e8a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4356e8a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4356e8a5

Branch: refs/heads/HDFS-7240
Commit: 4356e8a5ef0ac6d11a34704b80ef360a710e623a
Parents: 0e85044
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Thu May 14 10:20:45 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Thu May 14 10:20:45 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt|  3 +++
 .../hadoop/metrics2/impl/MetricsSourceAdapter.java | 17 +++--
 2 files changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4356e8a5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 207d144..5e6b9ea 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -674,6 +674,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11962. Sasl message with MD5 challenge text shouldn't be LOG out
 even in debug level. (Junping Du via wheat9)
 
+HADOOP-11361. Fix a race condition in MetricsSourceAdapter.updateJmxCache.
+(Brahma Reddy Battula via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4356e8a5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
index cae9c3d..f3ddc91 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
@@ -154,31 +154,28 @@ class MetricsSourceAdapter implements DynamicMBean {
 
   private void updateJmxCache() {
 boolean getAllMetrics = false;
-synchronized(this) {
+synchronized (this) {
  if (Time.now() - jmxCacheTS >= jmxCacheTTL) {
 // temporarilly advance the expiry while updating the cache
 jmxCacheTS = Time.now() + jmxCacheTTL;
 if (lastRecs == null) {
   getAllMetrics = true;
 }
-  }
-  else {
+  } else {
 return;
   }
-}
 
-if (getAllMetrics) {
-  MetricsCollectorImpl builder = new MetricsCollectorImpl();
-  getMetrics(builder, true);
-}
+  if (getAllMetrics) {
+MetricsCollectorImpl builder = new MetricsCollectorImpl();
+getMetrics(builder, true);
+  }
 
-synchronized(this) {
   updateAttrCache();
   if (getAllMetrics) {
 updateInfoCache();
   }
   jmxCacheTS = Time.now();
-  lastRecs = null;  // in case regular interval update is not running
+  lastRecs = null; // in case regular interval update is not running
 }
   }
 



[06/37] hadoop git commit: HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication. (Contributed by Zhe Zhang)

2015-05-14 Thread jitendra
HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication. 
(Contributed by Zhe Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d5da948
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d5da948
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d5da948

Branch: refs/heads/HDFS-7240
Commit: 6d5da9484185ca9f585195d6da069b9cd5be4044
Parents: 8badd82
Author: yliu y...@apache.org
Authored: Tue May 12 21:29:22 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue May 12 21:29:22 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../server/blockmanagement/BlockCollection.java |  2 +-
 .../blockmanagement/BlockInfoContiguous.java|  2 +-
 .../server/blockmanagement/BlockManager.java| 16 ++---
 .../blockmanagement/DecommissionManager.java| 10 
 .../hdfs/server/namenode/FSDirAttrOp.java   |  4 ++--
 .../hdfs/server/namenode/FSDirConcatOp.java |  4 ++--
 .../hdfs/server/namenode/FSDirectory.java   |  4 ++--
 .../hdfs/server/namenode/FSEditLogLoader.java   |  7 +++---
 .../hdfs/server/namenode/FSNamesystem.java  |  4 ++--
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  8 +++
 .../hdfs/server/namenode/NamenodeFsck.java  |  9 ---
 .../snapshot/FileWithSnapshotFeature.java   |  5 ++--
 .../blockmanagement/TestBlockManager.java   |  6 ++---
 .../blockmanagement/TestReplicationPolicy.java  |  4 ++--
 .../snapshot/TestFileWithSnapshotFeature.java   |  2 +-
 .../namenode/snapshot/TestSnapshotDeletion.java |  4 ++--
 .../snapshot/TestSnapshotReplication.java   | 25 
 18 files changed, 66 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d5da948/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7cff8d4..cd477af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -543,6 +543,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8357. Consolidate parameters of INode.CleanSubtree() into a parameter
 objects. (Li Lu via wheat9)
 
+HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication.
+(Contributed by Zhe Zhang)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d5da948/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index e9baf85..c0a959c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -58,7 +58,7 @@ public interface BlockCollection {
* Get block replication for the collection 
* @return block replication value
*/
-  public short getBlockReplication();
+  public short getPreferredBlockReplication();
 
   /** 
* @return the storage policy ID.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d5da948/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
index df27882..1ba3536 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
@@ -380,7 +380,7 @@ public class BlockInfoContiguous extends Block
 if(isComplete()) {
   BlockInfoContiguousUnderConstruction ucBlock =
   new BlockInfoContiguousUnderConstruction(this,
-  getBlockCollection().getBlockReplication(), s, targets);
+  getBlockCollection().getPreferredBlockReplication(), s, targets);
   ucBlock.setBlockCollection(getBlockCollection());
   return ucBlock;
 }


[14/37] hadoop git commit: YARN-3539. Updated timeline server documentation and marked REST APIs evolving. Contributed by Steve Loughran.

2015-05-14 Thread jitendra
YARN-3539. Updated timeline server documentation and marked REST APIs evolving. 
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fcd0702c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fcd0702c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fcd0702c

Branch: refs/heads/HDFS-7240
Commit: fcd0702c10ce574b887280476aba63d6682d5271
Parents: 2463666
Author: Zhijie Shen zjs...@apache.org
Authored: Tue May 12 21:12:18 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Tue May 12 21:13:22 2015 -0700

--
 .../src/site/markdown/Compatibility.md  |1 +
 hadoop-project/src/site/site.xml|1 +
 hadoop-yarn-project/CHANGES.txt |3 +
 .../TimelineDelegationTokenResponse.java|4 +-
 .../api/records/timeline/TimelineDomain.java|4 +-
 .../api/records/timeline/TimelineDomains.java   |4 +-
 .../api/records/timeline/TimelineEntities.java  |4 +-
 .../api/records/timeline/TimelineEntity.java|4 +-
 .../api/records/timeline/TimelineEvent.java |4 +-
 .../api/records/timeline/TimelineEvents.java|6 +-
 .../records/timeline/TimelinePutResponse.java   |6 +-
 .../yarn/api/records/timeline/package-info.java |2 +
 .../hadoop/yarn/client/api/TimelineClient.java  |4 +-
 .../client/api/impl/TimelineClientImpl.java |4 +-
 .../yarn/client/api/impl/package-info.java  |2 +
 .../hadoop/yarn/client/api/package-info.java|2 +
 .../TimelineDelegationTokenIdentifier.java  |4 +-
 .../client/TimelineDelegationTokenSelector.java |4 +-
 .../yarn/server/webapp/dao/AppAttemptInfo.java  |5 +
 .../yarn/server/webapp/dao/AppAttemptsInfo.java |5 +
 .../hadoop/yarn/server/webapp/dao/AppInfo.java  |5 +
 .../hadoop/yarn/server/webapp/dao/AppsInfo.java |7 +-
 .../yarn/server/webapp/dao/ContainerInfo.java   |5 +
 .../yarn/server/webapp/dao/ContainersInfo.java  |5 +
 .../src/site/markdown/TimelineServer.md | 1852 --
 25 files changed, 1800 insertions(+), 147 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcd0702c/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
index c058021..8326b5f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md
@@ -169,6 +169,7 @@ REST API compatibility corresponds to both the request 
(URLs) and responses to e
 * [NodeManager](../../hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html)
 * [MR Application 
Master](../../hadoop-yarn/hadoop-yarn-site/MapredAppMasterRest.html)
 * [History Server](../../hadoop-yarn/hadoop-yarn-site/HistoryServerRest.html)
+* [Timeline Server v1 REST 
API](../../hadoop-yarn/hadoop-yarn-site/TimelineServer.html)
 
  Policy
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcd0702c/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 71defe5..7234881 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -134,6 +134,7 @@
  <item name="Introduction" 
href="hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html"/>
  <item name="Resource Manager" 
href="hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html"/>
  <item name="Node Manager" 
href="hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html"/>
+  <item name="Timeline Server" 
href="TimelineServer.html#Timeline_Server_REST_API_v1"/>
 </menu>
 
 <menu name="Hadoop Compatible File Systems" inherit="top">

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcd0702c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5a858cf..59a3dd2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -414,6 +414,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3489. RMServerUtils.validateResourceRequests should only obtain queue 
 info once. (Varun Saxena via wangda)
 
+YARN-3539. Updated timeline server documentation and marked REST APIs 
evolving.
+(Steve Loughran via zjshen)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcd0702c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineDelegationTokenResponse.java

[21/37] hadoop git commit: YARN-2921. Fix MockRM/MockAM#waitForState sleep too long. (Tsuyoshi Ozawa via wangda)

2015-05-14 Thread jitendra
YARN-2921. Fix MockRM/MockAM#waitForState sleep too long. (Tsuyoshi Ozawa via 
wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/341a4768
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/341a4768
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/341a4768

Branch: refs/heads/HDFS-7240
Commit: 341a476812015d0d584b198b451ea9458645a47c
Parents: 93b770f
Author: Wangda Tan wan...@apache.org
Authored: Wed May 13 13:06:07 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed May 13 13:06:07 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../hadoop/yarn/event/AsyncDispatcher.java  |  3 +-
 .../yarn/server/resourcemanager/MockAM.java | 33 
 .../yarn/server/resourcemanager/MockRM.java | 55 ++--
 .../TestApplicationMasterService.java   |  1 +
 .../server/resourcemanager/TestRMRestart.java   | 15 +-
 .../applicationsmanager/TestAMRestart.java  | 20 +++
 7 files changed, 90 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/341a4768/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 59a3dd2..d61f7fc 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -400,6 +400,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3629. NodeID is always printed as null in node manager 
initialization log.
 (nijel via devaraj)
 
+YARN-2921. Fix MockRM/MockAM#waitForState sleep too long. 
+(Tsuyoshi Ozawa via wangda)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/341a4768/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
index d36d841..c54b9c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/AsyncDispatcher.java
@@ -141,7 +141,8 @@ public class AsyncDispatcher extends AbstractService 
implements Dispatcher {
   synchronized (waitForDrained) {
  while (!drained && eventHandlingThread.isAlive()) {
    waitForDrained.wait(1000);
-  LOG.info("Waiting for AsyncDispatcher to drain.");
+  LOG.info("Waiting for AsyncDispatcher to drain. Thread state is :" +
+  eventHandlingThread.getState());
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/341a4768/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
index 5c107aa..0e25360 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockAM.java
@@ -43,10 +43,13 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.util.Records;
+import org.apache.log4j.Logger;
 import org.junit.Assert;
 
 public class MockAM {
 
+  private static final Logger LOG = Logger.getLogger(MockAM.class);
+
   private volatile int responseId = 0;
   private final ApplicationAttemptId attemptId;
   private RMContext context;
@@ -73,18 +76,28 @@ public class MockAM {
   public void waitForState(RMAppAttemptState finalState) throws Exception {
 RMApp app = context.getRMApps().get(attemptId.getApplicationId());
 RMAppAttempt attempt = app.getRMAppAttempt(attemptId);
-int timeoutSecs = 0;
+final int timeoutMsecs = 4;
+

[16/37] hadoop git commit: HADOOP-9723. Improve error message when hadoop archive output path already exists. Contributed by Jean-Baptiste Onofré and Yongjun Zhang.

2015-05-14 Thread jitendra
HADOOP-9723. Improve error message when hadoop archive output path already 
exists. Contributed by Jean-Baptiste Onofré and Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92c38e41
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92c38e41
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92c38e41

Branch: refs/heads/HDFS-7240
Commit: 92c38e41e1fffb9d60d4fa5d4d2212777af9e9a5
Parents: e82067b
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed May 13 17:28:57 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Wed May 13 17:28:57 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../org/apache/hadoop/tools/HadoopArchives.java | 13 --
 .../apache/hadoop/tools/TestHadoopArchives.java | 44 +++-
 3 files changed, 54 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92c38e41/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a15444e..2fbecbb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -560,6 +560,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11948. test-patch's issue matching regex should be configurable.
 (Sean Busbey via aw)
 
+HADOOP-9723. Improve error message when hadoop archive output path already
+exists. (Jean-Baptiste Onofré and Yongjun Zhang via aajisak)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92c38e41/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
--
diff --git 
a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
 
b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
index c5c42b1..f00bb6d 100644
--- 
a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
+++ 
b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java
@@ -469,8 +469,13 @@ public class HadoopArchives implements Tool {
 Path outputPath = new Path(dest, archiveName);
 FileOutputFormat.setOutputPath(conf, outputPath);
 FileSystem outFs = outputPath.getFileSystem(conf);
-if (outFs.exists(outputPath) || outFs.isFile(dest)) {
-  throw new IOException("Invalid Output: " + outputPath);
+if (outFs.exists(outputPath)) {
+  throw new IOException("Archive path: "
+  + outputPath.toString() + " already exists");
+}
+if (outFs.isFile(dest)) {
+  throw new IOException("Destination " + dest.toString()
+  + " should be a directory but is a file");
 }
 conf.set(DST_DIR_LABEL, outputPath.toString());
 Path stagingArea;
@@ -846,8 +851,8 @@ public class HadoopArchives implements Tool {
   Path argPath = new Path(args[i]);
   if (argPath.isAbsolute()) {
 System.out.println(usage);
-throw new IOException("source path " + argPath +
- " is not relative  to " + parentPath);
+throw new IOException("Source path " + argPath +
+ " is not relative to " + parentPath);
   }
   srcPaths.add(new Path(parentPath, argPath));
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/92c38e41/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
--
diff --git 
a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
 
b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
index 3fa5919..101cb06 100644
--- 
a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
+++ 
b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.tools;
 import java.io.ByteArrayOutputStream;
 import java.io.FilterInputStream;
 import java.io.IOException;
+import java.io.OutputStream;
 import java.io.PrintStream;
 import java.net.URI;
 import java.util.ArrayList;
@@ -175,8 +176,47 @@ public class TestHadoopArchives {
    final List<String> harPaths = lsr(shell, fullHarPathStr);
 Assert.assertEquals(originalPaths, harPaths);
   }
-  
-@Test
+
+  @Test
+  public void testOutputPathValidity() throws Exception {
+final String inputPathStr = inputPath.toUri().getPath();
+final URI uri = fs.getUri();
+final 

[30/37] hadoop git commit: HDFS-8243. Files written by TestHostsFiles and TestNameNodeMXBean are causing Release Audit Warnings. (Contributed by Ruth Wisniewski)

2015-05-14 Thread jitendra
HDFS-8243. Files written by TestHostsFiles and TestNameNodeMXBean are causing 
Release Audit Warnings. (Contributed by Ruth Wisniewski)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54fa9b42
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54fa9b42
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54fa9b42

Branch: refs/heads/HDFS-7240
Commit: 54fa9b421797885c1d3f20db215a883c8fca84e1
Parents: 4356e8a
Author: Arpit Agarwal a...@apache.org
Authored: Wed May 13 19:43:53 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Wed May 13 19:43:53 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../hadoop/hdfs/server/namenode/TestHostsFiles.java | 12 +++-
 .../hadoop/hdfs/server/namenode/TestNameNodeMXBean.java | 12 
 3 files changed, 22 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54fa9b42/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 932b500..2e51086 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -752,6 +752,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8380. Always call addStoredBlock on blocks which have been shifted
 from one storage to another (cmccabe)
 
+HDFS-8243. Files written by TestHostsFiles and TestNameNodeMXBean are
+causing Release Audit Warnings. (Ruth Wisniewski via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54fa9b42/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
index 1806d82..a93cc2a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
@@ -20,9 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.junit.Assert.assertTrue;
 
 import java.lang.management.ManagementFactory;
+import java.io.File;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
@@ -126,7 +128,12 @@ public class TestHostsFiles {
  assertTrue("Live nodes should contain the decommissioned node",
  nodes.contains("Decommissioned"));
 } finally {
-  cluster.shutdown();
+  if (cluster != null) {
+cluster.shutdown();
+  }
+  if (localFileSys.exists(dir)) {
+FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+  }
 }
   }
 
@@ -167,6 +174,9 @@ public class TestHostsFiles {
   if (cluster != null) {
 cluster.shutdown();
   }
+  if (localFileSys.exists(dir)) {
+FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+  }
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54fa9b42/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
index c649621..681e8a4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.commons.io.FileUtils;
 import com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -214,6 +215,8 @@ public class TestNameNodeMXBean {
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
 MiniDFSCluster cluster = null;
+FileSystem localFileSys = null;
+Path dir = null;
 
 try {
   cluster = new 

[04/37] hadoop git commit: MAPREDUCE-6360. TestMapreduceConfigFields is placed in wrong dir, introducing compile error (Contributed by Arshad Mohammad)

2015-05-14 Thread jitendra
MAPREDUCE-6360. TestMapreduceConfigFields is placed in wrong dir, introducing 
compile error (Contributed by Arshad Mohammad)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/360dff59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/360dff59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/360dff59

Branch: refs/heads/HDFS-7240
Commit: 360dff5903085c3c7f02ccf9f17d71a842275e48
Parents: 987abc9
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue May 12 12:49:16 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue May 12 12:49:16 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../mapred/TestMapreduceConfigFields.java   | 76 
 .../mapreduce/TestMapreduceConfigFields.java| 76 
 3 files changed, 79 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/360dff59/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index f48f847..ca92a97 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -411,6 +411,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-5465. Tasks are often killed before they exit on their own
 (Ming Ma via jlowe)
 
+MAPREDUCE-6360. TestMapreduceConfigFields is placed in wrong dir, 
+introducing compile error (Arshad Mohammad via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/360dff59/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestMapreduceConfigFields.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestMapreduceConfigFields.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestMapreduceConfigFields.java
deleted file mode 100644
index 7f18714..000
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestMapreduceConfigFields.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapreduce;
-
-import java.util.HashSet;
-
-import org.apache.hadoop.conf.TestConfigurationFieldsBase;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.ShuffleHandler;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
-
-/**
- * Unit test class to compare the following MR Configuration classes:
- * <p/>
- * {@link org.apache.hadoop.mapreduce.MRJobConfig}
- * {@link org.apache.hadoop.mapreduce.MRConfig}
- * {@link org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig}
- * {@link org.apache.hadoop.mapred.ShuffleHandler}
- * {@link org.apache.hadoop.mapreduce.lib.output.FileOutputFormat}
- * {@link org.apache.hadoop.mapreduce.lib.input.FileInputFormat}
- * {@link org.apache.hadoop.mapreduce.Job}
- * {@link org.apache.hadoop.mapreduce.lib.input.NLineInputFormat}
- * {@link org.apache.hadoop.mapred.JobConf}
- * <p/>
- * against mapred-default.xml for missing properties.  Currently only
- * throws an error if the class is missing a property.
- * <p/>
- * Refer to {@link org.apache.hadoop.conf.TestConfigurationFieldsBase}
- * for how this class works.
- */
-public class TestMapreduceConfigFields extends TestConfigurationFieldsBase {
-
-  @SuppressWarnings("deprecation")
-  @Override
-  public void initializeMemberVariables() {
-

[36/37] hadoop git commit: MAPREDUCE-5708. Duplicate String.format in YarnOutputFiles.getSpillFileForWrite. Contributed by Konstantin Weitz.

2015-05-14 Thread jitendra
MAPREDUCE-5708. Duplicate String.format in
YarnOutputFiles.getSpillFileForWrite. Contributed by Konstantin Weitz.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05ff54c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05ff54c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05ff54c6

Branch: refs/heads/HDFS-7240
Commit: 05ff54c66c49301c4ec2549704d9d459e784572c
Parents: def9136
Author: Devaraj K deva...@apache.org
Authored: Thu May 14 22:09:54 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Thu May 14 22:09:54 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java  | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05ff54c6/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 7fe8483..bc1f427 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -422,6 +422,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6366. mapreduce.terasort.final.sync configuration in TeraSort
 doesn't work. (Takuya Fukudome via ozawa)
 
+MAPREDUCE-5708. Duplicate String.format in 
YarnOutputFiles.getSpillFileForWrite.
+(Konstantin Weitz via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05ff54c6/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
index e08e093..e099b8f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnOutputFiles.java
@@ -157,8 +157,8 @@ public class YarnOutputFiles extends MapOutputFile {
   public Path getSpillFileForWrite(int spillNumber, long size)
   throws IOException {
 return lDirAlloc.getLocalPathForWrite(
-String.format(String.format(SPILL_FILE_PATTERN,
-conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber)), size, conf);
+String.format(SPILL_FILE_PATTERN,
+conf.get(JobContext.TASK_ATTEMPT_ID), spillNumber), size, conf);
   }
 
   /**



[24/37] hadoop git commit: YARN-3521. Support return structured NodeLabel objects in REST API (Sunil G via wangda)

2015-05-14 Thread jitendra
YARN-3521. Support return structured NodeLabel objects in REST API (Sunil G via 
wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f19e7a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f19e7a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f19e7a2

Branch: refs/heads/HDFS-7240
Commit: 7f19e7a2549a098236d06b29b7076bb037533f05
Parents: d4f53fc
Author: Wangda Tan wan...@apache.org
Authored: Wed May 13 13:43:17 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed May 13 13:43:17 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   8 +-
 .../resourcemanager/webapp/NodeIDsInfo.java |   2 +
 .../resourcemanager/webapp/RMWebServices.java   |  71 +++---
 .../webapp/dao/LabelsToNodesInfo.java   |   6 +-
 .../webapp/dao/NodeLabelInfo.java   |  86 +++
 .../webapp/dao/NodeLabelsInfo.java  |  56 -
 .../webapp/dao/NodeToLabelsEntry.java   |  54 +
 .../webapp/dao/NodeToLabelsEntryList.java   |  41 
 .../webapp/dao/NodeToLabelsInfo.java|  12 +-
 .../webapp/TestRMWebServicesNodeLabels.java | 225 ---
 10 files changed, 427 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f19e7a2/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 16c2dd9..38f5e81 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -233,6 +233,11 @@ Release 2.8.0 - UNRELEASED
 YARN-3613. TestContainerManagerSecurity should init and start Yarn cluster 
in 
 setup instead of individual methods. (nijel via kasha)
 
+YARN-3579. CommonNodeLabelsManager should support NodeLabel instead of 
string 
+label name when getting node-to-label/label-to-label mappings. (Sunil G 
via wangda)
+
+YARN-3521. Support return structured NodeLabel objects in REST API (Sunil 
G via wangda)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not
@@ -420,9 +425,6 @@ Release 2.7.1 - UNRELEASED
 YARN-3539. Updated timeline server documentation and marked REST APIs 
evolving.
 (Steve Loughran via zjshen)
 
-YARN-3579. CommonNodeLabelsManager should support NodeLabel instead of 
string 
-label name when getting node-to-label/label-to-label mappings. (Sunil G 
via wangda)
-
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f19e7a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
index 39d636d..c23b02a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
@@ -23,6 +23,7 @@ import java.util.List;
 
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
 
 @XmlRootElement(name = "labelsToNodesInfo")
@@ -32,6 +33,7 @@ public class NodeIDsInfo {
   /**
* Set doesn't support default no arg constructor which is req by JAXB
*/
+  @XmlElement(name="nodes")
   protected ArrayList<String> nodeIDsList = new ArrayList<String>();
 
   public NodeIDsInfo() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f19e7a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 4ce2b54..a0a3123 100644
--- 

[20/37] hadoop git commit: HADOOP-11966. Variable cygwin is undefined in hadoop-config.sh when executed through hadoop-daemon.sh. Contributed by Chris Nauroth.

2015-05-14 Thread jitendra
HADOOP-11966. Variable cygwin is undefined in hadoop-config.sh when executed 
through hadoop-daemon.sh. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93b770f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93b770f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93b770f7

Branch: refs/heads/HDFS-7240
Commit: 93b770f7e778835a9dd76854b435c5250835d1a8
Parents: f9a46a0
Author: cnauroth cnaur...@apache.org
Authored: Wed May 13 12:25:06 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Wed May 13 12:25:06 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93b770f7/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2fbecbb..207d144 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -703,6 +703,9 @@ Release 2.7.1 - UNRELEASED
 HADOOP-11663. Remove description about Java 6 from docs.
 (Masatake Iwasaki via aajisaka)
 
+HADOOP-11966. Variable cygwin is undefined in hadoop-config.sh when 
executed
+through hadoop-daemon.sh. (cnauroth)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



[05/37] hadoop git commit: YARN-3513. Remove unused variables in ContainersMonitorImpl and add debug log for overall resource usage by all containers. Contributed by Naganarasimha G R.

2015-05-14 Thread jitendra
YARN-3513. Remove unused variables in ContainersMonitorImpl and add debug
log for overall resource usage by all containers.  Contributed by
Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8badd82c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8badd82c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8badd82c

Branch: refs/heads/HDFS-7240
Commit: 8badd82ce256e4dc8c234961120d62a88358ab39
Parents: 360dff5
Author: Devaraj K deva...@apache.org
Authored: Tue May 12 16:54:38 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Tue May 12 16:54:38 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../monitor/ContainersMonitorImpl.java  | 27 ++--
 2 files changed, 22 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8badd82c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2412dce..d5fc259 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -227,6 +227,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3587. Fix the javadoc of DelegationTokenSecretManager in yarn, etc. 
 projects. (Gabor Liptak via junping_du)
 
+YARN-3513. Remove unused variables in ContainersMonitorImpl and add debug
+log for overall resource usage by all containers. (Naganarasimha G R via 
devaraj)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8badd82c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 76bbda1..d1e5e01 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -389,8 +389,10 @@ public class ContainersMonitorImpl extends AbstractService 
implements
 
 // Now do the monitoring for the trackingContainers
 // Check memory usage and kill any overflowing containers
-long vmemStillInUsage = 0;
-long pmemStillInUsage = 0;
+long vmemUsageByAllContainers = 0;
+long pmemByAllContainers = 0;
+long cpuUsagePercentPerCoreByAllContainers = 0;
+long cpuUsageTotalCoresByAllContainers = 0;
 for (Iterator<Map.Entry<ContainerId, ProcessTreeInfo>> it =
 trackingContainers.entrySet().iterator(); it.hasNext();) {
 
@@ -504,6 +506,13 @@ public class ContainersMonitorImpl extends AbstractService 
implements
   containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM;
 }
 
+// Accounting the total memory in usage for all containers
+vmemUsageByAllContainers += currentVmemUsage;
+pmemByAllContainers += currentPmemUsage;
+// Accounting the total cpu usage for all containers
+cpuUsagePercentPerCoreByAllContainers += cpuUsagePercentPerCore;
+cpuUsageTotalCoresByAllContainers += cpuUsagePercentPerCore;
+
 if (isMemoryOverLimit) {
   // Virtual or physical memory over limit. Fail the container and
   // remove
@@ -520,12 +529,6 @@ public class ContainersMonitorImpl extends AbstractService 
implements
   containerExitStatus, msg));
   it.remove();
   LOG.info("Removed ProcessTree with root " + pId);
-} else {
-  // Accounting the total memory in usage for all containers that
-  // are still
-  // alive and within limits.
-  vmemStillInUsage += currentVmemUsage;
-  pmemStillInUsage += currentPmemUsage;
 }
   } catch (Exception e) {
 // Log the exception and proceed to the next container.
@@ -533,6 +536,14 @@ public class ContainersMonitorImpl extends 

[31/37] hadoop git commit: HDFS-7728. Avoid updating quota usage while loading edits. Contributed by Jing Zhao.

2015-05-14 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2c85db8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index 0788e75..62aaccd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -45,21 +45,23 @@ public class FileDiffList extends
 
   public void destroyAndCollectSnapshotBlocks(
   BlocksMapUpdateInfo collectedBlocks) {
-for(FileDiff d : asList())
+for (FileDiff d : asList()) {
   d.destroyAndCollectSnapshotBlocks(collectedBlocks);
+}
   }
 
   public void saveSelf2Snapshot(int latestSnapshotId, INodeFile iNodeFile,
   INodeFileAttributes snapshotCopy, boolean withBlocks) {
 final FileDiff diff =
 super.saveSelf2Snapshot(latestSnapshotId, iNodeFile, snapshotCopy);
-if(withBlocks)  // Store blocks if this is the first update
+if (withBlocks) {  // Store blocks if this is the first update
   diff.setBlocks(iNodeFile.getBlocks());
+}
   }
 
   public BlockInfoContiguous[] findEarlierSnapshotBlocks(int snapshotId) {
 assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
-if(snapshotId == Snapshot.CURRENT_STATE_ID) {
+if (snapshotId == Snapshot.CURRENT_STATE_ID) {
   return null;
 }
 ListFileDiff diffs = this.asList();
@@ -76,15 +78,15 @@ public class FileDiffList extends
 
   public BlockInfoContiguous[] findLaterSnapshotBlocks(int snapshotId) {
 assert snapshotId != Snapshot.NO_SNAPSHOT_ID : "Wrong snapshot id";
-if(snapshotId == Snapshot.CURRENT_STATE_ID) {
+if (snapshotId == Snapshot.CURRENT_STATE_ID) {
   return null;
 }
 ListFileDiff diffs = this.asList();
 int i = Collections.binarySearch(diffs, snapshotId);
 BlockInfoContiguous[] blocks = null;
-for(i = i >= 0 ? i+1 : -i-1; i < diffs.size(); i++) {
+for (i = i >= 0 ? i+1 : -i-1; i < diffs.size(); i++) {
   blocks = diffs.get(i).getBlocks();
-  if(blocks != null) {
+  if (blocks != null) {
 break;
   }
 }
@@ -99,7 +101,7 @@ public class FileDiffList extends
   void combineAndCollectSnapshotBlocks(
   INode.ReclaimContext reclaimContext, INodeFile file, FileDiff removed) {
 BlockInfoContiguous[] removedBlocks = removed.getBlocks();
-if(removedBlocks == null) {
+if (removedBlocks == null) {
   FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
   assert sf != null : "FileWithSnapshotFeature is null";
   if(sf.isCurrentFileDeleted())
@@ -109,8 +111,9 @@ public class FileDiffList extends
 int p = getPrior(removed.getSnapshotId(), true);
 FileDiff earlierDiff = p == Snapshot.NO_SNAPSHOT_ID ? null : 
getDiffById(p);
 // Copy blocks to the previous snapshot if not set already
-if(earlierDiff != null)
+if (earlierDiff != null) {
   earlierDiff.setBlocks(removedBlocks);
+}
 BlockInfoContiguous[] earlierBlocks =
 (earlierDiff == null ? new BlockInfoContiguous[]{} : 
earlierDiff.getBlocks());
 // Find later snapshot (or file itself) with blocks
@@ -128,13 +131,13 @@ public class FileDiffList extends
 // Check if last block is part of truncate recovery
 BlockInfoContiguous lastBlock = file.getLastBlock();
 Block dontRemoveBlock = null;
-if(lastBlock != null && lastBlock.getBlockUCState().equals(
+if (lastBlock != null && lastBlock.getBlockUCState().equals(
 HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
   dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
   .getTruncateBlock();
 }
 // Collect the remaining blocks of the file, ignoring truncate block
-for(;i < removedBlocks.length; i++) {
+for (;i < removedBlocks.length; i++) {
   if(dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) 
{
 reclaimContext.collectedBlocks().addDeleteBlock(removedBlocks[i]);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2c85db8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileWithSnapshotFeature.java
index 213c186..555a662 100644
--- 

[12/37] hadoop git commit: HDFS-6184. Capture NN's thread dump when it fails over. Contributed by Ming Ma.

2015-05-14 Thread jitendra
HDFS-6184. Capture NN's thread dump when it fails over. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2463666e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2463666e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2463666e

Branch: refs/heads/HDFS-7240
Commit: 2463666ecb553dbde1b8c540a21ad3d599239acf
Parents: f24452d
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed May 13 11:37:22 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Wed May 13 11:37:22 2015 +0900

--
 .../apache/hadoop/ha/ZKFailoverController.java  |   5 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   4 +-
 .../hdfs/tools/DFSZKFailoverController.java |  60 +
 .../src/main/resources/hdfs-default.xml |  11 +
 .../ha/TestDFSZKFailoverController.java | 226 -
 .../hdfs/tools/TestDFSZKFailoverController.java | 243 +++
 7 files changed, 322 insertions(+), 230 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2463666e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 9eb1ff8..788d48e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -844,12 +844,11 @@ public abstract class ZKFailoverController {
* @return the last health state passed to the FC
* by the HealthMonitor.
*/
-  @VisibleForTesting
-  synchronized State getLastHealthState() {
+  protected synchronized State getLastHealthState() {
 return lastHealthState;
   }
 
-  private synchronized void setLastHealthState(HealthMonitor.State newState) {
+  protected synchronized void setLastHealthState(HealthMonitor.State newState) 
{
 LOG.info("Local service " + localTarget +
  " entered state: " + newState);
 lastHealthState = newState;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2463666e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cd477af..135b50c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -546,6 +546,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8255. Rename getBlockReplication to getPreferredBlockReplication.
 (Contributed by Zhe Zhang)
 
+HDFS-6184. Capture NN's thread dump when it fails over.
+(Ming Ma via aajisaka)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2463666e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 4356b9b..f8e9f3a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -543,7 +543,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT = false;
   public static final String DFS_HA_ZKFC_PORT_KEY = "dfs.ha.zkfc.port";
   public static final int DFS_HA_ZKFC_PORT_DEFAULT = 8019;
-  
+  public static final String DFS_HA_ZKFC_NN_HTTP_TIMEOUT_KEY = 
"dfs.ha.zkfc.nn.http.timeout.ms";
+  public static final int DFS_HA_ZKFC_NN_HTTP_TIMEOUT_KEY_DEFAULT = 20000;
+
   // Security-related configs
   public static final String DFS_ENCRYPT_DATA_TRANSFER_KEY = 
"dfs.encrypt.data.transfer";
   public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2463666e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
 

[33/37] hadoop git commit: HADOOP-8174. Remove confusing comment in Path#isAbsolute() (Contributed by Suresh Srinivas)

2015-05-14 Thread jitendra
HADOOP-8174. Remove confusing comment in Path#isAbsolute() (Contributed by 
Suresh Srinivas)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0daa5ada
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0daa5ada
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0daa5ada

Branch: refs/heads/HDFS-7240
Commit: 0daa5ada68db483275aaa7f2ed9a2b5eaf5bb9bd
Parents: b2c85db
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu May 14 14:17:36 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Thu May 14 14:17:36 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../src/main/java/org/apache/hadoop/fs/Path.java| 12 ++--
 2 files changed, 5 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0daa5ada/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5e6b9ea..bf39c94 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -677,6 +677,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11361. Fix a race condition in MetricsSourceAdapter.updateJmxCache.
 (Brahma Reddy Battula via ozawa)
 
+HADOOP-8174. Remove confusing comment in Path#isAbsolute()
+(Suresh Srinivas via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0daa5ada/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index caeb7a1..a38a46c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -31,8 +31,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
 /** Names a file or directory in a {@link FileSystem}.
- * Path strings use slash as the directory separator.  A path string is
- * absolute if it begins with a slash.
+ * Path strings use slash as the directory separator.
  */
 @Stringable
 @InterfaceAudience.Public
@@ -312,14 +311,7 @@ public class Path implements Comparable {
 return uri.getPath().startsWith(SEPARATOR, start);
}
   
-  /** True if the path component of this URI is absolute. */
-  /**
-   * There is some ambiguity here. An absolute path is a slash
-   * relative name without a scheme or an authority.
-   * So either this method was incorrectly named or its
-   * implementation is incorrect. This method returns true
-   * even if there is a scheme and authority.
-   */
+  /** True if the path is not a relative path and starts with root. */
   public boolean isAbsolute() {
  return isUriPathAbsolute();
   }



[17/37] hadoop git commit: HDFS-6300. Prevent multiple balancers from running simultaneously (Contributed by Rakesh R)

2015-05-14 Thread jitendra
HDFS-6300. Prevent multiple balancers from running simultaneously (Contributed 
by Rakesh R)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/065d8f2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/065d8f2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/065d8f2a

Branch: refs/heads/HDFS-7240
Commit: 065d8f2a34296b566e7ca541a284f7991212f14c
Parents: 92c38e4
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed May 13 17:27:34 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed May 13 17:27:34 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/balancer/NameNodeConnector.java | 16 +++-
 .../hdfs/server/balancer/TestBalancer.java  | 77 
 3 files changed, 92 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/065d8f2a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 135b50c..4fabf97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -826,6 +826,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
 goes for infinite loop (Rushabh S Shah  via kihwal)
 
+HDFS-6300. Prevent multiple balancers from running simultaneously
+(Rakesh R via vinayakumarb)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/065d8f2a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index cf5f36f..2e4f214 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -219,12 +219,20 @@ public class NameNodeConnector implements Closeable {
*/
   private OutputStream checkAndMarkRunning() throws IOException {
 try {
-  final FSDataOutputStream out = fs.create(idPath);
+  if (fs.exists(idPath)) {
+// try appending to it so that it will fail fast if another balancer is
+// running.
+IOUtils.closeStream(fs.append(idPath));
+fs.delete(idPath, true);
+  }
+  final FSDataOutputStream fsout = fs.create(idPath, false);
+  // mark balancer idPath to be deleted during filesystem closure
+  fs.deleteOnExit(idPath);
   if (write2IdFile) {
-out.writeBytes(InetAddress.getLocalHost().getHostName());
-out.hflush();
+fsout.writeBytes(InetAddress.getLocalHost().getHostName());
+fsout.hflush();
   }
-  return out;
+  return fsout;
 } catch(RemoteException e) {
   
if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
 return null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/065d8f2a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index edffb82..e756f0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -28,6 +28,7 @@ import static org.junit.Assume.assumeTrue;
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.net.InetAddress;
 import java.net.URI;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
@@ -45,6 +46,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
@@ -1370,6 +1372,81 @@ public class TestBalancer {
   cluster.shutdown();
 }
   }
+
+  /**
+   * Test running many balancer 

[03/37] hadoop git commit: HDFS-8362. Java Compilation Error in TestHdfsConfigFields.java (Contributed by Arshad Mohammad)

2015-05-14 Thread jitendra
HDFS-8362. Java Compilation Error in TestHdfsConfigFields.java (Contributed by 
Arshad Mohammad)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/987abc99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/987abc99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/987abc99

Branch: refs/heads/HDFS-7240
Commit: 987abc99b0309a07f0a342746b2a5048d5c36ce0
Parents: 3d28611
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue May 12 12:09:13 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue May 12 12:09:13 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/987abc99/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b67caed..7cff8d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -738,6 +738,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8351. Remove namenode -finalize option from document. (aajisaka)
 
+HDFS-8362. Java Compilation Error in TestHdfsConfigFields.java
+(Arshad Mohammad via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/987abc99/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index 0e75d81..a1f8a3c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.hdfs.tools;
+package org.apache.hadoop.tools;
 
 import java.util.HashSet;
 



[08/37] hadoop git commit: YARN-3629. NodeID is always printed as null in node manager initialization log. Contributed by nijel.

2015-05-14 Thread jitendra
YARN-3629. NodeID is always printed as null in node manager
initialization log. Contributed by nijel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c2f05cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c2f05cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c2f05cd

Branch: refs/heads/HDFS-7240
Commit: 5c2f05cd9bad9bf9beb0f4ca18f4ae1bc3e84499
Parents: f4e2b3c
Author: Devaraj K deva...@apache.org
Authored: Tue May 12 22:20:25 2015 +0530
Committer: Devaraj K deva...@apache.org
Committed: Tue May 12 22:20:25 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c2f05cd/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d5fc259..131161f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -394,6 +394,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3602. 
TestResourceLocalizationService.testPublicResourceInitializesLocalDir
 fails Intermittently due to IOException from cleanup. (zhihai xu via xgong)
 
+YARN-3629. NodeID is always printed as null in node manager 
initialization log.
+(nijel via devaraj)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c2f05cd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index b1ab5f1..0eb7ff4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -199,7 +199,7 @@ public class NodeStatusUpdaterImpl extends AbstractService 
implements
 + durationToTrackStoppedContainers);
 }
 super.serviceInit(conf);
-LOG.info("Initialized nodemanager for " + nodeId + ":" +
+LOG.info("Initialized nodemanager with :" +
 " physical-memory=" + memoryMb + " virtual-memory=" + virtualMemoryMb +
 " virtual-cores=" + virtualCores);
 
@@ -213,6 +213,7 @@ public class NodeStatusUpdaterImpl extends AbstractService 
implements
 
 // NodeManager is the last service to start, so NodeId is available.
 this.nodeId = this.context.getNodeId();
+LOG.info("Node ID assigned is : " + this.nodeId);
 this.httpPort = this.context.getHttpPort();
 this.nodeManagerVersionId = YarnVersionInfo.getVersion();
 try {



[11/37] hadoop git commit: MAPREDUCE-6251. Added a new config for JobClient to retry JobStatus calls so that they don't fail on history-server backed by DFSes with not so strong guarantees. Contribute

2015-05-14 Thread jitendra
MAPREDUCE-6251. Added a new config for JobClient to retry JobStatus calls so 
that they don't fail on history-server backed by DFSes with not so strong 
guarantees. Contributed by Craig Welch.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f24452d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f24452d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f24452d1

Branch: refs/heads/HDFS-7240
Commit: f24452d14e9ba48cdb82e5e6e5c10ce5b1407308
Parents: fe0df59
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Tue May 12 12:11:42 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Tue May 12 12:11:42 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  5 ++
 .../org/apache/hadoop/mapred/JobClient.java | 51 +++
 .../apache/hadoop/mapreduce/MRJobConfig.java| 15 +
 .../src/main/resources/mapred-default.xml   | 17 +
 .../apache/hadoop/mapred/JobClientUnitTest.java | 65 
 5 files changed, 142 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24452d1/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 15cdf90..fc98376 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -328,6 +328,7 @@ Release 2.8.0 - UNRELEASED
   OPTIMIZATIONS
 
   BUG FIXES
+
 MAPREDUCE-6314. TestPipeApplication fails on trunk.
 (Varun Vasudev via harsh)
 
@@ -450,6 +451,10 @@ Release 2.7.1 - UNRELEASED
 MAPREDUCE-6259. IllegalArgumentException due to missing job submit time
 (zhihai xu via jlowe)
 
+MAPREDUCE-6251. Added a new config for JobClient to retry JobStatus calls 
so
+that they don't fail on history-server backed by DFSes with not so strong
+guarantees. (Craig Welch via vinodkv)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24452d1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
index e91fbfe..cf123c7 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.mapred.ClusterStatus.BlackListInfo;
 import org.apache.hadoop.mapreduce.Cluster;
 import org.apache.hadoop.mapreduce.ClusterMetrics;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.QueueInfo;
 import org.apache.hadoop.mapreduce.TaskTrackerInfo;
 import org.apache.hadoop.mapreduce.TaskType;
@@ -154,6 +155,10 @@ public class JobClient extends CLI {
   public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }
   private TaskStatusFilter taskOutputFilter = TaskStatusFilter.FAILED; 
   
+  private int maxRetry = MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES;
+  private long retryInterval =
+  MRJobConfig.DEFAULT_MR_CLIENT_JOB_RETRY_INTERVAL;
+
   static{
 ConfigUtil.loadResources();
   }
@@ -469,6 +474,14 @@ public class JobClient extends CLI {
 setConf(conf);
 cluster = new Cluster(conf);
 clientUgi = UserGroupInformation.getCurrentUser();
+
+maxRetry = conf.getInt(MRJobConfig.MR_CLIENT_JOB_MAX_RETRIES,
+  MRJobConfig.DEFAULT_MR_CLIENT_JOB_MAX_RETRIES);
+
+retryInterval =
+  conf.getLong(MRJobConfig.MR_CLIENT_JOB_RETRY_INTERVAL,
+MRJobConfig.DEFAULT_MR_CLIENT_JOB_RETRY_INTERVAL);
+
   }
 
   /**
@@ -581,16 +594,8 @@ public class JobClient extends CLI {
   }
 });
   }
-  /**
-   * Get an {@link RunningJob} object to track an ongoing job.  Returns
-   * null if the id does not correspond to any known job.
-   * 
-   * @param jobid the jobid of the job.
-   * @return the {@link RunningJob} handle to track the job, null if the 
-   * <code>jobid</code> doesn't correspond to any known job.
-   * @throws IOException
-   */
-  public RunningJob getJob(final JobID jobid) throws IOException {
+
+  protected RunningJob getJobInner(final JobID jobid) throws IOException {
 try {
   
   Job job = 

[28/37] hadoop git commit: YARN-3362. Add node label usage in RM CapacityScheduler web UI. (Naganarasimha G R via wangda)

2015-05-14 Thread jitendra
YARN-3362. Add node label usage in RM CapacityScheduler web UI. (Naganarasimha 
G R via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e85044e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e85044e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e85044e

Branch: refs/heads/HDFS-7240
Commit: 0e85044e26da698c45185585310ae0e99448cd80
Parents: 281d47a
Author: Wangda Tan wan...@apache.org
Authored: Wed May 13 17:00:36 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed May 13 17:00:36 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../scheduler/capacity/AbstractCSQueue.java |   2 +-
 .../webapp/CapacitySchedulerPage.java   | 161 +++
 .../resourcemanager/webapp/RMWebServices.java   |   6 +-
 .../webapp/dao/CapacitySchedulerInfo.java   |  37 +++--
 .../dao/CapacitySchedulerLeafQueueInfo.java |   4 +-
 .../webapp/dao/CapacitySchedulerQueueInfo.java  |  29 ++--
 .../capacity/TestCapacityScheduler.java |   9 +-
 8 files changed, 191 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e85044e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index af8d26f..0346c54 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -238,6 +238,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3521. Support return structured NodeLabel objects in REST API (Sunil 
G via wangda)
 
+YARN-3362. Add node label usage in RM CapacityScheduler web UI.
+(Naganarasimha G R via wangda)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e85044e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 8b4637a..cd5bd8d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -556,7 +556,7 @@ public abstract class AbstractCSQueue implements CSQueue {
 queueUsage, nodePartition, cluster, schedulingMode);
   }
   
-  boolean accessibleToPartition(String nodePartition) {
+  public boolean accessibleToPartition(String nodePartition) {
 // if queue's label is *, it can access any node
    if (accessibleLabels != null
        && accessibleLabels.contains(RMNodeLabelsManager.ANY)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e85044e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index 4381a34..255150e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -22,14 +22,18 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
 
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.nodelabels.RMNodeLabel;
 import 

[22/37] hadoop git commit: YARN-3626. On Windows localized resources are not moved to the front of the classpath when they should be. Contributed by Craig Welch

2015-05-14 Thread jitendra
YARN-3626. On Windows localized resources are not moved to the front of the 
classpath when they should be. Contributed by Craig Welch


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f959214
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f959214
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f959214

Branch: refs/heads/HDFS-7240
Commit: 0f95921447ea547bdf9caf18f7fde46bc66031f8
Parents: 341a476
Author: Xuan xg...@apache.org
Authored: Wed May 13 13:10:53 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Wed May 13 13:10:53 2015 -0700

--
 .../apache/hadoop/mapreduce/v2/util/MRApps.java |   7 +-
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 ++
 .../launcher/ContainerLaunch.java   |  35 -
 .../launcher/TestContainerLaunch.java   | 129 +++
 5 files changed, 181 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f959214/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index 6b115b3..6d82104 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -241,7 +241,12 @@ public class MRApps extends Apps {
 boolean userClassesTakesPrecedence = 
   conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, false);
 
-String classpathEnvVar = 
+if (userClassesTakesPrecedence) {
+  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH_PREPEND_DISTCACHE,
+true);
+}
+
+String classpathEnvVar =
   conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, false)
 ? Environment.APP_CLASSPATH.name() : Environment.CLASSPATH.name();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f959214/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d61f7fc..e5d5ecf 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -480,6 +480,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3493. RM fails to come up with error Failed to load/recover state
 when mem settings are changed. (Jian He via wangda)
 
+YARN-3626. On Windows localized resources are not moved to the front
+of the classpath when they should be. (Craig Welch via xgong)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f959214/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 48a75c0..94f3e60 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1249,6 +1249,16 @@ public class YarnConfiguration extends Configuration {
   + "application.classpath";
 
   /**
+   * Whether or not entries from the distributed cache should be preferred over
+   * the rest of the YARN CLASSPATH
+   */
+  public static final String YARN_APPLICATION_CLASSPATH_PREPEND_DISTCACHE =
+YARN_PREFIX + "application.classpath.prepend.distcache";
+
+  public static final boolean
+DEFAULT_YARN_APPLICATION_CLASSPATH_PREPEND_DISTCACHE = false;
+
+  /**
* Default platform-agnostic CLASSPATH for YARN applications. A
* comma-separated list of CLASSPATH entries. The parameter expansion marker
* will be replaced with real parameter expansion marker ('%' for Windows and


[07/37] hadoop git commit: MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between copySucceeded() in one thread and copyFailed() in another thread on the same host. Contributed by Jun

2015-05-14 Thread jitendra
MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between 
copySucceeded() in one thread and copyFailed() in another thread on the same 
host. Contributed by Junping Du.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4e2b3cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4e2b3cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4e2b3cc

Branch: refs/heads/HDFS-7240
Commit: f4e2b3cc0b1f4e49c306bc09a90495225bb2
Parents: 6d5da94
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed May 13 00:28:17 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed May 13 00:28:17 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt|  4 ++
 .../task/reduce/ShuffleSchedulerImpl.java   | 14 +++-
 .../task/reduce/TestShuffleScheduler.java   | 70 
 3 files changed, 85 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4e2b3cc/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ca92a97..15cdf90 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -414,6 +414,10 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6360. TestMapreduceConfigFields is placed in wrong dir, 
 introducing compile error (Arshad Mohammad via vinayakumarb)
 
+MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between
+copySucceeded() in one thread and copyFailed() in another thread on the
+same host. (Junping Du via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4e2b3cc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
index 8317672..ff0bb4f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleSchedulerImpl.java
@@ -239,7 +239,7 @@ public class ShuffleSchedulerImpl<K,V> implements 
ShuffleScheduler<K,V> {
   }
   
   private void updateStatus() {
-updateStatus(null);
+updateStatus(null);
   }
 
   public synchronized void hostFailed(String hostname) {
@@ -263,9 +263,17 @@ public class ShuffleSchedulerImplK,V implements 
ShuffleSchedulerK,V {
   failureCounts.put(mapId, new IntWritable(1));
 }
 String hostname = host.getHostName();
+IntWritable hostFailedNum = hostFailures.get(hostname);
+// MAPREDUCE-6361: hostname could get cleanup from hostFailures in another
+// thread with copySucceeded.
+// In this case, add back hostname to hostFailures to get rid of NPE issue.
+if (hostFailedNum == null) {
+  hostFailures.put(hostname, new IntWritable(1));
+}
 //report failure if already retried maxHostFailures times
-boolean hostFail = hostFailures.get(hostname).get() > getMaxHostFailures() 
? true : false;
-
+boolean hostFail = hostFailures.get(hostname).get() >
+getMaxHostFailures() ? true : false;
+
  if (failures >= abortFailureLimit) {
    try {
  throw new IOException(failures + " failures downloading " + mapId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4e2b3cc/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
index 6ac2320..654b748 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestShuffleScheduler.java
+++ 

[32/37] hadoop git commit: HDFS-7728. Avoid updating quota usage while loading edits. Contributed by Jing Zhao.

2015-05-14 Thread jitendra
HDFS-7728. Avoid updating quota usage while loading edits. Contributed by Jing 
Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2c85db8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2c85db8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2c85db8

Branch: refs/heads/HDFS-7240
Commit: b2c85db86c9a62b0a03ee87547265077f664970a
Parents: 54fa9b4
Author: Haohui Mai whe...@apache.org
Authored: Wed May 13 21:50:35 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed May 13 21:50:35 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../hdfs/server/namenode/FSDirDeleteOp.java |  61 -
 .../hdfs/server/namenode/FSDirRenameOp.java |  24 ++--
 .../hdfs/server/namenode/FSDirSnapshotOp.java   |   8 +-
 .../hdfs/server/namenode/FSDirectory.java   |  18 +++
 .../hdfs/server/namenode/FSEditLogLoader.java   |   7 +-
 .../hadoop/hdfs/server/namenode/FSImage.java|   4 +-
 .../hadoop/hdfs/server/namenode/INode.java  | 136 ---
 .../hdfs/server/namenode/INodeDirectory.java|  81 ++-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  83 ++-
 .../hadoop/hdfs/server/namenode/INodeMap.java   |  11 +-
 .../hdfs/server/namenode/INodeReference.java| 118 
 .../hdfs/server/namenode/INodeSymlink.java  |  24 +---
 .../hdfs/server/namenode/QuotaCounts.java   |   6 +-
 .../namenode/snapshot/AbstractINodeDiff.java|  12 +-
 .../snapshot/AbstractINodeDiffList.java |  23 ++--
 .../snapshot/DirectorySnapshottableFeature.java |  15 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  | 117 +++-
 .../hdfs/server/namenode/snapshot/FileDiff.java |  20 +--
 .../server/namenode/snapshot/FileDiffList.java  |  23 ++--
 .../snapshot/FileWithSnapshotFeature.java   |  27 ++--
 .../namenode/snapshot/SnapshotManager.java  |  14 +-
 .../server/namenode/TestQuotaByStorageType.java |  21 ++-
 .../snapshot/TestFileWithSnapshotFeature.java   |   8 +-
 .../snapshot/TestRenameWithSnapshots.java   |   1 +
 .../namenode/snapshot/TestSnapshotDeletion.java |  12 +-
 .../namenode/snapshot/TestSnapshotManager.java  |   5 +-
 27 files changed, 465 insertions(+), 417 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2c85db8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2e51086..0e6508b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -755,6 +755,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8243. Files written by TestHostsFiles and TestNameNodeMXBean are
 causing Release Audit Warnings. (Ruth Wisniewski via Arpit Agarwal)
 
+HDFS-7728. Avoid updating quota usage while loading edits.
+(Jing Zhao via wheat9)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2c85db8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index f99e50c..962f4b4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+import org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext;
 import org.apache.hadoop.util.ChunkedArrayList;
 
 import java.io.IOException;
@@ -39,24 +40,26 @@ class FSDirDeleteOp {
* @param removedINodes INodes that should be removed from inodeMap
* @return the number of files that have been removed
*/
-  static long delete(
-  FSDirectory fsd, INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
-  List<INode> removedINodes, List<Long> removedUCFiles,
-  long mtime) throws IOException {
+  static long delete(FSDirectory fsd, INodesInPath iip,
+  BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes,
+  List<Long> removedUCFiles, long mtime) throws IOException {
 if (NameNode.stateChangeLog.isDebugEnabled()) {
   NameNode.stateChangeLog.debug(DIR* 

[26/37] hadoop git commit: YARN-3641. NodeManager: stopRecoveryStore() shouldn't be skipped when exceptions happen in stopping NM's sub-services. Contributed by Junping Du

2015-05-14 Thread jitendra
YARN-3641. NodeManager: stopRecoveryStore() shouldn't be skipped when 
exceptions happen in stopping NM's sub-services. Contributed by Junping Du


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/711d77cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/711d77cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/711d77cc

Branch: refs/heads/HDFS-7240
Commit: 711d77cc54a64b2c3db70bdacc6bf2245c896a4b
Parents: f7de619
Author: Jason Lowe jl...@apache.org
Authored: Wed May 13 21:06:47 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Wed May 13 21:06:47 2015 +

--
 hadoop-yarn-project/CHANGES.txt  |  3 +++
 .../hadoop/yarn/server/nodemanager/NodeManager.java  | 11 ---
 2 files changed, 11 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/711d77cc/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 146690b..af8d26f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -488,6 +488,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3537. NPE when NodeManager.serviceInit fails and stopRecoveryStore
 invoked (Brahma Reddy Battula via jlowe)
 
+YARN-3641. NodeManager: stopRecoveryStore() shouldn't be skipped when
+exceptions happen in stopping NM's sub-services. (Junping Du via jlowe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/711d77cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index 4ac06d0..03e17c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -326,9 +326,14 @@ public class NodeManager extends CompositeService
 if (isStopping.getAndSet(true)) {
   return;
 }
-super.serviceStop();
-stopRecoveryStore();
-DefaultMetricsSystem.shutdown();
+try {
+  super.serviceStop();
+  DefaultMetricsSystem.shutdown();
+} finally {
+  // YARN-3641: NM's services stop get failed shouldn't block the
+  // release of NMLevelDBStore.
+  stopRecoveryStore();
+}
   }
 
   public String getName() {



[13/37] hadoop git commit: YARN-3539. Updated timeline server documentation and marked REST APIs evolving. Contributed by Steve Loughran.

2015-05-14 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fcd0702c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index cb8a5d3..acdd8ff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -12,8 +12,8 @@
   limitations under the License. See accompanying LICENSE file.
 -->
 
-YARN Timeline Server
-
+The YARN Timeline Server
+
 
 * [Overview](#Overview)
 * [Introduction](#Introduction)
@@ -21,111 +21,188 @@ YARN Timeline Server
 * [Timeline Structure](#Timeline_Structure)
 * [Deployment](#Deployment)
 * [Configurations](#Configurations)
-* [Running Timeline server](#Running_Timeline_server)
+* [Running the Timeline Server](#Running_Timeline_Server)
 * [Accessing generic-data via 
command-line](#Accessing_generic-data_via_command-line)
 * [Publishing of application specific 
data](#Publishing_of_application_specific_data)
+* [Timeline Server REST API](#Timeline_Server_REST_API_v1)
+* [Generic Data REST APIs](#GENERIC_DATA_REST_APIS)
 
-Overview
+<a name="Overview"></a>Overview
 -
 
-### Introduction  
+### <a name="Introduction"></a>Introduction
 
- Storage and retrieval of application's current as well as historic 
information in a generic fashion is solved in YARN through the Timeline Server. 
This serves two responsibilities:
+The Storage and retrieval of application's current and historic information in 
a
+generic fashion is addressed in YARN through the Timeline Server. It has two 
responsibilities:
 
- Application specific information
+ Persisting Application Specific Information
 
-  Supports collection of information completely specific to an application or 
framework. For example, Hadoop MapReduce framework can include pieces of 
information like number of map tasks, reduce tasks, counters etc. Application 
developers can publish the specific information to the Timeline server via 
TimelineClient, the ApplicationMaster and/or the application's containers. This 
information is then queryable via REST APIs for rendering by 
application/framework specific UIs.
+The collection and retrieval of information completely specific to an 
application or framework.
+For example, the Hadoop MapReduce framework can include pieces of information 
like number of
+map tasks, reduce tasks, counters...etc.
+Application developers can publish the specific information to the Timeline 
server via `TimelineClient`
+in the Application Master and/or the application's containers.
 
- Generic information about completed applications
-  
-  Previously this was done by Application History Server but with  timeline 
server its just one use case of Timeline server functionality. Generic 
information includes application level data like queue-name, user information 
etc in the ApplicationSubmissionContext, list of application-attempts that ran 
for an application, information about each application-attempt, list of 
containers run under each application-attempt, and information about each 
container. Generic data is published by ResourceManager to the timeline store 
and used by the web-UI to display information about completed applications.
- 
+This information is then queryable via REST APIs for rendering by 
application/framework specific UIs.
 
-### Current Status
+ Persisting Generic Information about Completed Applications
 
-  The essential functionality of the timeline server have been completed and 
it can work in both secure and non secure modes. The generic history service is 
also built on timeline store. In subsequent releases we will be rolling out 
next generation timeline service which is scalable and reliable. Currently, 
Application specific information is only available via RESTful APIs using JSON 
type content. The ability to install framework specific UIs in YARN is not 
supported yet.
+Previously this was supported purely for MapReduce jobs by the Application 
History Server.
+With the introduction of the timeline server, the Application History Server 
becomes just one use of
+the Timeline Server.
 
-### Timeline Structure
+Generic information includes application level data such as 
+* queue-name, 
+* user information and the like set in the `ApplicationSubmissionContext`,
+* a list of application-attempts that ran for an application
+* information about each application-attempt
+* the list of containers run under each application-attempt
+* information about each container.
 
-![Timeline Structure] (./images/timeline_structure.jpg)
+Generic data is published by the YARN Resource Manager to the timeline 

[35/37] hadoop git commit: HDFS-8150. Make getFileChecksum fail for blocks under construction (Contributed by J.Andreina)

2015-05-14 Thread jitendra
HDFS-8150. Make getFileChecksum fail for blocks under construction (Contributed 
by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/def9136e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/def9136e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/def9136e

Branch: refs/heads/HDFS-7240
Commit: def9136e0259e118e6fd7b656260765d28ac9ae6
Parents: ffbb574
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu May 14 15:54:51 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Thu May 14 15:54:51 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java   |  8 
 .../apache/hadoop/hdfs/TestGetFileChecksum.java  | 19 +++
 .../snapshot/TestSnapshotFileLength.java | 17 +
 4 files changed, 43 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/def9136e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0e6508b..4df18ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -758,6 +758,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7728. Avoid updating quota usage while loading edits.
 (Jing Zhao via wheat9)
 
+HDFS-8150. Make getFileChecksum fail for blocks under construction
+(J.Andreina via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/def9136e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 191ebc9..7908451 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1872,6 +1872,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if (null == blockLocations) {
   throw new FileNotFoundException("File does not exist: " + src);
 }
+if (blockLocations.isUnderConstruction()) {
+  throw new IOException("Fail to get checksum, since file " + src
+  + " is under construction.");
+}
 List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
 final DataOutputBuffer md5out = new DataOutputBuffer();
 int bytesPerCRC = -1;
@@ -1891,6 +1895,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if (null == blockLocations) {
   throw new FileNotFoundException("File does not exist: " + src);
 }
+if (blockLocations.isUnderConstruction()) {
+  throw new IOException("Fail to get checksum, since file " + src
+  + " is under construction.");
+}
 locatedblocks = blockLocations.getLocatedBlocks();
 refetchBlocks = false;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/def9136e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
index 0e56ba7..814261f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.junit.After;
@@ -68,6 +73,20 @@ public class TestGetFileChecksum {
   }
 
   @Test
+  public void testGetFileChecksumForBlocksUnderConstruction() {
+try {
+  FSDataOutputStream file = dfs.create(new Path("/testFile"));
+  file.write("Performance Testing".getBytes());
+  dfs.getFileChecksum(new Path("/testFile"));
+  fail("getFileChecksum should fail for files "
+  + "with blocks under construction");
+} catch (IOException ie) {
+  Assert.assertTrue(ie.getMessage().contains(
+  Fail to get checksum, since file /testFile 

[19/37] hadoop git commit: HDFS-8358. TestTraceAdmin fails. Contributed by Masatake Iwasaki.

2015-05-14 Thread jitendra
HDFS-8358. TestTraceAdmin fails. Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9a46a00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9a46a00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9a46a00

Branch: refs/heads/HDFS-7240
Commit: f9a46a00d2020c6d6466fbc829ada0521cb78dc0
Parents: cdec12d
Author: Kihwal Lee kih...@apache.org
Authored: Wed May 13 14:15:27 2015 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Wed May 13 14:15:27 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../test/java/org/apache/hadoop/tracing/TestTraceAdmin.java  | 8 
 2 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9a46a00/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4fabf97..f4e40b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -747,6 +747,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8362. Java Compilation Error in TestHdfsConfigFields.java
 (Arshad Mohammad via vinayakumarb)
 
+HDFS-8358. TestTraceAdmin fails (Masatake Iwasaki via kihwal)
+
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9a46a00/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
index 4a102a3..acd0dbb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
@@ -72,10 +72,10 @@ public class TestTraceAdmin {
   Assert.assertEquals(ret:0, [no span receivers found] + NEWLINE,
   runTraceCommand(trace, -list, -host, getHostPortForNN(cluster)));
   Assert.assertEquals(ret:0, Added trace span receiver 1 with  +
-  configuration local-file-span-receiver.path =  + tracePath + 
NEWLINE,
+  configuration dfs.htrace.local-file-span-receiver.path =  + 
tracePath + NEWLINE,
   runTraceCommand(trace, -add, -host, getHostPortForNN(cluster),
   -class, org.apache.htrace.impl.LocalFileSpanReceiver,
-  -Clocal-file-span-receiver.path= + tracePath));
+  -Cdfs.htrace.local-file-span-receiver.path= + tracePath));
   String list =
   runTraceCommand(trace, -list, -host, getHostPortForNN(cluster));
   Assert.assertTrue(list.startsWith(ret:0));
@@ -86,10 +86,10 @@ public class TestTraceAdmin {
   Assert.assertEquals(ret:0, [no span receivers found] + NEWLINE,
   runTraceCommand(trace, -list, -host, getHostPortForNN(cluster)));
   Assert.assertEquals(ret:0, Added trace span receiver 2 with  +
-  configuration local-file-span-receiver.path =  + tracePath + 
NEWLINE,
+  configuration dfs.htrace.local-file-span-receiver.path =  + 
tracePath + NEWLINE,
   runTraceCommand(trace, -add, -host, getHostPortForNN(cluster),
   -class, LocalFileSpanReceiver,
-  -Clocal-file-span-receiver.path= + tracePath));
+  -Cdfs.htrace.local-file-span-receiver.path= + tracePath));
   Assert.assertEquals(ret:0, Removed trace span receiver 2 + NEWLINE,
   runTraceCommand(trace, -remove, 2, -host,
   getHostPortForNN(cluster)));



[02/37] hadoop git commit: Move YARN-3493 in CHANGES.txt from 2.8 to 2.7.1

2015-05-14 Thread jitendra
Move YARN-3493 in CHANGES.txt from 2.8 to 2.7.1


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d28611c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d28611c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d28611c

Branch: refs/heads/HDFS-7240
Commit: 3d28611cc6850de129b831158c420f9487103213
Parents: d6f6741
Author: Wangda Tan wan...@apache.org
Authored: Mon May 11 18:06:54 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Mon May 11 18:06:54 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d28611c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b5cb0a5..2412dce 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -298,9 +298,6 @@ Release 2.8.0 - UNRELEASED
 YARN-3021. YARN's delegation-token handling disallows certain trust setups
 to operate properly over DistCp. (Yongjun Zhang via jianhe)
 
-YARN-3493. RM fails to come up with error Failed to load/recover state 
-when mem settings are changed. (Jian He via wangda)
-
 YARN-3136. Fixed a synchronization problem of
 AbstractYarnScheduler#getTransferredContainers. (Sunil G via jianhe)
 
@@ -465,6 +462,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3434. Interaction between reservations and userlimit can result in 
 significant ULF violation (tgraves)
 
+YARN-3493. RM fails to come up with error Failed to load/recover state
+when mem settings are changed. (Jian He via wangda)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES



[15/37] hadoop git commit: MAPREDUCE-6366. mapreduce.terasort.final.sync configuration in TeraSort doesn't work. Contributed by Takuya Fukudome.

2015-05-14 Thread jitendra
MAPREDUCE-6366. mapreduce.terasort.final.sync configuration in TeraSort doesn't 
work. Contributed by Takuya Fukudome.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e82067bf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e82067bf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e82067bf

Branch: refs/heads/HDFS-7240
Commit: e82067bfe680ce04acc0153693cce3cd385e5567
Parents: fcd0702
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed May 13 16:44:37 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed May 13 16:44:50 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../main/java/org/apache/hadoop/examples/terasort/TeraSort.java   | 1 -
 .../org/apache/hadoop/examples/terasort/TeraSortConfigKeys.java   | 2 +-
 3 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e82067bf/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index fc98376..7fe8483 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -419,6 +419,9 @@ Release 2.8.0 - UNRELEASED
 copySucceeded() in one thread and copyFailed() in another thread on the
 same host. (Junping Du via ozawa)
 
+MAPREDUCE-6366. mapreduce.terasort.final.sync configuration in TeraSort
+doesn't work. (Takuya Fukudome via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e82067bf/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
index 5d586e6..9beff3e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSort.java
@@ -328,7 +328,6 @@ public class TeraSort extends Configured implements Tool {
 }
 
 job.getConfiguration().setInt(dfs.replication, 
getOutputReplication(job));
-TeraOutputFormat.setFinalSync(job, true);
 int ret = job.waitForCompletion(true) ? 0 : 1;
 LOG.info(done);
 return ret;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e82067bf/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSortConfigKeys.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSortConfigKeys.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSortConfigKeys.java
index 0822a50..0e7a534 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSortConfigKeys.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraSortConfigKeys.java
@@ -70,7 +70,7 @@ public enum TeraSortConfigKeys {
   public static final long DEFAULT_NUM_ROWS = 0L;
   public static final int DEFAULT_NUM_PARTITIONS = 10;
   public static final long DEFAULT_SAMPLE_SIZE = 10L;
-  public static final boolean DEFAULT_FINAL_SYNC_ATTRIBUTE = false;
+  public static final boolean DEFAULT_FINAL_SYNC_ATTRIBUTE = true;
   public static final boolean DEFAULT_USE_TERA_SCHEDULER = true;
   public static final boolean DEFAULT_USE_SIMPLE_PARTITIONER = false;
   public static final int DEFAULT_OUTPUT_REPLICATION = 1;



[23/37] hadoop git commit: YARN-3579. CommonNodeLabelsManager should support NodeLabel instead of string label name when getting node-to-label/label-to-label mappings. (Sunil G via wangda)

2015-05-14 Thread jitendra
YARN-3579. CommonNodeLabelsManager should support NodeLabel instead of string 
label name when getting node-to-label/label-to-label mappings. (Sunil G via 
wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4f53fc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4f53fc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4f53fc9

Branch: refs/heads/HDFS-7240
Commit: d4f53fc9631d682cd79ba440aefa6750dcc898be
Parents: 0f95921
Author: Wangda Tan wan...@apache.org
Authored: Wed May 13 13:29:09 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed May 13 13:29:09 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../nodelabels/CommonNodeLabelsManager.java | 149 ---
 .../hadoop/yarn/nodelabels/RMNodeLabel.java |   7 +-
 .../yarn/nodelabels/NodeLabelTestBase.java  |  30 
 .../nodelabels/TestCommonNodeLabelsManager.java |  22 +++
 5 files changed, 186 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4f53fc9/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e5d5ecf..16c2dd9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -420,6 +420,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3539. Updated timeline server documentation and marked REST APIs 
evolving.
 (Steve Loughran via zjshen)
 
+YARN-3579. CommonNodeLabelsManager should support NodeLabel instead of 
string 
+label name when getting node-to-label/label-to-label mappings. (Sunil G 
via wangda)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4f53fc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index f2ff0f6..bf34837 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -64,6 +64,8 @@ public class CommonNodeLabelsManager extends AbstractService {
   private static final int MAX_LABEL_LENGTH = 255;
   public static final Set<String> EMPTY_STRING_SET = Collections
   .unmodifiableSet(new HashSet<String>(0));
+  public static final Set<NodeLabel> EMPTY_NODELABEL_SET = Collections
+  .unmodifiableSet(new HashSet<NodeLabel>(0));
   public static final String ANY = "*";
   public static final Set<String> ACCESS_ANY_LABEL_SET = ImmutableSet.of(ANY);
   private static final Pattern LABEL_PATTERN = Pattern
@@ -716,23 +718,53 @@ public class CommonNodeLabelsManager extends 
AbstractService {
* @return nodes to labels map
*/
   public Map<NodeId, Set<String>> getNodeLabels() {
+Map<NodeId, Set<String>> nodeToLabels =
+generateNodeLabelsInfoPerNode(String.class);
+return nodeToLabels;
+  }
+
+  /**
+   * Get mapping of nodes to label info
+   *
+   * @return nodes to labels map
+   */
+  public Map<NodeId, Set<NodeLabel>> getNodeLabelsInfo() {
+Map<NodeId, Set<NodeLabel>> nodeToLabels =
+generateNodeLabelsInfoPerNode(NodeLabel.class);
+return nodeToLabels;
+  }
+
+  @SuppressWarnings("unchecked")
+  private <T> Map<NodeId, Set<T>> generateNodeLabelsInfoPerNode(Class<T> type) 
{
 try {
   readLock.lock();
-  Map<NodeId, Set<String>> nodeToLabels =
-  new HashMap<NodeId, Set<String>>();
+  Map<NodeId, Set<T>> nodeToLabels = new HashMap<>();
   for (Entry<String, Host> entry : nodeCollections.entrySet()) {
 String hostName = entry.getKey();
 Host host = entry.getValue();
 for (NodeId nodeId : host.nms.keySet()) {
-  Set<String> nodeLabels = getLabelsByNode(nodeId);
-  if (nodeLabels == null || nodeLabels.isEmpty()) {
-continue;
+  if (type.isAssignableFrom(String.class)) {
+Set<String> nodeLabels = getLabelsByNode(nodeId);
+if (nodeLabels == null || nodeLabels.isEmpty()) {
+  continue;
+}
+nodeToLabels.put(nodeId, (Set<T>) nodeLabels);
+  } else {
+Set<NodeLabel> nodeLabels = getLabelsInfoByNode(nodeId);
+if (nodeLabels == null || nodeLabels.isEmpty()) {
+  continue;
+}
+

[34/37] hadoop git commit: HADOOP-10993. Dump java command line to *.out file (Contributed by Kengo Seki)

2015-05-14 Thread jitendra
HADOOP-10993. Dump java command line to *.out file (Contributed by Kengo Seki)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffbb5746
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffbb5746
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffbb5746

Branch: refs/heads/HDFS-7240
Commit: ffbb574623c2a1dbcead201e9ae2dad3f77998d0
Parents: 0daa5ad
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu May 14 15:24:35 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Thu May 14 15:24:35 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../hadoop-common/src/main/bin/hadoop-functions.sh  | 12 
 2 files changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffbb5746/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index bf39c94..359a38b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -215,6 +215,9 @@ Trunk (Unreleased)
 HADOOP-11590. Update sbin commands and documentation to use new --slaves
 option (aw)
 
+HADOOP-10993. Dump java command line to *.out file
+(Kengo Seki via vinayakumarb)
+
   BUG FIXES
 
 HADOOP-11473. test-patch says -1 overall even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffbb5746/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 85f8200..67e8870 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1148,6 +1148,10 @@ function hadoop_java_exec
 
   hadoop_debug Final CLASSPATH: ${CLASSPATH}
   hadoop_debug Final HADOOP_OPTS: ${HADOOP_OPTS}
+  hadoop_debug Final JAVA_HOME: ${JAVA_HOME}
+  hadoop_debug java: ${JAVA}
+  hadoop_debug Class name: ${class}
+  hadoop_debug Command line options: $*
 
   export CLASSPATH
   #shellcheck disable=SC2086
@@ -1174,6 +1178,10 @@ function hadoop_start_daemon
 
   hadoop_debug Final CLASSPATH: ${CLASSPATH}
   hadoop_debug Final HADOOP_OPTS: ${HADOOP_OPTS}
+  hadoop_debug Final JAVA_HOME: ${JAVA_HOME}
+  hadoop_debug java: ${JAVA}
+  hadoop_debug Class name: ${class}
+  hadoop_debug Command line options: $*
 
   # this is for the non-daemon pid creation
   #shellcheck disable=SC2086
@@ -1300,6 +1308,10 @@ function hadoop_start_secure_daemon
 
   hadoop_debug Final CLASSPATH: ${CLASSPATH}
   hadoop_debug Final HADOOP_OPTS: ${HADOOP_OPTS}
+  hadoop_debug Final JSVC_HOME: ${JSVC_HOME}
+  hadoop_debug jsvc: ${jsvc}
+  hadoop_debug Class name: ${class}
+  hadoop_debug Command line options: $*
 
   #shellcheck disable=SC2086
   echo $$  ${privpidfile} 2/dev/null



[2/2] hadoop git commit: HADOOP-11713. ViewFileSystem should support snapshot methods. Contributed by Rakesh R.

2015-05-14 Thread cnauroth
HADOOP-11713. ViewFileSystem should support snapshot methods. Contributed by 
Rakesh R.

(cherry picked from commit 09fe16f166392a99e1e54001a9112c6a4632dfc8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa082e1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa082e1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa082e1a

Branch: refs/heads/branch-2
Commit: fa082e1a0a32012de1befd38e34c18c7af303006
Parents: bc13c7d
Author: cnauroth cnaur...@apache.org
Authored: Thu May 14 14:55:58 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Thu May 14 14:56:13 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../hadoop/fs/viewfs/ChRootedFileSystem.java| 17 +++
 .../org/apache/hadoop/fs/viewfs/ChRootedFs.java | 19 ++-
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java | 48 +-
 .../org/apache/hadoop/fs/viewfs/ViewFs.java | 51 +--
 .../fs/viewfs/TestChRootedFileSystem.java   | 52 
 .../apache/hadoop/fs/viewfs/TestChRootedFs.java | 41 +++
 .../fs/viewfs/ViewFileSystemBaseTest.java   | 20 
 .../apache/hadoop/fs/viewfs/ViewFsBaseTest.java | 21 
 9 files changed, 267 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa082e1a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e43540b..412719d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -95,6 +95,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-9723. Improve error message when hadoop archive output path already
 exists. (Jean-Baptiste Onofré and Yongjun Zhang via aajisak)
 
+HADOOP-11713. ViewFileSystem should support snapshot methods.
+(Rakesh R via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa082e1a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 18e2391..f7a93e7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -364,6 +364,23 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public Path createSnapshot(Path path, String name) throws IOException {
+return super.createSnapshot(fullPath(path), name);
+  }
+
+  @Override
+  public void renameSnapshot(Path path, String snapshotOldName,
+  String snapshotNewName) throws IOException {
+super.renameSnapshot(fullPath(path), snapshotOldName, snapshotNewName);
+  }
+
+  @Override
+  public void deleteSnapshot(Path snapshotDir, String snapshotName)
+  throws IOException {
+super.deleteSnapshot(fullPath(snapshotDir), snapshotName);
+  }
+
+  @Override
   public Path resolvePath(final Path p) throws IOException {
 return super.resolvePath(fullPath(p));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa082e1a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
index 68e756a8..a05a700 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
@@ -361,7 +361,24 @@ class ChRootedFs extends AbstractFileSystem {
   }
 
   @Override
-  public void setVerifyChecksum(final boolean verifyChecksum) 
+  public Path createSnapshot(Path path, String name) throws IOException {
+return myFs.createSnapshot(fullPath(path), name);
+  }
+
+  @Override
+  public void renameSnapshot(Path path, String snapshotOldName,
+  String snapshotNewName) throws IOException {
+myFs.renameSnapshot(fullPath(path), snapshotOldName, snapshotNewName);
+  }
+
+  @Override
+  public void deleteSnapshot(Path snapshotDir, String snapshotName)
+  

[1/2] hadoop git commit: HADOOP-11713. ViewFileSystem should support snapshot methods. Contributed by Rakesh R.

2015-05-14 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 bc13c7d84 -> fa082e1a0
  refs/heads/trunk 15ccd967e -> 09fe16f16


HADOOP-11713. ViewFileSystem should support snapshot methods. Contributed by 
Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09fe16f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09fe16f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09fe16f1

Branch: refs/heads/trunk
Commit: 09fe16f166392a99e1e54001a9112c6a4632dfc8
Parents: 15ccd96
Author: cnauroth cnaur...@apache.org
Authored: Thu May 14 14:55:58 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Thu May 14 14:55:58 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../hadoop/fs/viewfs/ChRootedFileSystem.java| 17 +++
 .../org/apache/hadoop/fs/viewfs/ChRootedFs.java | 19 ++-
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java | 48 +-
 .../org/apache/hadoop/fs/viewfs/ViewFs.java | 51 +--
 .../fs/viewfs/TestChRootedFileSystem.java   | 52 
 .../apache/hadoop/fs/viewfs/TestChRootedFs.java | 41 +++
 .../fs/viewfs/ViewFileSystemBaseTest.java   | 20 
 .../apache/hadoop/fs/viewfs/ViewFsBaseTest.java | 21 
 9 files changed, 267 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09fe16f1/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 359a38b..2f8acb0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -566,6 +566,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-9723. Improve error message when hadoop archive output path already
 exists. (Jean-Baptiste Onofré and Yongjun Zhang via aajisak)
 
+HADOOP-11713. ViewFileSystem should support snapshot methods.
+(Rakesh R via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09fe16f1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 18e2391..f7a93e7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -364,6 +364,23 @@ class ChRootedFileSystem extends FilterFileSystem {
   }
 
   @Override
+  public Path createSnapshot(Path path, String name) throws IOException {
+return super.createSnapshot(fullPath(path), name);
+  }
+
+  @Override
+  public void renameSnapshot(Path path, String snapshotOldName,
+  String snapshotNewName) throws IOException {
+super.renameSnapshot(fullPath(path), snapshotOldName, snapshotNewName);
+  }
+
+  @Override
+  public void deleteSnapshot(Path snapshotDir, String snapshotName)
+  throws IOException {
+super.deleteSnapshot(fullPath(snapshotDir), snapshotName);
+  }
+
+  @Override
   public Path resolvePath(final Path p) throws IOException {
 return super.resolvePath(fullPath(p));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09fe16f1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
index 68e756a8..a05a700 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
@@ -361,7 +361,24 @@ class ChRootedFs extends AbstractFileSystem {
   }
 
   @Override
-  public void setVerifyChecksum(final boolean verifyChecksum) 
+  public Path createSnapshot(Path path, String name) throws IOException {
+return myFs.createSnapshot(fullPath(path), name);
+  }
+
+  @Override
+  public void renameSnapshot(Path path, String snapshotOldName,
+  String snapshotNewName) throws IOException {
+myFs.renameSnapshot(fullPath(path), snapshotOldName, snapshotNewName);
+  }
+
+  @Override
+  public void 

hadoop git commit: YARN-1519. Check in container-executor if sysconf is implemented before using it (Radim Kolar and Eric Payne via raviprak)

2015-05-14 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fa082e1a0 -> 4caadaa8b


YARN-1519. Check in container-executor if sysconf is implemented before using 
it (Radim Kolar and Eric Payne via raviprak)

(cherry picked from commit 53fe4eff09fdaeed75a8cad3a26156bf963a8d37)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4caadaa8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4caadaa8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4caadaa8

Branch: refs/heads/branch-2
Commit: 4caadaa8bb61e6514c002a81b31bf4a649a685b5
Parents: fa082e1
Author: Ravi Prakash ravip...@apache.org
Authored: Thu May 14 15:55:37 2015 -0700
Committer: Ravi Prakash ravip...@apache.org
Committed: Thu May 14 15:56:36 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../main/native/container-executor/impl/container-executor.c  | 7 +--
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4caadaa8/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c642b12..3ce0ab5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -363,6 +363,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2921. Fix MockRM/MockAM#waitForState sleep too long. 
 (Tsuyoshi Ozawa via wangda)
 
+YARN-1519. Check in container-executor if sysconf is implemented before
+using it (Radim Kolar and Eric Payne via raviprak)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4caadaa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 6727838..962d52a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -631,8 +631,11 @@ static int create_container_directories(const char* user, 
const char *app_id,
  */
 static struct passwd* get_user_info(const char* user) {
   int string_size = sysconf(_SC_GETPW_R_SIZE_MAX);
-  void* buffer = malloc(string_size + sizeof(struct passwd));
   struct passwd *result = NULL;
  if(string_size < 1024) {
string_size = 1024;
  }
+  void* buffer = malloc(string_size + sizeof(struct passwd));
   if (getpwnam_r(user, buffer, buffer + sizeof(struct passwd), string_size,
 result) != 0) {
 free(buffer);
@@ -1425,7 +1428,7 @@ void chown_dir_contents(const char *dir_path, uid_t uid, 
gid_t gid) {
  
   dp = opendir(dir_path);
   if (dp != NULL) {
-while (ep = readdir(dp)) {
+while ((ep = readdir(dp)) != NULL) {
  stpncpy(buf, ep->d_name, strlen(ep->d_name));
  buf[strlen(ep->d_name)] = '\0';
   change_owner(path_tmp, uid, gid);



hadoop git commit: YARN-1519. Check in container-executor if sysconf is implemented before using it (Radim Kolar and Eric Payne via raviprak)

2015-05-14 Thread raviprak
Repository: hadoop
Updated Branches:
  refs/heads/trunk 09fe16f16 -> 53fe4eff0


YARN-1519. Check in container-executor if sysconf is implemented before using 
it (Radim Kolar and Eric Payne via raviprak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53fe4eff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53fe4eff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53fe4eff

Branch: refs/heads/trunk
Commit: 53fe4eff09fdaeed75a8cad3a26156bf963a8d37
Parents: 09fe16f
Author: Ravi Prakash ravip...@apache.org
Authored: Thu May 14 15:55:37 2015 -0700
Committer: Ravi Prakash ravip...@apache.org
Committed: Thu May 14 15:55:37 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 3 +++
 .../native/container-executor/impl/container-executor.c | 9 ++---
 2 files changed, 9 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53fe4eff/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e0f2c52..f2a518e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -408,6 +408,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2921. Fix MockRM/MockAM#waitForState sleep too long. 
 (Tsuyoshi Ozawa via wangda)
 
+YARN-1519. Check in container-executor if sysconf is implemented before
+using it (Radim Kolar and Eric Payne via raviprak)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53fe4eff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 485399a..ff28d30 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -631,8 +631,11 @@ static int create_container_directories(const char* user, 
const char *app_id,
  */
 static struct passwd* get_user_info(const char* user) {
   int string_size = sysconf(_SC_GETPW_R_SIZE_MAX);
-  void* buffer = malloc(string_size + sizeof(struct passwd));
   struct passwd *result = NULL;
+  if(string_size < 1024) {
+string_size = 1024;
+  }
+  void* buffer = malloc(string_size + sizeof(struct passwd));
   if (getpwnam_r(user, buffer, buffer + sizeof(struct passwd), string_size,
 result) != 0) {
 free(buffer);
@@ -1425,7 +1428,7 @@ void chown_dir_contents(const char *dir_path, uid_t uid, 
gid_t gid) {
  
   dp = opendir(dir_path);
   if (dp != NULL) {
-while (ep = readdir(dp)) {
+while ((ep = readdir(dp)) != NULL) {
  stpncpy(buf, ep->d_name, strlen(ep->d_name));
  buf[strlen(ep->d_name)] = '\0';
   change_owner(path_tmp, uid, gid);
@@ -1545,4 +1548,4 @@ int traffic_control_read_state(char *command_file) {
  */
 int traffic_control_read_stats(char *command_file) {
   return run_traffic_control(TC_READ_STATS_OPTS, command_file);
-}
\ No newline at end of file
+}



hadoop git commit: MAPREDUCE-6337. Added a mode to replay MR job history files and put them into the timeline service v2. Contributed by Sangjin Lee.

2015-05-14 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 b059dd488 -> 463e070a8


MAPREDUCE-6337. Added a mode to replay MR job history files and put them into 
the timeline service v2. Contributed by Sangjin Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/463e070a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/463e070a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/463e070a

Branch: refs/heads/YARN-2928
Commit: 463e070a8e7c882706a96eaa20ea49bfe9982875
Parents: b059dd4
Author: Zhijie Shen zjs...@apache.org
Authored: Thu May 14 15:16:33 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Thu May 14 15:22:16 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|   3 +
 .../hadoop/mapred/JobHistoryFileParser.java |  53 
 .../mapred/JobHistoryFileReplayMapper.java  | 301 +++
 .../hadoop/mapred/SimpleEntityWriter.java   | 139 +
 .../hadoop/mapred/TimelineEntityConverter.java  | 207 +
 .../mapred/TimelineServicePerformanceV2.java| 191 
 .../collector/TimelineCollectorManager.java |  10 +-
 .../storage/FileSystemTimelineWriterImpl.java   |  23 +-
 .../timelineservice/storage/package-info.java   |  24 ++
 9 files changed, 810 insertions(+), 141 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/463e070a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 9912b6d..6ab48e7 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -12,6 +12,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 MAPREDUCE-6335. Created MR job based performance test driver for the
 timeline service v2. (Sangjin Lee via zjshen)
 
+MAPREDUCE-6337. Added a mode to replay MR job history files and put them
+into the timeline service v2. (Sangjin Lee via zjshen)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/463e070a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobHistoryFileParser.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobHistoryFileParser.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobHistoryFileParser.java
new file mode 100644
index 000..9d051df
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobHistoryFileParser.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
+
+class JobHistoryFileParser {
+  private static final Log LOG = LogFactory.getLog(JobHistoryFileParser.class);
+
+  private final FileSystem fs;
+
+  public JobHistoryFileParser(FileSystem fs) {
+LOG.info("JobHistoryFileParser created with " + fs);
+this.fs = fs;
+  }
+
+  public JobInfo parseHistoryFile(Path path) throws IOException {
+LOG.info("parsing job history file " + path);
+JobHistoryParser parser = new JobHistoryParser(fs, path);
+return parser.parse();
+  }
+
+  public Configuration parseConfiguration(Path path) throws IOException {
+LOG.info("parsing job configuration file " + path);
+Configuration conf = new Configuration(false);
+

[1/2] hadoop git commit: Fixing MR intermediate spills. Contributed by Arun Suresh.

2015-05-14 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 53fe4eff0 -> 9a2a9553e


Fixing MR intermediate spills. Contributed by Arun Suresh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b710a42
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b710a42
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b710a42

Branch: refs/heads/trunk
Commit: 6b710a42e00acca405e085724c89cda016cf7442
Parents: 53fe4ef
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu May 14 16:07:56 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu May 14 16:07:56 2015 -0700

--
 .../hadoop/mapred/LocalContainerLauncher.java   | 10 +++
 .../hadoop/mapred/TaskAttemptListenerImpl.java  | 17 +--
 .../org/apache/hadoop/mapred/YarnChild.java | 18 
 .../hadoop/mapreduce/v2/app/MRAppMaster.java| 24 +++-
 .../java/org/apache/hadoop/mapred/Task.java | 25 
 .../apache/hadoop/mapreduce/CryptoUtils.java| 17 ++-
 .../apache/hadoop/mapreduce/JobSubmitter.java   | 16 ---
 .../hadoop/mapreduce/security/TokenCache.java   | 10 +++
 .../mapreduce/task/reduce/LocalFetcher.java |  6 ++--
 .../src/site/markdown/EncryptedShuffle.md   |  8 ++
 .../mapreduce/task/reduce/TestMerger.java   |  2 +-
 .../TestMRIntermediateDataEncryption.java   | 30 ++--
 .../apache/hadoop/mapred/TestMapProgress.java   | 14 +
 13 files changed, 156 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b710a42/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index 52b3497..9d8b4a5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -83,6 +83,7 @@ public class LocalContainerLauncher extends AbstractService 
implements
   private final ClassLoader jobClassLoader;
   private ExecutorService taskRunner;
   private Thread eventHandler;
+  private byte[] encryptedSpillKey = new byte[] {0};
   private BlockingQueueContainerLauncherEvent eventQueue =
   new LinkedBlockingQueueContainerLauncherEvent();
 
@@ -176,6 +177,11 @@ public class LocalContainerLauncher extends 
AbstractService implements
 }
   }
 
+  public void setEncryptedSpillKey(byte[] encryptedSpillKey) {
+if (encryptedSpillKey != null) {
+  this.encryptedSpillKey = encryptedSpillKey;
+}
+  }
 
   /*
* Uber-AM lifecycle/ordering (normal case):
@@ -382,6 +388,10 @@ public class LocalContainerLauncher extends 
AbstractService implements
 // map to handle)
 conf.setBoolean("mapreduce.task.uberized", true);
 
+// Check and handle Encrypted spill key
+task.setEncryptedSpillKey(encryptedSpillKey);
+YarnChild.setEncryptedSpillKeyIfRequired(task);
+
 // META-FIXME: do we want the extra sanity-checking (doneWithMaps,
 // etc.), or just assume/hope the state machine(s) and uber-AM work
 // as expected?

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b710a42/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
index c8f2427..49a00c5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
@@ -84,20 +84,30 @@ public class TaskAttemptListenerImpl extends 
CompositeService
 jvmIDToActiveAttemptMap
   = new ConcurrentHashMapWrappedJvmID, org.apache.hadoop.mapred.Task();
   private 

[2/2] hadoop git commit: Fixing HDFS state-store. Contributed by Arun Suresh.

2015-05-14 Thread vinodkv
Fixing HDFS state-store. Contributed by Arun Suresh.

(cherry picked from commit 9a2a9553eee454ecd18120535d3e845f86fc3584)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ad3196e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ad3196e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ad3196e0

Branch: refs/heads/branch-2
Commit: ad3196e01667bd6798a1988fddb0c0ae32f6687c
Parents: 8786297
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu May 14 16:13:51 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu May 14 16:57:03 2015 -0700

--
 .../recovery/FileSystemRMStateStore.java|  83 +++---
 .../recovery/RMStateStoreTestBase.java  |  16 +-
 .../recovery/TestFSRMStateStore.java| 151 ---
 .../src/site/markdown/ResourceManagerHA.md  |   2 +-
 4 files changed, 207 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ad3196e0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
index 68d26bb..6920bb5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
@@ -25,6 +25,7 @@ import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.EnumSet;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -38,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -84,7 +86,8 @@ public class FileSystemRMStateStore extends RMStateStore {
   protected static final String AMRMTOKEN_SECRET_MANAGER_NODE =
   "AMRMTokenSecretManagerNode";
 
-  @VisibleForTesting
+  private static final String UNREADABLE_BY_SUPERUSER_XATTRIB =
+  "security.hdfs.unreadable.by.superuser";
   protected FileSystem fs;
   @VisibleForTesting
   protected Configuration fsConf;
@@ -97,6 +100,7 @@ public class FileSystemRMStateStore extends RMStateStore {
   private Path dtSequenceNumberPath = null;
   private int fsNumRetries;
   private long fsRetryInterval;
+  private boolean isHDFS;
 
   @VisibleForTesting
   Path fsWorkingPath;
@@ -141,11 +145,17 @@ public class FileSystemRMStateStore extends RMStateStore {
 }
 
 fs = fsWorkingPath.getFileSystem(fsConf);
+isHDFS = fs.getScheme().toLowerCase().contains("hdfs");
 mkdirsWithRetries(rmDTSecretManagerRoot);
 mkdirsWithRetries(rmAppRoot);
 mkdirsWithRetries(amrmTokenSecretManagerRoot);
   }
 
+  @VisibleForTesting
+  void setIsHDFS(boolean isHDFS) {
+this.isHDFS = isHDFS;
+  }
+
   @Override
   protected synchronized void closeInternal() throws Exception {
 closeWithRetries();
@@ -175,9 +185,9 @@ public class FileSystemRMStateStore extends RMStateStore {
 byte[] data =
 ((VersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray();
 if (existsWithRetries(versionNodePath)) {
-  updateFile(versionNodePath, data);
+  updateFile(versionNodePath, data, false);
 } else {
-  writeFileWithRetries(versionNodePath, data);
+  writeFileWithRetries(versionNodePath, data, false);
 }
   }
   
@@ -194,12 +204,12 @@ public class FileSystemRMStateStore extends RMStateStore {
   // increment epoch and store it
   byte[] storeData = Epoch.newInstance(currentEpoch + 1).getProto()
   .toByteArray();
-  updateFile(epochNodePath, storeData);
+  updateFile(epochNodePath, storeData, false);
 } else {
   // initialize epoch file with 1 for the next time.
   byte[] storeData = Epoch.newInstance(currentEpoch + 1).getProto()
   .toByteArray();
-   

[1/2] hadoop git commit: Fixing MR intermediate spills. Contributed by Arun Suresh.

2015-05-14 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4caadaa8b -> ad3196e01


Fixing MR intermediate spills. Contributed by Arun Suresh.

(cherry picked from commit 6b710a42e00acca405e085724c89cda016cf7442)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87862970
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87862970
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87862970

Branch: refs/heads/branch-2
Commit: 87862970f15e980eaf0b25e3eaf507becf349ae5
Parents: 4caadaa
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu May 14 16:07:56 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu May 14 16:56:56 2015 -0700

--
 .../hadoop/mapred/LocalContainerLauncher.java   | 10 +++
 .../hadoop/mapred/TaskAttemptListenerImpl.java  | 13 ++---
 .../org/apache/hadoop/mapred/YarnChild.java | 18 
 .../hadoop/mapreduce/v2/app/MRAppMaster.java| 24 +++-
 .../mapred/TestTaskAttemptFinishingMonitor.java |  2 +-
 .../mapred/TestTaskAttemptListenerImpl.java |  4 +--
 .../hadoop/mapreduce/v2/app/TestFail.java   |  2 +-
 .../java/org/apache/hadoop/mapred/Task.java | 25 
 .../apache/hadoop/mapreduce/CryptoUtils.java| 17 ++-
 .../apache/hadoop/mapreduce/JobSubmitter.java   | 16 ---
 .../hadoop/mapreduce/security/TokenCache.java   | 10 +++
 .../mapreduce/task/reduce/LocalFetcher.java |  6 ++--
 .../src/site/markdown/EncryptedShuffle.md   |  8 ++
 .../mapreduce/task/reduce/TestMerger.java   |  2 +-
 .../TestMRIntermediateDataEncryption.java   | 30 ++--
 .../apache/hadoop/mapred/TestMapProgress.java   | 14 +
 16 files changed, 155 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87862970/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index 52b3497..9d8b4a5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -83,6 +83,7 @@ public class LocalContainerLauncher extends AbstractService 
implements
   private final ClassLoader jobClassLoader;
   private ExecutorService taskRunner;
   private Thread eventHandler;
+  private byte[] encryptedSpillKey = new byte[] {0};
   private BlockingQueueContainerLauncherEvent eventQueue =
   new LinkedBlockingQueueContainerLauncherEvent();
 
@@ -176,6 +177,11 @@ public class LocalContainerLauncher extends 
AbstractService implements
 }
   }
 
+  public void setEncryptedSpillKey(byte[] encryptedSpillKey) {
+if (encryptedSpillKey != null) {
+  this.encryptedSpillKey = encryptedSpillKey;
+}
+  }
 
   /*
* Uber-AM lifecycle/ordering (normal case):
@@ -382,6 +388,10 @@ public class LocalContainerLauncher extends 
AbstractService implements
 // map to handle)
 conf.setBoolean(mapreduce.task.uberized, true);
 
+// Check and handle Encrypted spill key
+task.setEncryptedSpillKey(encryptedSpillKey);
+YarnChild.setEncryptedSpillKeyIfRequired(task);
+
 // META-FIXME: do we want the extra sanity-checking (doneWithMaps,
 // etc.), or just assume/hope the state machine(s) and uber-AM work
 // as expected?

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87862970/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
index c6b90bc..6627604 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
+++ 

[2/2] hadoop git commit: Fixing HDFS state-store. Contributed by Arun Suresh.

2015-05-14 Thread vinodkv
Fixing HDFS state-store. Contributed by Arun Suresh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a2a9553
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a2a9553
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a2a9553

Branch: refs/heads/trunk
Commit: 9a2a9553eee454ecd18120535d3e845f86fc3584
Parents: 6b710a4
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu May 14 16:13:51 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu May 14 16:13:51 2015 -0700

--
 .../recovery/FileSystemRMStateStore.java|  83 +++---
 .../recovery/RMStateStoreTestBase.java  |  16 +-
 .../recovery/TestFSRMStateStore.java| 151 ---
 .../src/site/markdown/ResourceManagerHA.md  |   2 +-
 4 files changed, 207 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a2a9553/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
index 68d26bb..6920bb5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
@@ -25,6 +25,7 @@ import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.EnumSet;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -38,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -84,7 +86,8 @@ public class FileSystemRMStateStore extends RMStateStore {
   protected static final String AMRMTOKEN_SECRET_MANAGER_NODE =
   AMRMTokenSecretManagerNode;
 
-  @VisibleForTesting
+  private static final String UNREADABLE_BY_SUPERUSER_XATTRIB =
+  security.hdfs.unreadable.by.superuser;
   protected FileSystem fs;
   @VisibleForTesting
   protected Configuration fsConf;
@@ -97,6 +100,7 @@ public class FileSystemRMStateStore extends RMStateStore {
   private Path dtSequenceNumberPath = null;
   private int fsNumRetries;
   private long fsRetryInterval;
+  private boolean isHDFS;
 
   @VisibleForTesting
   Path fsWorkingPath;
@@ -141,11 +145,17 @@ public class FileSystemRMStateStore extends RMStateStore {
 }
 
 fs = fsWorkingPath.getFileSystem(fsConf);
+isHDFS = fs.getScheme().toLowerCase().contains(hdfs);
 mkdirsWithRetries(rmDTSecretManagerRoot);
 mkdirsWithRetries(rmAppRoot);
 mkdirsWithRetries(amrmTokenSecretManagerRoot);
   }
 
+  @VisibleForTesting
+  void setIsHDFS(boolean isHDFS) {
+this.isHDFS = isHDFS;
+  }
+
   @Override
   protected synchronized void closeInternal() throws Exception {
 closeWithRetries();
@@ -175,9 +185,9 @@ public class FileSystemRMStateStore extends RMStateStore {
 byte[] data =
 ((VersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray();
 if (existsWithRetries(versionNodePath)) {
-  updateFile(versionNodePath, data);
+  updateFile(versionNodePath, data, false);
 } else {
-  writeFileWithRetries(versionNodePath, data);
+  writeFileWithRetries(versionNodePath, data, false);
 }
   }
   
@@ -194,12 +204,12 @@ public class FileSystemRMStateStore extends RMStateStore {
   // increment epoch and store it
   byte[] storeData = Epoch.newInstance(currentEpoch + 1).getProto()
   .toByteArray();
-  updateFile(epochNodePath, storeData);
+  updateFile(epochNodePath, storeData, false);
 } else {
   // initialize epoch file with 1 for the next time.
   byte[] storeData = Epoch.newInstance(currentEpoch + 1).getProto()
   .toByteArray();
-  writeFileWithRetries(epochNodePath, storeData);
+  

[1/2] hadoop git commit: Fixing MR intermediate spills. Contributed by Arun Suresh.

2015-05-14 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 516175143 -> 3c51654d5


Fixing MR intermediate spills. Contributed by Arun Suresh.

(cherry picked from commit 6b710a42e00acca405e085724c89cda016cf7442)
(cherry picked from commit 87862970f15e980eaf0b25e3eaf507becf349ae5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9d7bbd9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9d7bbd9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9d7bbd9

Branch: refs/heads/branch-2.7
Commit: d9d7bbd99b533da5ca570deb3b8dc8a959c6b4db
Parents: 5161751
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu May 14 16:07:56 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu May 14 17:00:56 2015 -0700

--
 .../hadoop/mapred/LocalContainerLauncher.java   | 10 +++
 .../hadoop/mapred/TaskAttemptListenerImpl.java  | 13 ++---
 .../org/apache/hadoop/mapred/YarnChild.java | 18 
 .../hadoop/mapreduce/v2/app/MRAppMaster.java| 24 +++-
 .../mapred/TestTaskAttemptListenerImpl.java |  4 +--
 .../hadoop/mapreduce/v2/app/TestFail.java   |  2 +-
 .../java/org/apache/hadoop/mapred/Task.java | 25 
 .../apache/hadoop/mapreduce/CryptoUtils.java| 17 ++-
 .../apache/hadoop/mapreduce/JobSubmitter.java   | 15 --
 .../hadoop/mapreduce/security/TokenCache.java   | 10 +++
 .../mapreduce/task/reduce/LocalFetcher.java |  6 ++--
 .../src/site/markdown/EncryptedShuffle.md   |  8 ++
 .../mapreduce/task/reduce/TestMerger.java   |  2 +-
 .../TestMRIntermediateDataEncryption.java   | 30 ++--
 .../apache/hadoop/mapred/TestMapProgress.java   | 14 +
 15 files changed, 154 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9d7bbd9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index 218ac83..b30a695 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -82,6 +82,7 @@ public class LocalContainerLauncher extends AbstractService 
implements
   private final TaskUmbilicalProtocol umbilical;
   private ExecutorService taskRunner;
   private Thread eventHandler;
+  private byte[] encryptedSpillKey = new byte[] {0};
   private BlockingQueueContainerLauncherEvent eventQueue =
   new LinkedBlockingQueueContainerLauncherEvent();
 
@@ -156,6 +157,11 @@ public class LocalContainerLauncher extends 
AbstractService implements
 }
   }
 
+  public void setEncryptedSpillKey(byte[] encryptedSpillKey) {
+if (encryptedSpillKey != null) {
+  this.encryptedSpillKey = encryptedSpillKey;
+}
+  }
 
   /*
* Uber-AM lifecycle/ordering (normal case):
@@ -354,6 +360,10 @@ public class LocalContainerLauncher extends 
AbstractService implements
 // map to handle)
 conf.setBoolean(mapreduce.task.uberized, true);
 
+// Check and handle Encrypted spill key
+task.setEncryptedSpillKey(encryptedSpillKey);
+YarnChild.setEncryptedSpillKeyIfRequired(task);
+
 // META-FIXME: do we want the extra sanity-checking (doneWithMaps,
 // etc.), or just assume/hope the state machine(s) and uber-AM work
 // as expected?

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9d7bbd9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
index c6b90bc..6627604 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
+++ 

[2/2] hadoop git commit: Fixing HDFS state-store. Contributed by Arun Suresh.

2015-05-14 Thread vinodkv
Fixing HDFS state-store. Contributed by Arun Suresh.

(cherry picked from commit 9a2a9553eee454ecd18120535d3e845f86fc3584)
(cherry picked from commit ad3196e01667bd6798a1988fddb0c0ae32f6687c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c51654d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c51654d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c51654d

Branch: refs/heads/branch-2.7
Commit: 3c51654d574e80e245ff4a5c184a8dca17782516
Parents: d9d7bbd
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu May 14 16:13:51 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu May 14 18:33:51 2015 -0700

--
 .../recovery/FileSystemRMStateStore.java|  82 +++---
 .../recovery/RMStateStoreTestBase.java  |  16 +-
 .../recovery/TestFSRMStateStore.java| 151 ---
 .../src/site/markdown/ResourceManagerHA.md  |   2 +-
 4 files changed, 207 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c51654d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
index 8147597..0f68365 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java
@@ -25,6 +25,7 @@ import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.EnumSet;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -38,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@@ -84,6 +86,8 @@ public class FileSystemRMStateStore extends RMStateStore {
   protected static final String AMRMTOKEN_SECRET_MANAGER_NODE =
   AMRMTokenSecretManagerNode;
 
+  private static final String UNREADABLE_BY_SUPERUSER_XATTRIB =
+  security.hdfs.unreadable.by.superuser;
   protected FileSystem fs;
 
   private Path rootDirPath;
@@ -94,6 +98,7 @@ public class FileSystemRMStateStore extends RMStateStore {
   private Path dtSequenceNumberPath = null;
   private int fsNumRetries;
   private long fsRetryInterval;
+  private boolean isHDFS;
 
   @VisibleForTesting
   Path fsWorkingPath;
@@ -129,11 +134,17 @@ public class FileSystemRMStateStore extends RMStateStore {
 conf.set(dfs.client.retry.policy.spec, retryPolicy);
 
 fs = fsWorkingPath.getFileSystem(conf);
+isHDFS = fs.getScheme().toLowerCase().contains(hdfs);
 mkdirsWithRetries(rmDTSecretManagerRoot);
 mkdirsWithRetries(rmAppRoot);
 mkdirsWithRetries(amrmTokenSecretManagerRoot);
   }
 
+  @VisibleForTesting
+  void setIsHDFS(boolean isHDFS) {
+this.isHDFS = isHDFS;
+  }
+
   @Override
   protected synchronized void closeInternal() throws Exception {
 closeWithRetries();
@@ -163,9 +174,9 @@ public class FileSystemRMStateStore extends RMStateStore {
 byte[] data =
 ((VersionPBImpl) CURRENT_VERSION_INFO).getProto().toByteArray();
 if (existsWithRetries(versionNodePath)) {
-  updateFile(versionNodePath, data);
+  updateFile(versionNodePath, data, false);
 } else {
-  writeFileWithRetries(versionNodePath, data);
+  writeFileWithRetries(versionNodePath, data, false);
 }
   }
   
@@ -182,12 +193,12 @@ public class FileSystemRMStateStore extends RMStateStore {
   // increment epoch and store it
   byte[] storeData = Epoch.newInstance(currentEpoch + 1).getProto()
   .toByteArray();
-  updateFile(epochNodePath, storeData);
+  updateFile(epochNodePath, storeData, false);
 } else {
   // initialize epoch file with 1 for the next time.
   byte[] storeData = 

[2/2] hadoop git commit: HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff. Contributed by Brahma Reddy Battula.

2015-05-14 Thread aajisaka
HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee7beda6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee7beda6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee7beda6

Branch: refs/heads/trunk
Commit: ee7beda6e3c640685c02185a76bed56eb85731fa
Parents: cbc01ed
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri May 15 13:54:35 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Fri May 15 13:54:35 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |3 +
 .../src/main/docs/changes/ChangesFancyStyle.css |  170 --
 .../main/docs/changes/ChangesSimpleStyle.css|   49 -
 .../src/main/docs/changes/changes2html.pl   |  286 
 .../hadoop-hdfs/src/main/docs/releasenotes.html |1 -
 .../src/main/docs/src/documentation/README.txt  |7 -
 .../classes/CatalogManager.properties   |   40 -
 .../main/docs/src/documentation/conf/cli.xconf  |  327 
 .../src/documentation/content/xdocs/index.xml   |   46 -
 .../src/documentation/content/xdocs/site.xml|  289 
 .../src/documentation/content/xdocs/tabs.xml|   37 -
 .../src/documentation/content/xdocs/webhdfs.xml | 1577 --
 .../resources/images/FI-framework.gif   |  Bin 30985 - 0 bytes
 .../resources/images/FI-framework.odg   |  Bin 80461 - 0 bytes
 .../resources/images/architecture.gif   |  Bin 15461 - 0 bytes
 .../resources/images/core-logo.gif  |  Bin 6665 - 0 bytes
 .../documentation/resources/images/favicon.ico  |  Bin 766 - 0 bytes
 .../resources/images/hadoop-logo-big.jpg|  Bin 127869 - 0 bytes
 .../resources/images/hadoop-logo.jpg|  Bin 9443 - 0 bytes
 .../resources/images/request-identify.jpg   |  Bin 39731 - 0 bytes
 .../main/docs/src/documentation/skinconf.xml|  366 
 .../hadoop-hdfs/src/main/docs/status.xml|   75 -
 22 files changed, 3 insertions(+), 3270 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b90a773..445b7c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -549,6 +549,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6184. Capture NN's thread dump when it fails over.
 (Ming Ma via aajisaka)
 
+HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff.
+(Brahma Reddy Battula via aajisaka)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css
deleted file mode 100644
index 5eef241..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the License); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an AS IS BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-/**
- * General
- */
-
-img { border: 0; }
-
-#content table {
-  border: 0;
-  width: 100%;
-}
-/*Hack to get IE to render the table at 100%*/
-* html #content table { margin-left: -3px; }
-
-#content th,
-#content td {
-  margin: 0;
-  padding: 0;
-  vertical-align: top;
-}
-
-.clearboth {
-  clear: both;
-}
-
-.note, .warning, .fixme {
-  border: solid black 1px;
-  margin: 1em 3em;
-}
-
-.note .label {
-  background: #369;
-  color: white;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.note .content {
-  background: #F0F0FF;
-  color: black;
-  line-height: 120%;
-  font-size: 90%;
-  padding: 5px 10px;
-}
-.warning .label {
-  

[1/2] hadoop git commit: HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff. Contributed by Brahma Reddy Battula.

2015-05-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk cbc01ed08 -> ee7beda6e


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee7beda6/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
deleted file mode 100644
index c8e0c62..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
+++ /dev/null
@@ -1,1577 +0,0 @@
-?xml version=1.0?
-!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the License); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an AS IS BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
---
-
-!DOCTYPE document PUBLIC -//APACHE//DTD Documentation V2.0//EN 
http://forrest.apache.org/dtd/document-v20.dtd;
-
-document
-  header
-titleWebHDFS REST API/title
-  /header
-
-  body
-section
-  titleDocument Conventions/title
-table
-trtdcodeMonospaced/code/tdtdUsed for commands, HTTP request and 
responses and code blocks./td/tr
-trtdcodelt;Monospacedgt;/code/tdtdUser entered values./td/tr
-trtdcode[Monospaced]/code/tdtdOptional values.  When the value is 
not specified, the default value is used./td/tr
-trtdemItalics/em/tdtdImportant phrases and words./td/tr
-/table
-/section
-!-- 
* 
--
-section
-  titleIntroduction/title
-p
-  The HTTP REST API supports the complete FileSystem interface for HDFS.
-  The operations and the corresponding FileSystem methods are shown in the 
next section.
-  The Section a href=#ParameterDictionaryHTTP Query Parameter 
Dictionary/a specifies the parameter details
-  such as the defaults and the valid values.
-/p
-  section id=Operations
-titleOperations/title
-ul
-  liHTTP GET
-  ul
-lia href=#OPENcodeOPEN/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/openFileSystem.open/a)
-/li
-lia href=#GETFILESTATUScodeGETFILESTATUS/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/getFileStatusFileSystem.getFileStatus/a)
-/li
-lia href=#LISTSTATUScodeLISTSTATUS/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/listStatusFileSystem.listStatus/a)
-/li
-lia href=#GETCONTENTSUMMARYcodeGETCONTENTSUMMARY/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/getContentSummaryFileSystem.getContentSummary/a)
-/li
-lia href=#GETFILECHECKSUMcodeGETFILECHECKSUM/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/getFileChecksumFileSystem.getFileChecksum/a)
-/li
-lia href=#GETHOMEDIRECTORYcodeGETHOMEDIRECTORY/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/getHomeDirectoryFileSystem.getHomeDirectory/a)
-/li
-lia href=#GETDELEGATIONTOKENcodeGETDELEGATIONTOKEN/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/getDelegationTokenFileSystem.getDelegationToken/a)
-/li
-  /ul/li
-  liHTTP PUT
-  ul
-lia href=#CREATEcodeCREATE/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/createFileSystem.create/a)
-/li
-lia href=#MKDIRScodeMKDIRS/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/mkdirsFileSystem.mkdirs/a)
-/li
-lia href=#RENAMEcodeRENAME/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/renameFileSystem.rename/a)
-/li
-lia href=#SETREPLICATIONcodeSETREPLICATION/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/setReplicationFileSystem.setReplication/a)
-/li
-lia href=#SETOWNERcodeSETOWNER/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/setOwnerFileSystem.setOwner/a)
-/li
-lia href=#SETPERMISSIONcodeSETPERMISSION/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/setPermissionFileSystem.setPermission/a)
-/li
-lia href=#SETTIMEScodeSETTIMES/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/setTimesFileSystem.setTimes/a)
-/li
-lia href=#RENEWDELEGATIONTOKENcodeRENEWDELEGATIONTOKEN/code/a
-(see DistributedFileSystem.renewDelegationToken)
-  

[1/2] hadoop git commit: HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff. Contributed by Brahma Reddy Battula.

2015-05-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2e5b7f24a -> 7fb3486eb


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fb3486e/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
deleted file mode 100644
index c8e0c62..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/webhdfs.xml
+++ /dev/null
@@ -1,1577 +0,0 @@
-?xml version=1.0?
-!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the License); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an AS IS BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
---
-
-!DOCTYPE document PUBLIC -//APACHE//DTD Documentation V2.0//EN 
http://forrest.apache.org/dtd/document-v20.dtd;
-
-document
-  header
-titleWebHDFS REST API/title
-  /header
-
-  body
-section
-  titleDocument Conventions/title
-table
-trtdcodeMonospaced/code/tdtdUsed for commands, HTTP request and 
responses and code blocks./td/tr
-trtdcodelt;Monospacedgt;/code/tdtdUser entered values./td/tr
-trtdcode[Monospaced]/code/tdtdOptional values.  When the value is 
not specified, the default value is used./td/tr
-trtdemItalics/em/tdtdImportant phrases and words./td/tr
-/table
-/section
-!-- 
* 
--
-section
-  titleIntroduction/title
-p
-  The HTTP REST API supports the complete FileSystem interface for HDFS.
-  The operations and the corresponding FileSystem methods are shown in the 
next section.
-  The Section a href=#ParameterDictionaryHTTP Query Parameter 
Dictionary/a specifies the parameter details
-  such as the defaults and the valid values.
-/p
-  section id=Operations
-titleOperations/title
-ul
-  liHTTP GET
-  ul
-lia href=#OPENcodeOPEN/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/openFileSystem.open/a)
-/li
-lia href=#GETFILESTATUScodeGETFILESTATUS/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/getFileStatusFileSystem.getFileStatus/a)
-/li
-lia href=#LISTSTATUScodeLISTSTATUS/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/listStatusFileSystem.listStatus/a)
-/li
-lia href=#GETCONTENTSUMMARYcodeGETCONTENTSUMMARY/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/getContentSummaryFileSystem.getContentSummary/a)
-/li
-lia href=#GETFILECHECKSUMcodeGETFILECHECKSUM/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/getFileChecksumFileSystem.getFileChecksum/a)
-/li
-lia href=#GETHOMEDIRECTORYcodeGETHOMEDIRECTORY/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/getHomeDirectoryFileSystem.getHomeDirectory/a)
-/li
-lia href=#GETDELEGATIONTOKENcodeGETDELEGATIONTOKEN/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/getDelegationTokenFileSystem.getDelegationToken/a)
-/li
-  /ul/li
-  liHTTP PUT
-  ul
-lia href=#CREATEcodeCREATE/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/createFileSystem.create/a)
-/li
-lia href=#MKDIRScodeMKDIRS/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/mkdirsFileSystem.mkdirs/a)
-/li
-lia href=#RENAMEcodeRENAME/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/renameFileSystem.rename/a)
-/li
-lia href=#SETREPLICATIONcodeSETREPLICATION/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/setReplicationFileSystem.setReplication/a)
-/li
-lia href=#SETOWNERcodeSETOWNER/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/setOwnerFileSystem.setOwner/a)
-/li
-lia href=#SETPERMISSIONcodeSETPERMISSION/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/setPermissionFileSystem.setPermission/a)
-/li
-lia href=#SETTIMEScodeSETTIMES/code/a
-(see a 
href=ext:api/org/apache/hadoop/fs/filesystem/setTimesFileSystem.setTimes/a)
-/li
-lia href=#RENEWDELEGATIONTOKENcodeRENEWDELEGATIONTOKEN/code/a
-(see DistributedFileSystem.renewDelegationToken)

[2/2] hadoop git commit: HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff. Contributed by Brahma Reddy Battula.

2015-05-14 Thread aajisaka
HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff. 
Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fb3486e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fb3486e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fb3486e

Branch: refs/heads/branch-2
Commit: 7fb3486eb33aa6537031312b08d452f2c480f302
Parents: 2e5b7f2
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri May 15 13:57:58 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Fri May 15 13:57:58 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |3 +
 .../src/main/docs/changes/ChangesFancyStyle.css |  170 --
 .../main/docs/changes/ChangesSimpleStyle.css|   49 -
 .../src/main/docs/changes/changes2html.pl   |  282 
 .../hadoop-hdfs/src/main/docs/releasenotes.html |1 -
 .../src/main/docs/src/documentation/README.txt  |7 -
 .../classes/CatalogManager.properties   |   40 -
 .../main/docs/src/documentation/conf/cli.xconf  |  327 
 .../src/documentation/content/xdocs/index.xml   |   46 -
 .../src/documentation/content/xdocs/site.xml|  289 
 .../src/documentation/content/xdocs/tabs.xml|   37 -
 .../src/documentation/content/xdocs/webhdfs.xml | 1577 --
 .../resources/images/FI-framework.gif   |  Bin 30985 - 0 bytes
 .../resources/images/FI-framework.odg   |  Bin 80461 - 0 bytes
 .../resources/images/architecture.gif   |  Bin 15461 - 0 bytes
 .../resources/images/core-logo.gif  |  Bin 6665 - 0 bytes
 .../documentation/resources/images/favicon.ico  |  Bin 766 - 0 bytes
 .../resources/images/hadoop-logo-big.jpg|  Bin 127869 - 0 bytes
 .../resources/images/hadoop-logo.jpg|  Bin 9443 - 0 bytes
 .../resources/images/request-identify.jpg   |  Bin 39731 - 0 bytes
 .../main/docs/src/documentation/skinconf.xml|  366 
 .../hadoop-hdfs/src/main/docs/status.xml|   75 -
 22 files changed, 3 insertions(+), 3266 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fb3486e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a6ef0d5..3105bb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -216,6 +216,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6184. Capture NN's thread dump when it fails over.
 (Ming Ma via aajisaka)
 
+HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff.
+(Brahma Reddy Battula via aajisaka)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fb3486e/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css
deleted file mode 100644
index 5eef241..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/changes/ChangesFancyStyle.css
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-/**
- * General
- */
-
-img { border: 0; }
-
-#content table {
-  border: 0;
-  width: 100%;
-}
-/*Hack to get IE to render the table at 100%*/
-* html #content table { margin-left: -3px; }
-
-#content th,
-#content td {
-  margin: 0;
-  padding: 0;
-  vertical-align: top;
-}
-
-.clearboth {
-  clear: both;
-}
-
-.note, .warning, .fixme {
-  border: solid black 1px;
-  margin: 1em 3em;
-}
-
-.note .label {
-  background: #369;
-  color: white;
-  font-weight: bold;
-  padding: 5px 10px;
-}
-.note .content {
-  background: #F0F0FF;
-  color: black;
-  line-height: 120%;
-  font-size: 90%;
-  padding: 5px 10px;
-}
-.warning .label {
-  

hadoop git commit: HDFS-8371. Fix test failure in TestHdfsConfigFields for spanreceiver properties. Contributed by Ray Chiang.

2015-05-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9a2a9553e - cbc01ed08


HDFS-8371. Fix test failure in TestHdfsConfigFields for spanreceiver 
properties. Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbc01ed0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbc01ed0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbc01ed0

Branch: refs/heads/trunk
Commit: cbc01ed08ea36f70afca6112ccdbf7331567070b
Parents: 9a2a955
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri May 15 12:14:03 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Fri May 15 12:14:47 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java  | 4 
 2 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc01ed0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4df18ec..b90a773 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -761,6 +761,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8150. Make getFileChecksum fail for blocks under construction
 (J.Andreina via vinayakumarb)
 
+HDFS-8371. Fix test failure in TestHdfsConfigFields for spanreceiver
+properties. (Ray Chiang via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbc01ed0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index a1f8a3c..ec0450a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -76,5 +76,9 @@ public class TestHdfsConfigFields extends 
TestConfigurationFieldsBase {
 
 // Some properties have moved to HdfsClientConfigKeys
 
xmlPropsToSkipCompare.add("dfs.client.short.circuit.replica.stale.threshold.ms");
+
+// Ignore SpanReceiveHost properties
+xmlPropsToSkipCompare.add("dfs.htrace.spanreceiver.classes");
+xmlPropsToSkipCompare.add("dfs.client.htrace.spanreceiver.classes");
   }
 }



hadoop git commit: HDFS-8371. Fix test failure in TestHdfsConfigFields for spanreceiver properties. Contributed by Ray Chiang.

2015-05-14 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ad3196e01 - 2e5b7f24a


HDFS-8371. Fix test failure in TestHdfsConfigFields for spanreceiver 
properties. Contributed by Ray Chiang.

(cherry picked from commit cbc01ed08ea36f70afca6112ccdbf7331567070b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e5b7f24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e5b7f24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e5b7f24

Branch: refs/heads/branch-2
Commit: 2e5b7f24ab6bedb2544563c80c40b5edba66f920
Parents: ad3196e
Author: Akira Ajisaka aajis...@apache.org
Authored: Fri May 15 12:14:03 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Fri May 15 12:15:26 2015 +0900

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java  | 4 
 2 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e5b7f24/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5961d34..a6ef0d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -431,6 +431,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8150. Make getFileChecksum fail for blocks under construction
 (J.Andreina via vinayakumarb)
 
+HDFS-8371. Fix test failure in TestHdfsConfigFields for spanreceiver
+properties. (Ray Chiang via aajisaka)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e5b7f24/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index a1f8a3c..ec0450a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -76,5 +76,9 @@ public class TestHdfsConfigFields extends 
TestConfigurationFieldsBase {
 
 // Some properties have moved to HdfsClientConfigKeys
 
xmlPropsToSkipCompare.add("dfs.client.short.circuit.replica.stale.threshold.ms");
+
+// Ignore SpanReceiveHost properties
+xmlPropsToSkipCompare.add("dfs.htrace.spanreceiver.classes");
+xmlPropsToSkipCompare.add("dfs.client.htrace.spanreceiver.classes");
   }
 }



[2/2] hadoop git commit: HADOOP-11960. Enable Azure-Storage Client Side logging. Contributed by Dushyanth.

2015-05-14 Thread cnauroth
HADOOP-11960. Enable Azure-Storage Client Side logging. Contributed by 
Dushyanth.

(cherry picked from commit cb8e69a80cecb95abdfc93a787bea0bedef275ed)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91855c23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91855c23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91855c23

Branch: refs/heads/branch-2
Commit: 91855c23404c36dc08d240a621098ccfb43c909d
Parents: 7fb3486
Author: cnauroth cnaur...@apache.org
Authored: Thu May 14 22:22:24 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Thu May 14 22:22:46 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../fs/azure/AzureNativeFileSystemStore.java|   5 +
 .../TestNativeAzureFileSystemClientLogging.java | 130 +++
 3 files changed, 138 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91855c23/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 412719d..438f1a2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -98,6 +98,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11713. ViewFileSystem should support snapshot methods.
 (Rakesh R via cnauroth)
 
+HADOOP-11960. Enable Azure-Storage Client Side logging.
+(Dushyanth via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91855c23/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 5dc0963..3267d8b 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -148,6 +148,8 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
  private static final String KEY_SELF_THROTTLE_READ_FACTOR = 
"fs.azure.selfthrottling.read.factor";
  private static final String KEY_SELF_THROTTLE_WRITE_FACTOR = 
"fs.azure.selfthrottling.write.factor";
 
+  private static final String KEY_ENABLE_STORAGE_CLIENT_LOGGING = 
"fs.azure.storage.client.logging";
+
   private static final String PERMISSION_METADATA_KEY = "hdi_permission";
   private static final String OLD_PERMISSION_METADATA_KEY = "asv_permission";
   private static final String IS_FOLDER_METADATA_KEY = "hdi_isfolder";
@@ -681,6 +683,9 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 selfThrottlingWriteFactor = sessionConfiguration.getFloat(
 KEY_SELF_THROTTLE_WRITE_FACTOR, DEFAULT_SELF_THROTTLE_WRITE_FACTOR);
 
+OperationContext.setLoggingEnabledByDefault(sessionConfiguration.
+getBoolean(KEY_ENABLE_STORAGE_CLIENT_LOGGING, false));
+
 if (LOG.isDebugEnabled()) {
   LOG.debug(String
   .format(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91855c23/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
new file mode 100644
index 000..da39fa3
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language 

[1/2] hadoop git commit: HADOOP-11960. Enable Azure-Storage Client Side logging. Contributed by Dushyanth.

2015-05-14 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7fb3486eb - 91855c234
  refs/heads/trunk ee7beda6e - cb8e69a80


HADOOP-11960. Enable Azure-Storage Client Side logging. Contributed by 
Dushyanth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb8e69a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb8e69a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb8e69a8

Branch: refs/heads/trunk
Commit: cb8e69a80cecb95abdfc93a787bea0bedef275ed
Parents: ee7beda
Author: cnauroth cnaur...@apache.org
Authored: Thu May 14 22:22:24 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Thu May 14 22:22:24 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 .../fs/azure/AzureNativeFileSystemStore.java|   5 +
 .../TestNativeAzureFileSystemClientLogging.java | 130 +++
 3 files changed, 138 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb8e69a8/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2f8acb0..aecfde4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -569,6 +569,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11713. ViewFileSystem should support snapshot methods.
 (Rakesh R via cnauroth)
 
+HADOOP-11960. Enable Azure-Storage Client Side logging.
+(Dushyanth via cnauroth)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb8e69a8/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 5dc0963..3267d8b 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -148,6 +148,8 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
  private static final String KEY_SELF_THROTTLE_READ_FACTOR = 
"fs.azure.selfthrottling.read.factor";
  private static final String KEY_SELF_THROTTLE_WRITE_FACTOR = 
"fs.azure.selfthrottling.write.factor";
 
+  private static final String KEY_ENABLE_STORAGE_CLIENT_LOGGING = 
"fs.azure.storage.client.logging";
+
   private static final String PERMISSION_METADATA_KEY = "hdi_permission";
   private static final String OLD_PERMISSION_METADATA_KEY = "asv_permission";
   private static final String IS_FOLDER_METADATA_KEY = "hdi_isfolder";
@@ -681,6 +683,9 @@ public class AzureNativeFileSystemStore implements 
NativeFileSystemStore {
 selfThrottlingWriteFactor = sessionConfiguration.getFloat(
 KEY_SELF_THROTTLE_WRITE_FACTOR, DEFAULT_SELF_THROTTLE_WRITE_FACTOR);
 
+OperationContext.setLoggingEnabledByDefault(sessionConfiguration.
+getBoolean(KEY_ENABLE_STORAGE_CLIENT_LOGGING, false));
+
 if (LOG.isDebugEnabled()) {
   LOG.debug(String
   .format(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb8e69a8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
new file mode 100644
index 000..da39fa3
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemClientLogging.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 

hadoop git commit: HDFS-6888. Allow selectively audit logging ops (Contributed by Chen He)

2015-05-14 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 91855c234 - d9455c790


HDFS-6888. Allow selectively audit logging ops (Contributed by Chen He)

(cherry picked from commit 7f2e89fa7082840bfa3e8e593c93db050a80d04f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9455c79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9455c79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9455c79

Branch: refs/heads/branch-2
Commit: d9455c790f6a7539ba411280bd836945977e39ab
Parents: 91855c2
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri May 15 11:05:01 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri May 15 11:05:26 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../hdfs/server/namenode/FSNamesystem.java  |  11 +-
 .../src/main/resources/hdfs-default.xml |   9 ++
 .../server/namenode/TestAuditLogAtDebug.java| 131 +++
 5 files changed, 152 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9455c79/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3105bb4..62b1d97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -219,6 +219,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff.
 (Brahma Reddy Battula via aajisaka)
 
+HDFS-6888. Allow selectively audit logging ops (Chen He via vinayakumarb)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9455c79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 7d2a25b..3669685 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -339,6 +339,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT 
= false;
   public static final String  DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY = 
"dfs.namenode.audit.log.async";
   public static final boolean DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT = false;
+  public static final String  DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST = 
"dfs.namenode.audit.log.debug.cmdlist";
 
   public static final String  DFS_BALANCER_MOVEDWINWIDTH_KEY = 
"dfs.balancer.movedWinWidth";
   public static final longDFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9455c79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index bf92c71..8fe32fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -8143,15 +8143,20 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
* defined in the config file. It can also be explicitly listed in the
* config file.
*/
-  private static class DefaultAuditLogger extends HdfsAuditLogger {
+  @VisibleForTesting
+  static class DefaultAuditLogger extends HdfsAuditLogger {
 
 private boolean logTokenTrackingId;
+private Set<String> debugCmdSet = new HashSet<String>();
 
 @Override
 public void initialize(Configuration conf) {
   logTokenTrackingId = conf.getBoolean(
   DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY,
   DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT);
+
+  debugCmdSet.addAll(Arrays.asList(conf.getTrimmedStrings(
+  DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST)));
 }
 
 @Override
@@ -8159,7 +8164,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 InetAddress addr, String cmd, String src, String dst,
 

hadoop git commit: HDFS-6888. Allow selectively audit logging ops (Contributed by Chen He)

2015-05-14 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk cb8e69a80 - 3bef7c80a


HDFS-6888. Allow selectively audit logging ops (Contributed by Chen He)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3bef7c80
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3bef7c80
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3bef7c80

Branch: refs/heads/trunk
Commit: 3bef7c80a97709b367781180b2e11fc50653d3c8
Parents: cb8e69a
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri May 15 11:05:01 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Fri May 15 11:07:51 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../hdfs/server/namenode/FSNamesystem.java  |  11 +-
 .../src/main/resources/hdfs-default.xml |   9 ++
 .../server/namenode/TestAuditLogAtDebug.java| 131 +++
 5 files changed, 152 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bef7c80/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 445b7c2..6c0923c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -552,6 +552,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-8350. Remove old webhdfs.xml and other outdated documentation stuff.
 (Brahma Reddy Battula via aajisaka)
 
+HDFS-6888. Allow selectively audit logging ops (Chen He via vinayakumarb)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bef7c80/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index ae056fa..1d0cf4b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -341,6 +341,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT 
= false;
   public static final String  DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY = 
"dfs.namenode.audit.log.async";
   public static final boolean DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT = false;
+  public static final String  DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST = 
"dfs.namenode.audit.log.debug.cmdlist";
 
   public static final String  DFS_BALANCER_MOVEDWINWIDTH_KEY = 
"dfs.balancer.movedWinWidth";
   public static final longDFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3bef7c80/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 33aaa72..4d82fab 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -8149,15 +8149,20 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
* defined in the config file. It can also be explicitly listed in the
* config file.
*/
-  private static class DefaultAuditLogger extends HdfsAuditLogger {
+  @VisibleForTesting
+  static class DefaultAuditLogger extends HdfsAuditLogger {
 
 private boolean logTokenTrackingId;
+private Set<String> debugCmdSet = new HashSet<String>();
 
 @Override
 public void initialize(Configuration conf) {
   logTokenTrackingId = conf.getBoolean(
   DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY,
   DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT);
+
+  debugCmdSet.addAll(Arrays.asList(conf.getTrimmedStrings(
+  DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST)));
 }
 
 @Override
@@ -8165,7 +8170,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 InetAddress addr, String cmd, String src, String dst,
 FileStatus status, UserGroupInformation ugi,
 

[1/2] hadoop git commit: HADOOP-8174. Remove confusing comment in Path#isAbsolute() (Contributed by Suresh Srinivas)

2015-05-14 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4b5915be9 - 085841d9a
  refs/heads/trunk b2c85db86 - 0daa5ada6


HADOOP-8174. Remove confusing comment in Path#isAbsolute() (Contributed by 
Suresh Srinivas)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0daa5ada
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0daa5ada
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0daa5ada

Branch: refs/heads/trunk
Commit: 0daa5ada68db483275aaa7f2ed9a2b5eaf5bb9bd
Parents: b2c85db
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu May 14 14:17:36 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Thu May 14 14:17:36 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../src/main/java/org/apache/hadoop/fs/Path.java| 12 ++--
 2 files changed, 5 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0daa5ada/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5e6b9ea..bf39c94 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -677,6 +677,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11361. Fix a race condition in MetricsSourceAdapter.updateJmxCache.
 (Brahma Reddy Battula via ozawa)
 
+HADOOP-8174. Remove confusing comment in Path#isAbsolute()
+(Suresh Srinivas via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0daa5ada/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index caeb7a1..a38a46c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -31,8 +31,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
 /** Names a file or directory in a {@link FileSystem}.
- * Path strings use slash as the directory separator.  A path string is
- * absolute if it begins with a slash.
+ * Path strings use slash as the directory separator.
  */
 @Stringable
 @InterfaceAudience.Public
@@ -312,14 +311,7 @@ public class Path implements Comparable {
 return uri.getPath().startsWith(SEPARATOR, start);
}
   
-  /** True if the path component of this URI is absolute. */
-  /**
-   * There is some ambiguity here. An absolute path is a slash
-   * relative name without a scheme or an authority.
-   * So either this method was incorrectly named or its
-   * implementation is incorrect. This method returns true
-   * even if there is a scheme and authority.
-   */
+  /** True if the path is not a relative path and starts with root. */
   public boolean isAbsolute() {
  return isUriPathAbsolute();
   }



[2/2] hadoop git commit: HADOOP-8174. Remove confusing comment in Path#isAbsolute() (Contributed by Suresh Srinivas)

2015-05-14 Thread vinayakumarb
HADOOP-8174. Remove confusing comment in Path#isAbsolute() (Contributed by 
Suresh Srinivas)

(cherry picked from commit 0daa5ada68db483275aaa7f2ed9a2b5eaf5bb9bd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/085841d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/085841d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/085841d9

Branch: refs/heads/branch-2
Commit: 085841d9ac7325a19a7db878654d331adae724c3
Parents: 4b5915b
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu May 14 14:17:36 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Thu May 14 14:18:00 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../src/main/java/org/apache/hadoop/fs/Path.java| 12 ++--
 2 files changed, 5 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/085841d9/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 583db4c..e43540b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -215,6 +215,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11361. Fix a race condition in MetricsSourceAdapter.updateJmxCache.
 (Brahma Reddy Battula via ozawa)
 
+HADOOP-8174. Remove confusing comment in Path#isAbsolute()
+(Suresh Srinivas via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/085841d9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index caeb7a1..a38a46c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -31,8 +31,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
 /** Names a file or directory in a {@link FileSystem}.
- * Path strings use slash as the directory separator.  A path string is
- * absolute if it begins with a slash.
+ * Path strings use slash as the directory separator.
  */
 @Stringable
 @InterfaceAudience.Public
@@ -312,14 +311,7 @@ public class Path implements Comparable {
 return uri.getPath().startsWith(SEPARATOR, start);
}
   
-  /** True if the path component of this URI is absolute. */
-  /**
-   * There is some ambiguity here. An absolute path is a slash
-   * relative name without a scheme or an authority.
-   * So either this method was incorrectly named or its
-   * implementation is incorrect. This method returns true
-   * even if there is a scheme and authority.
-   */
+  /** True if the path is not a relative path and starts with root. */
   public boolean isAbsolute() {
  return isUriPathAbsolute();
   }



hadoop git commit: HADOOP-10993. Dump java command line to *.out file (Contributed by Kengo Seki)

2015-05-14 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0daa5ada6 - ffbb57462


HADOOP-10993. Dump java command line to *.out file (Contributed by Kengo Seki)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffbb5746
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffbb5746
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffbb5746

Branch: refs/heads/trunk
Commit: ffbb574623c2a1dbcead201e9ae2dad3f77998d0
Parents: 0daa5ad
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu May 14 15:24:35 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Thu May 14 15:24:35 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../hadoop-common/src/main/bin/hadoop-functions.sh  | 12 
 2 files changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffbb5746/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index bf39c94..359a38b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -215,6 +215,9 @@ Trunk (Unreleased)
 HADOOP-11590. Update sbin commands and documentation to use new --slaves
 option (aw)
 
+HADOOP-10993. Dump java command line to *.out file
+(Kengo Seki via vinayakumarb)
+
   BUG FIXES
 
 HADOOP-11473. test-patch says -1 overall even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffbb5746/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 85f8200..67e8870 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1148,6 +1148,10 @@ function hadoop_java_exec
 
   hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
   hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
+  hadoop_debug "Final JAVA_HOME: ${JAVA_HOME}"
+  hadoop_debug "java: ${JAVA}"
+  hadoop_debug "Class name: ${class}"
+  hadoop_debug "Command line options: $*"
 
   export CLASSPATH
   #shellcheck disable=SC2086
@@ -1174,6 +1178,10 @@ function hadoop_start_daemon
 
   hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
   hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
+  hadoop_debug "Final JAVA_HOME: ${JAVA_HOME}"
+  hadoop_debug "java: ${JAVA}"
+  hadoop_debug "Class name: ${class}"
+  hadoop_debug "Command line options: $*"
 
   # this is for the non-daemon pid creation
   #shellcheck disable=SC2086
@@ -1300,6 +1308,10 @@ function hadoop_start_secure_daemon
 
   hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
   hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
+  hadoop_debug "Final JSVC_HOME: ${JSVC_HOME}"
+  hadoop_debug "jsvc: ${jsvc}"
+  hadoop_debug "Class name: ${class}"
+  hadoop_debug "Command line options: $*"
 
   #shellcheck disable=SC2086
   echo $$ > "${privpidfile}" 2>/dev/null



[1/2] hadoop git commit: HDFS-8150. Make getFileChecksum fail for blocks under construction (Contributed by J.Andreina)

2015-05-14 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 085841d9a -> 4bbcffa51
  refs/heads/trunk ffbb57462 -> def9136e0


HDFS-8150. Make getFileChecksum fail for blocks under construction (Contributed 
by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/def9136e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/def9136e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/def9136e

Branch: refs/heads/trunk
Commit: def9136e0259e118e6fd7b656260765d28ac9ae6
Parents: ffbb574
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu May 14 15:54:51 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Thu May 14 15:54:51 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java   |  8 
 .../apache/hadoop/hdfs/TestGetFileChecksum.java  | 19 +++
 .../snapshot/TestSnapshotFileLength.java | 17 +
 4 files changed, 43 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/def9136e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0e6508b..4df18ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -758,6 +758,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7728. Avoid updating quota usage while loading edits.
 (Jing Zhao via wheat9)
 
+HDFS-8150. Make getFileChecksum fail for blocks under construction
+(J.Andreina via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/def9136e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 191ebc9..7908451 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1872,6 +1872,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if (null == blockLocations) {
   throw new FileNotFoundException("File does not exist: " + src);
 }
+if (blockLocations.isUnderConstruction()) {
+  throw new IOException("Fail to get checksum, since file " + src
+  + " is under construction.");
+}
 List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
 final DataOutputBuffer md5out = new DataOutputBuffer();
 int bytesPerCRC = -1;
@@ -1891,6 +1895,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if (null == blockLocations) {
   throw new FileNotFoundException("File does not exist: " + src);
 }
+if (blockLocations.isUnderConstruction()) {
+  throw new IOException("Fail to get checksum, since file " + src
+  + " is under construction.");
+}
 locatedblocks = blockLocations.getLocatedBlocks();
 refetchBlocks = false;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/def9136e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
index 0e56ba7..814261f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.junit.After;
@@ -68,6 +73,20 @@ public class TestGetFileChecksum {
   }
 
   @Test
+  public void testGetFileChecksumForBlocksUnderConstruction() {
+try {
+  FSDataOutputStream file = dfs.create(new Path("/testFile"));
+  file.write("Performance Testing".getBytes());
+  dfs.getFileChecksum(new Path("/testFile"));
+  fail("getFileChecksum should fail for files "
+  + "with blocks under construction");
+} catch 

[2/2] hadoop git commit: HDFS-8150. Make getFileChecksum fail for blocks under construction (Contributed by J.Andreina)

2015-05-14 Thread vinayakumarb
HDFS-8150. Make getFileChecksum fail for blocks under construction (Contributed 
by J.Andreina)

(cherry picked from commit def9136e0259e118e6fd7b656260765d28ac9ae6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bbcffa5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bbcffa5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bbcffa5

Branch: refs/heads/branch-2
Commit: 4bbcffa510f7f93822c68204c408a1acebf431bc
Parents: 085841d
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu May 14 15:54:51 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Thu May 14 15:55:20 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java   |  8 
 .../apache/hadoop/hdfs/TestGetFileChecksum.java  | 19 +++
 .../snapshot/TestSnapshotFileLength.java | 17 +
 4 files changed, 43 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bbcffa5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 17ee336..5961d34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -428,6 +428,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7728. Avoid updating quota usage while loading edits.
 (Jing Zhao via wheat9)
 
+HDFS-8150. Make getFileChecksum fail for blocks under construction
+(J.Andreina via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bbcffa5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 216c843..60c53b0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1871,6 +1871,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if (null == blockLocations) {
   throw new FileNotFoundException("File does not exist: " + src);
 }
+if (blockLocations.isUnderConstruction()) {
+  throw new IOException("Fail to get checksum, since file " + src
+  + " is under construction.");
+}
 List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
 final DataOutputBuffer md5out = new DataOutputBuffer();
 int bytesPerCRC = -1;
@@ -1890,6 +1894,10 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if (null == blockLocations) {
   throw new FileNotFoundException("File does not exist: " + src);
 }
+if (blockLocations.isUnderConstruction()) {
+  throw new IOException("Fail to get checksum, since file " + src
+  + " is under construction.");
+}
 locatedblocks = blockLocations.getLocatedBlocks();
 refetchBlocks = false;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bbcffa5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
index 0e56ba7..814261f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.junit.After;
@@ -68,6 +73,20 @@ public class TestGetFileChecksum {
   }
 
   @Test
+  public void testGetFileChecksumForBlocksUnderConstruction() {
+try {
+  FSDataOutputStream file = dfs.create(new Path("/testFile"));
+  file.write("Performance Testing".getBytes());
+  dfs.getFileChecksum(new Path("/testFile"));
+  fail("getFileChecksum should fail for files "
+  + "with blocks under construction");
+} catch (IOException ie) {
+