hadoop git commit: YARN-3424. Change logs for ContainerMonitorImpl's resourse monitoring from info to debug. Contributed by Anubhav Dhoot.

2015-04-01 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2daa478a6 -> c69ba8149


YARN-3424. Change logs for ContainerMonitorImpl's resourse monitoring from info 
to debug. Contributed by Anubhav Dhoot.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c69ba814
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c69ba814
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c69ba814

Branch: refs/heads/trunk
Commit: c69ba81497ae4da329ddb34ba712a64a7eec479f
Parents: 2daa478
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Apr 1 17:44:25 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed Apr 1 17:44:25 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt|  3 +++
 .../monitor/ContainersMonitorImpl.java | 13 -
 2 files changed, 11 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c69ba814/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f6093e2..55004ee 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -91,6 +91,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3428. Debug log resources to be localized for a container. (kasha)
 
+YARN-3424. Change logs for ContainerMonitorImpl's resourse monitoring
+from info to debug. (Anubhav Dhoot via ozawa)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c69ba814/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 5153051..cce749e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -455,11 +455,14 @@ public class ContainersMonitorImpl extends 
AbstractService implements
 long curRssMemUsageOfAgedProcesses = pTree.getRssMemorySize(1);
 long vmemLimit = ptInfo.getVmemLimit();
 long pmemLimit = ptInfo.getPmemLimit();
-LOG.info(String.format(
-Memory usage of ProcessTree %s for container-id %s: ,
- pId, containerId.toString()) +
-formatUsageString(
-currentVmemUsage, vmemLimit, currentPmemUsage, pmemLimit));
+if (LOG.isDebugEnabled()) {
+  LOG.debug(String.format(
+  Memory usage of ProcessTree %s for container-id %s: ,
+  pId, containerId.toString()) +
+  formatUsageString(
+  currentVmemUsage, vmemLimit,
+  currentPmemUsage, pmemLimit));
+}
 
 // Add usage to container metrics
 if (containerMetricsEnabled) {



hadoop git commit: YARN-3424. Change logs for ContainerMonitorImpl's resourse monitoring from info to debug. Contributed by Anubhav Dhoot.

2015-04-01 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 93b29d962 -> 80755edb7


YARN-3424. Change logs for ContainerMonitorImpl's resourse monitoring from info 
to debug. Contributed by Anubhav Dhoot.

(cherry picked from commit c69ba81497ae4da329ddb34ba712a64a7eec479f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80755edb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80755edb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80755edb

Branch: refs/heads/branch-2
Commit: 80755edb7aab0c1827ccf51aca1c0c69d8f721c0
Parents: 93b29d9
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Apr 1 17:44:25 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Wed Apr 1 17:44:40 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt|  3 +++
 .../monitor/ContainersMonitorImpl.java | 13 -
 2 files changed, 11 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/80755edb/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 30c1826..9318db0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -43,6 +43,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3428. Debug log resources to be localized for a container. (kasha)
 
+YARN-3424. Change logs for ContainerMonitorImpl's resourse monitoring
+from info to debug. (Anubhav Dhoot via ozawa)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80755edb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 5153051..cce749e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -455,11 +455,14 @@ public class ContainersMonitorImpl extends 
AbstractService implements
 long curRssMemUsageOfAgedProcesses = pTree.getRssMemorySize(1);
 long vmemLimit = ptInfo.getVmemLimit();
 long pmemLimit = ptInfo.getPmemLimit();
-LOG.info(String.format(
-Memory usage of ProcessTree %s for container-id %s: ,
- pId, containerId.toString()) +
-formatUsageString(
-currentVmemUsage, vmemLimit, currentPmemUsage, pmemLimit));
+if (LOG.isDebugEnabled()) {
+  LOG.debug(String.format(
+  Memory usage of ProcessTree %s for container-id %s: ,
+  pId, containerId.toString()) +
+  formatUsageString(
+  currentVmemUsage, vmemLimit,
+  currentPmemUsage, pmemLimit));
+}
 
 // Add usage to container metrics
 if (containerMetricsEnabled) {



hadoop git commit: YARN-3248. Display count of nodes blacklisted by apps in the web UI. Contributed by Varun Vasudev

2015-04-01 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 80755edb7 -> e26b6e55e


YARN-3248. Display count of nodes blacklisted by apps in the web UI.
Contributed by Varun Vasudev

(cherry picked from commit 4728bdfa15809db4b8b235faa286c65de4a48cf6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e26b6e55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e26b6e55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e26b6e55

Branch: refs/heads/branch-2
Commit: e26b6e55e96b763063dfbd39977096367eafc1e3
Parents: 80755ed
Author: Xuan xg...@apache.org
Authored: Wed Apr 1 04:19:18 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Wed Apr 1 04:20:37 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/server/webapp/AppAttemptBlock.java |  66 +
 .../hadoop/yarn/server/webapp/AppBlock.java |  37 +++--
 .../hadoop/yarn/server/webapp/AppsBlock.java|  85 ++-
 .../scheduler/AppSchedulingInfo.java|   4 +
 .../scheduler/SchedulerApplicationAttempt.java  |   4 +
 .../webapp/AppsBlockWithMetrics.java|   2 +-
 .../webapp/CapacitySchedulerPage.java   |   2 +-
 .../webapp/RMAppAttemptBlock.java   |  67 +
 .../resourcemanager/webapp/RMAppBlock.java  | 110 ++
 .../resourcemanager/webapp/RMAppsBlock.java | 146 +++
 .../resourcemanager/webapp/RMWebServices.java   |   3 +-
 .../webapp/dao/AppAttemptInfo.java  |  19 ++-
 .../webapp/TestRMWebServicesApps.java   |   2 +-
 14 files changed, 468 insertions(+), 82 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e26b6e55/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 9318db0..e7c07a1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -819,6 +819,9 @@ Release 2.7.0 - UNRELEASED
 removing inconsistencies in the default values. (Junping Du and Karthik
 Kambatla via vinodkv)
 
+YARN-3248. Display count of nodes blacklisted by apps in the web UI.
+(Varun Vasudev via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e26b6e55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
index dca39d6..8df94e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
@@ -72,7 +72,7 @@ public class AppAttemptBlock extends HtmlBlock {
 }
 
 UserGroupInformation callerUGI = getCallerUGI();
-ApplicationAttemptReport appAttemptReport = null;
+ApplicationAttemptReport appAttemptReport;
 try {
   final GetApplicationAttemptReportRequest request =
   GetApplicationAttemptReportRequest.newInstance(appAttemptId);
@@ -135,34 +135,7 @@ public class AppAttemptBlock extends HtmlBlock {
  appAttempt.getRpcPort()  65536) {
   node = appAttempt.getHost() + : + appAttempt.getRpcPort();
 }
-info(Application Attempt Overview)
-  ._(
-Application Attempt State:,
-appAttempt.getAppAttemptState() == null ? UNAVAILABLE : appAttempt
-  .getAppAttemptState())
-  ._(
-AM Container:,
-appAttempt.getAmContainerId() == null || containers == null
-|| !hasAMContainer(appAttemptReport.getAMContainerId(), containers)
-? null : root_url(container, appAttempt.getAmContainerId()),
-String.valueOf(appAttempt.getAmContainerId()))
-  ._(Node:, node)
-  ._(
-Tracking URL:,
-appAttempt.getTrackingUrl() == null
-|| appAttempt.getTrackingUrl() == UNAVAILABLE ? null
-: root_url(appAttempt.getTrackingUrl()),
-appAttempt.getTrackingUrl() == null
-|| appAttempt.getTrackingUrl() == UNAVAILABLE
-? Unassigned
-: appAttempt.getAppAttemptState() == 
YarnApplicationAttemptState.FINISHED
-|| appAttempt.getAppAttemptState() == 

hadoop git commit: YARN-3248. Correct fix version from branch-2.7 to branch-2.8 in the change log.

2015-04-01 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e26b6e55e -> 5e4d45767


YARN-3248. Correct fix version from branch-2.7 to branch-2.8 in the change log.

(cherry picked from commit 2e79f1c2125517586c165a84e99d3c4d38ca0938)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e4d4576
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e4d4576
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e4d4576

Branch: refs/heads/branch-2
Commit: 5e4d457674d4c72faed5beb32c0ee69b324d0825
Parents: e26b6e5
Author: Xuan xg...@apache.org
Authored: Wed Apr 1 04:32:11 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Wed Apr 1 04:33:32 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e4d4576/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e7c07a1..f3b9ce4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -46,6 +46,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3424. Change logs for ContainerMonitorImpl's resourse monitoring
 from info to debug. (Anubhav Dhoot via ozawa)
 
+YARN-3248. Display count of nodes blacklisted by apps in the web UI.
+(Varun Vasudev via xgong)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not
@@ -819,9 +822,6 @@ Release 2.7.0 - UNRELEASED
 removing inconsistencies in the default values. (Junping Du and Karthik
 Kambatla via vinodkv)
 
-YARN-3248. Display count of nodes blacklisted by apps in the web UI.
-(Varun Vasudev via xgong)
-
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



hadoop git commit: YARN-3248. Correct fix version from branch-2.7 to branch-2.8 in the change log.

2015-04-01 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4728bdfa1 -> 2e79f1c21


YARN-3248. Correct fix version from branch-2.7 to branch-2.8 in the change log.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e79f1c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e79f1c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e79f1c2

Branch: refs/heads/trunk
Commit: 2e79f1c2125517586c165a84e99d3c4d38ca0938
Parents: 4728bdf
Author: Xuan xg...@apache.org
Authored: Wed Apr 1 04:32:11 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Wed Apr 1 04:32:11 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e79f1c2/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d2a1ae3..2888a65 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -94,6 +94,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3424. Change logs for ContainerMonitorImpl's resourse monitoring
 from info to debug. (Anubhav Dhoot via ozawa)
 
+YARN-3248. Display count of nodes blacklisted by apps in the web UI.
+(Varun Vasudev via xgong)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not
@@ -864,9 +867,6 @@ Release 2.7.0 - UNRELEASED
 removing inconsistencies in the default values. (Junping Du and Karthik
 Kambatla via vinodkv)
 
-YARN-3248. Display count of nodes blacklisted by apps in the web UI.
-(Varun Vasudev via xgong)
-
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



hadoop git commit: YARN-3248. Display count of nodes blacklisted by apps in the web UI. Contributed by Varun Vasudev

2015-04-01 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk c69ba8149 -> 4728bdfa1


YARN-3248. Display count of nodes blacklisted by apps in the web UI.
Contributed by Varun Vasudev


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4728bdfa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4728bdfa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4728bdfa

Branch: refs/heads/trunk
Commit: 4728bdfa15809db4b8b235faa286c65de4a48cf6
Parents: c69ba81
Author: Xuan xg...@apache.org
Authored: Wed Apr 1 04:19:18 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Wed Apr 1 04:19:18 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/server/webapp/AppAttemptBlock.java |  66 +
 .../hadoop/yarn/server/webapp/AppBlock.java |  37 +++--
 .../hadoop/yarn/server/webapp/AppsBlock.java|  85 ++-
 .../scheduler/AppSchedulingInfo.java|   4 +
 .../scheduler/SchedulerApplicationAttempt.java  |   4 +
 .../webapp/AppsBlockWithMetrics.java|   2 +-
 .../webapp/CapacitySchedulerPage.java   |   2 +-
 .../webapp/RMAppAttemptBlock.java   |  67 +
 .../resourcemanager/webapp/RMAppBlock.java  | 110 ++
 .../resourcemanager/webapp/RMAppsBlock.java | 146 +++
 .../resourcemanager/webapp/RMWebServices.java   |   3 +-
 .../webapp/dao/AppAttemptInfo.java  |  19 ++-
 .../webapp/TestRMWebServicesApps.java   |   2 +-
 14 files changed, 468 insertions(+), 82 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4728bdfa/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 55004ee..d2a1ae3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -864,6 +864,9 @@ Release 2.7.0 - UNRELEASED
 removing inconsistencies in the default values. (Junping Du and Karthik
 Kambatla via vinodkv)
 
+YARN-3248. Display count of nodes blacklisted by apps in the web UI.
+(Varun Vasudev via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4728bdfa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
index dca39d6..8df94e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
@@ -72,7 +72,7 @@ public class AppAttemptBlock extends HtmlBlock {
 }
 
 UserGroupInformation callerUGI = getCallerUGI();
-ApplicationAttemptReport appAttemptReport = null;
+ApplicationAttemptReport appAttemptReport;
 try {
   final GetApplicationAttemptReportRequest request =
   GetApplicationAttemptReportRequest.newInstance(appAttemptId);
@@ -135,34 +135,7 @@ public class AppAttemptBlock extends HtmlBlock {
  appAttempt.getRpcPort()  65536) {
   node = appAttempt.getHost() + : + appAttempt.getRpcPort();
 }
-info(Application Attempt Overview)
-  ._(
-Application Attempt State:,
-appAttempt.getAppAttemptState() == null ? UNAVAILABLE : appAttempt
-  .getAppAttemptState())
-  ._(
-AM Container:,
-appAttempt.getAmContainerId() == null || containers == null
-|| !hasAMContainer(appAttemptReport.getAMContainerId(), containers)
-? null : root_url(container, appAttempt.getAmContainerId()),
-String.valueOf(appAttempt.getAmContainerId()))
-  ._(Node:, node)
-  ._(
-Tracking URL:,
-appAttempt.getTrackingUrl() == null
-|| appAttempt.getTrackingUrl() == UNAVAILABLE ? null
-: root_url(appAttempt.getTrackingUrl()),
-appAttempt.getTrackingUrl() == null
-|| appAttempt.getTrackingUrl() == UNAVAILABLE
-? Unassigned
-: appAttempt.getAppAttemptState() == 
YarnApplicationAttemptState.FINISHED
-|| appAttempt.getAppAttemptState() == 
YarnApplicationAttemptState.FAILED
-|| 

[2/2] hadoop git commit: Update CHANGES-HDFS-EC-7285.txt

2015-04-01 Thread drankye
Update CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/455b89d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/455b89d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/455b89d0

Branch: refs/heads/HDFS-7285
Commit: 455b89d069684e9ac76b2a88106ad60666cac5e4
Parents: 2f9119a
Author: Kai Zheng kai.zh...@intel.com
Authored: Thu Apr 2 05:15:58 2015 +0800
Committer: Kai Zheng kai.zh...@intel.com
Committed: Thu Apr 2 05:15:58 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/455b89d0/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index b69e69a..01280db 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -29,3 +29,6 @@
 
 HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng
 ( Kai Zheng )
+
+HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
+( Xinwei Qin via Kai Zheng )



hadoop git commit: HDFS-8036. Use snapshot path as source when using snapshot diff report in DistCp. Contributed by Jing Zhao.

2015-04-01 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6f753da4a -> 9e114ee60


HDFS-8036. Use snapshot path as source when using snapshot diff report in 
DistCp. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e114ee6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e114ee6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e114ee6

Branch: refs/heads/branch-2
Commit: 9e114ee6078ce5c427275c1c4d05edd131c7051c
Parents: 6f753da
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 1 16:50:59 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 1 16:51:09 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/tools/DistCpSync.java | 21 +-
 .../hadoop/tools/mapred/CopyCommitter.java  |  3 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java | 40 +++-
 4 files changed, 63 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e114ee6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8139b02..8f9fcd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1041,6 +1041,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7748. Separate ECN flags from the Status in the 
DataTransferPipelineAck.
 (Anu Engineer and Haohui Mai via wheat9)
 
+HDFS-8036. Use snapshot path as source when using snapshot diff report in
+DistCp. (Jing Zhao via wheat9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e114ee6/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
index 26d7eb4..8e71b6f 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 
 import java.io.IOException;
@@ -86,6 +87,22 @@ class DistCpSync {
 } finally {
   deleteTargetTmpDir(targetFs, tmpDir);
   // TODO: since we have tmp directory, we can support undo with failures
+  // set the source path using the snapshot path
+  
inputOptions.setSourcePaths(Arrays.asList(getSourceSnapshotPath(sourceDir,
+  inputOptions.getToSnapshot(;
+}
+  }
+
+  private static String getSnapshotName(String name) {
+return Path.CUR_DIR.equals(name) ?  : name;
+  }
+
+  private static Path getSourceSnapshotPath(Path sourceDir, String 
snapshotName) {
+if (Path.CUR_DIR.equals(snapshotName)) {
+  return sourceDir;
+} else {
+  return new Path(sourceDir,
+  HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName);
 }
   }
 
@@ -136,8 +153,10 @@ class DistCpSync {
   static DiffInfo[] getDiffs(DistCpOptions inputOptions,
   DistributedFileSystem fs, Path sourceDir, Path targetDir) {
 try {
+  final String from = getSnapshotName(inputOptions.getFromSnapshot());
+  final String to = getSnapshotName(inputOptions.getToSnapshot());
   SnapshotDiffReport sourceDiff = fs.getSnapshotDiffReport(sourceDir,
-  inputOptions.getFromSnapshot(), inputOptions.getToSnapshot());
+  from, to);
   return DiffInfo.getDiffs(sourceDiff, targetDir);
 } catch (IOException e) {
   DistCp.LOG.warn(Failed to compute snapshot diff on  + sourceDir, e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e114ee6/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 9ec57f4..2b1e510 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ 

hadoop git commit: HDFS-8036. Use snapshot path as source when using snapshot diff report in DistCp. Contributed by Jing Zhao.

2015-04-01 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3c7adaaf3 -> 75cb1d42a


HDFS-8036. Use snapshot path as source when using snapshot diff report in 
DistCp. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75cb1d42
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75cb1d42
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75cb1d42

Branch: refs/heads/trunk
Commit: 75cb1d42abec54ef5484636e020949ceebe189e9
Parents: 3c7adaa
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 1 16:50:59 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 1 16:50:59 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/tools/DistCpSync.java | 21 +-
 .../hadoop/tools/mapred/CopyCommitter.java  |  3 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java | 40 +++-
 4 files changed, 63 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75cb1d42/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f265ead..1d9e200 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1345,6 +1345,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7748. Separate ECN flags from the Status in the 
DataTransferPipelineAck.
 (Anu Engineer and Haohui Mai via wheat9)
 
+HDFS-8036. Use snapshot path as source when using snapshot diff report in
+DistCp. (Jing Zhao via wheat9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75cb1d42/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
index 26d7eb4..8e71b6f 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 
 import java.io.IOException;
@@ -86,6 +87,22 @@ class DistCpSync {
 } finally {
   deleteTargetTmpDir(targetFs, tmpDir);
   // TODO: since we have tmp directory, we can support undo with failures
+  // set the source path using the snapshot path
+  
inputOptions.setSourcePaths(Arrays.asList(getSourceSnapshotPath(sourceDir,
+  inputOptions.getToSnapshot(;
+}
+  }
+
+  private static String getSnapshotName(String name) {
+return Path.CUR_DIR.equals(name) ?  : name;
+  }
+
+  private static Path getSourceSnapshotPath(Path sourceDir, String 
snapshotName) {
+if (Path.CUR_DIR.equals(snapshotName)) {
+  return sourceDir;
+} else {
+  return new Path(sourceDir,
+  HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName);
 }
   }
 
@@ -136,8 +153,10 @@ class DistCpSync {
   static DiffInfo[] getDiffs(DistCpOptions inputOptions,
   DistributedFileSystem fs, Path sourceDir, Path targetDir) {
 try {
+  final String from = getSnapshotName(inputOptions.getFromSnapshot());
+  final String to = getSnapshotName(inputOptions.getToSnapshot());
   SnapshotDiffReport sourceDiff = fs.getSnapshotDiffReport(sourceDir,
-  inputOptions.getFromSnapshot(), inputOptions.getToSnapshot());
+  from, to);
   return DiffInfo.getDiffs(sourceDiff, targetDir);
 } catch (IOException e) {
   DistCp.LOG.warn(Failed to compute snapshot diff on  + sourceDir, e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75cb1d42/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 9ec57f4..2b1e510 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ 

hadoop git commit: HDFS-7922. ShortCircuitCache#close is not releasing ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)

2015-04-01 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk c94d594a5 -> 3c7adaaf3


HDFS-7922. ShortCircuitCache#close is not releasing 
ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c7adaaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c7adaaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c7adaaf

Branch: refs/heads/trunk
Commit: 3c7adaaf3571c91fee80585472d2a81402a53e2b
Parents: c94d594
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Apr 1 16:02:39 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Wed Apr 1 16:02:39 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../hdfs/shortcircuit/ShortCircuitCache.java| 28 
 .../shortcircuit/TestShortCircuitCache.java |  2 +-
 3 files changed, 32 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c7adaaf/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b5591e0..f265ead 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -409,6 +409,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6945. BlockManager should remove a block from excessReplicateMap and
 decrement ExcessBlocks metric when the block is removed. (aajisaka)
 
+HDFS-7922. ShortCircuitCache#close is not releasing
+ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c7adaaf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index 73c52d5..d1ec3b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -916,6 +916,34 @@ public class ShortCircuitCache implements Closeable {
 } finally {
   lock.unlock();
 }
+
+releaserExecutor.shutdown();
+cleanerExecutor.shutdown();
+// wait for existing tasks to terminate
+try {
+  if (!releaserExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
+LOG.error("Forcing SlotReleaserThreadPool to shutdown!");
+releaserExecutor.shutdownNow();
+  }
+} catch (InterruptedException e) {
+  releaserExecutor.shutdownNow();
+  Thread.currentThread().interrupt();
+  LOG.error("Interrupted while waiting for SlotReleaserThreadPool "
+  + "to terminate", e);
+}
+
+// wait for existing tasks to terminate
+try {
+  if (!cleanerExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
+LOG.error("Forcing CleanerThreadPool to shutdown!");
+cleanerExecutor.shutdownNow();
+  }
+} catch (InterruptedException e) {
+  cleanerExecutor.shutdownNow();
+  Thread.currentThread().interrupt();
+  LOG.error("Interrupted while waiting for CleanerThreadPool "
+  + "to terminate", e);
+}
 IOUtils.cleanup(LOG, shmManager);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c7adaaf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index 7daabd0..7d26dee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -203,7 +203,7 @@ public class TestShortCircuitCache {
 cache.close();
   }
 
-  @Test(timeout=60000)
+  @Test(timeout=100000)
   public void testExpiry() throws Exception {
 final ShortCircuitCache cache =
 new ShortCircuitCache(2, 1, 1, 1000, 1, 1000, 0);



hadoop git commit: HDFS-7922. ShortCircuitCache#close is not releasing ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)

2015-04-01 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4b74aa718 - 6f753da4a


HDFS-7922. ShortCircuitCache#close is not releasing 
ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)

(cherry picked from commit 3c7adaaf3571c91fee80585472d2a81402a53e2b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f753da4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f753da4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f753da4

Branch: refs/heads/branch-2
Commit: 6f753da4a9bd2defcfe8f5d3558177c77865bb7e
Parents: 4b74aa7
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Apr 1 16:02:39 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Wed Apr 1 16:11:44 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../hdfs/shortcircuit/ShortCircuitCache.java| 28 
 .../shortcircuit/TestShortCircuitCache.java |  2 +-
 3 files changed, 32 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f753da4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7f1640e..8139b02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -94,6 +94,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6945. BlockManager should remove a block from excessReplicateMap and
 decrement ExcessBlocks metric when the block is removed. (aajisaka)
 
+HDFS-7922. ShortCircuitCache#close is not releasing
+ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f753da4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index 73c52d5..d1ec3b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -916,6 +916,34 @@ public class ShortCircuitCache implements Closeable {
 } finally {
   lock.unlock();
 }
+
+releaserExecutor.shutdown();
+cleanerExecutor.shutdown();
+// wait for existing tasks to terminate
+try {
+  if (!releaserExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
+LOG.error("Forcing SlotReleaserThreadPool to shutdown!");
+releaserExecutor.shutdownNow();
+  }
+} catch (InterruptedException e) {
+  releaserExecutor.shutdownNow();
+  Thread.currentThread().interrupt();
+  LOG.error("Interrupted while waiting for SlotReleaserThreadPool "
+  + "to terminate", e);
+}
+
+// wait for existing tasks to terminate
+try {
+  if (!cleanerExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
+LOG.error("Forcing CleanerThreadPool to shutdown!");
+cleanerExecutor.shutdownNow();
+  }
+} catch (InterruptedException e) {
+  cleanerExecutor.shutdownNow();
+  Thread.currentThread().interrupt();
+  LOG.error("Interrupted while waiting for CleanerThreadPool "
+  + "to terminate", e);
+}
 IOUtils.cleanup(LOG, shmManager);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f753da4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index 7daabd0..7d26dee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -203,7 +203,7 @@ public class TestShortCircuitCache {
 cache.close();
   }
 
-  @Test(timeout=60000)
+  @Test(timeout=100000)
   public void testExpiry() throws Exception {
 final ShortCircuitCache cache =
 new ShortCircuitCache(2, 1, 1, 1000, 1, 1000, 0);



hadoop git commit: HDFS-8001 RpcProgramNfs3 : wrong parsing of dfs.blocksize. Contributed by Remi Catherinot

2015-04-01 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 1bde06aca - a445f8b67


HDFS-8001 RpcProgramNfs3 : wrong parsing of dfs.blocksize. Contributed by Remi 
Catherinot

(cherry picked from commit 4d14816c269f110445e1ad3e03ac53b0c1cdb58b)
(cherry picked from commit 27970b3f46674ef7824a4478141d968ef2c16ebb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a445f8b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a445f8b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a445f8b6

Branch: refs/heads/branch-2.7
Commit: a445f8b675e1bba3bfdac6ae36b8a89eaffd212a
Parents: 1bde06a
Author: Brandon Li brando...@apache.org
Authored: Wed Apr 1 17:20:24 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Wed Apr 1 17:30:00 2015 -0700

--
 .../main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a445f8b6/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 268abba..161f3e9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -187,7 +187,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 clientCache = new DFSClientCache(config);
 replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
 DFSConfigKeys.DFS_REPLICATION_DEFAULT);
-blockSize = config.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+blockSize = config.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
 DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
 bufferSize = config.getInt(
 CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a445f8b6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 53896be..b4e2778 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -947,6 +947,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8036. Use snapshot path as source when using snapshot diff report in
 DistCp. (Jing Zhao via wheat9)
 
+HDFS-8001 RpcProgramNfs3 : wrong parsing of dfs.blocksize
+(Remi Catherinot via brandonli)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode



hadoop git commit: HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes for infinite loop (Contributed by Vinayakumar B)

2015-04-01 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 27970b3f4 - beb0fd0d6


HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes 
for infinite loop (Contributed by Vinayakumar B)

(cherry picked from commit 867d5d2675b8fb73c40fac1e581b02b005459d95)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/beb0fd0d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/beb0fd0d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/beb0fd0d

Branch: refs/heads/branch-2
Commit: beb0fd0d601aff0ba993c2d48b83fe52edfb9065
Parents: 27970b3
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Apr 2 08:12:00 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Thu Apr 2 08:12:51 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop/hdfs/server/datanode/ReportBadBlockAction.java| 8 ++--
 2 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/beb0fd0d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fc81ae0..bae6148 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -97,6 +97,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7922. ShortCircuitCache#close is not releasing
 ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)
 
+HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
+goes for infinite loop (vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/beb0fd0d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
index fd01a01..991b56d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.ipc.StandbyException;
 
 /**
  * ReportBadBlockAction is an instruction issued by {{BPOfferService}} to
@@ -58,8 +59,11 @@ public class ReportBadBlockAction implements 
BPServiceActorAction {
 dnArr, uuids, types) };
 
 try {
-  bpNamenode.reportBadBlocks(locatedBlock);  
-} catch (IOException e){
+  bpNamenode.reportBadBlocks(locatedBlock);
+} catch (StandbyException e) {
+  DataNode.LOG.warn("Failed to report bad block " + block
+  + " to standby namenode");
+} catch (IOException e) {
   throw new BPServiceActorActionException("Failed to report bad block "
   + block + " to namenode: ");
 }



hadoop git commit: HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes for infinite loop (Contributed by Vinayakumar B)

2015-04-01 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk f383fd9b6 - 867d5d267


HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes 
for infinite loop (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/867d5d26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/867d5d26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/867d5d26

Branch: refs/heads/trunk
Commit: 867d5d2675b8fb73c40fac1e581b02b005459d95
Parents: f383fd9
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Apr 2 08:12:00 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Thu Apr 2 08:12:00 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop/hdfs/server/datanode/ReportBadBlockAction.java| 8 ++--
 2 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/867d5d26/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 518df9f..80d958d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -412,6 +412,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7922. ShortCircuitCache#close is not releasing
 ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)
 
+HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
+goes for infinite loop (vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/867d5d26/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
index fd01a01..991b56d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.ipc.StandbyException;
 
 /**
  * ReportBadBlockAction is an instruction issued by {{BPOfferService}} to
@@ -58,8 +59,11 @@ public class ReportBadBlockAction implements 
BPServiceActorAction {
 dnArr, uuids, types) };
 
 try {
-  bpNamenode.reportBadBlocks(locatedBlock);  
-} catch (IOException e){
+  bpNamenode.reportBadBlocks(locatedBlock);
+} catch (StandbyException e) {
+  DataNode.LOG.warn("Failed to report bad block " + block
+  + " to standby namenode");
+} catch (IOException e) {
   throw new BPServiceActorActionException("Failed to report bad block "
   + block + " to namenode: ");
 }



hadoop git commit: HDFS-8008. Support client-side back off when the datanodes are congested. Contributed by Haohui Mai.

2015-04-01 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9e114ee60 - cfcf79549


HDFS-8008. Support client-side back off when the datanodes are congested. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cfcf7954
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cfcf7954
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cfcf7954

Branch: refs/heads/branch-2
Commit: cfcf795492f960faa7891044cc79ea9d2051387b
Parents: 9e114ee
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 1 16:54:46 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 1 16:54:53 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../org/apache/hadoop/hdfs/DataStreamer.java| 63 
 .../hdfs/protocol/datatransfer/PipelineAck.java |  4 ++
 .../apache/hadoop/hdfs/TestDFSOutputStream.java | 42 +
 4 files changed, 112 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfcf7954/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8f9fcd9..3bace16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -559,6 +559,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7742. Favoring decommissioning node for replication can cause a block 
 to stay underreplicated for long periods (Nathan Roberts via kihwal)
 
+HDFS-8008. Support client-side back off when the datanodes are congested.
+(wheat9)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfcf7954/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 9c437ba..6ff4c24 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -218,6 +218,13 @@ class DataStreamer extends Daemon {
   private boolean failPacket = false;
   private final long dfsclientSlowLogThresholdMs;
   private long artificialSlowdown = 0;
+  // List of congested data nodes. The stream will back off if the DataNodes
+  // are congested
+  private final ArrayList<DatanodeInfo> congestedNodes = new ArrayList<>();
+  private static final int CONGESTION_BACKOFF_MEAN_TIME_IN_MS = 5000;
+  private static final int CONGESTION_BACK_OFF_MAX_TIME_IN_MS =
+  CONGESTION_BACKOFF_MEAN_TIME_IN_MS * 10;
+  private int lastCongestionBackoffTime;
 
   private final LoadingCache<DatanodeInfo, DatanodeInfo> excludedNodes;
 
@@ -386,6 +393,11 @@ class DataStreamer extends Daemon {
 one = createHeartbeatPacket();
 assert one != null;
   } else {
+try {
+  backOffIfNecessary();
+} catch (InterruptedException e) {
+  DFSClient.LOG.warn("Caught exception ", e);
+}
 one = dataQueue.getFirst(); // regular data packet
 long parents[] = one.getTraceParents();
 if (parents.length  0) {
@@ -815,9 +827,14 @@ class DataStreamer extends Daemon {
 
   long seqno = ack.getSeqno();
   // processes response status from datanodes.
+  ArrayList<DatanodeInfo> congestedNodesFromAck = new ArrayList<>();
   for (int i = ack.getNumOfReplies()-1; i >= 0 &&
 dfsClient.clientRunning; i--) {
 final Status reply = PipelineAck.getStatusFromHeader(ack
 .getHeaderFlag(i));
+if (PipelineAck.getECNFromHeader(ack.getHeaderFlag(i)) ==
+PipelineAck.ECN.CONGESTED) {
+  congestedNodesFromAck.add(targets[i]);
+}
 // Restart will not be treated differently unless it is
 // the local node or the only one in the pipeline.
 if (PipelineAck.isRestartOOBStatus(reply) 
@@ -839,6 +856,18 @@ class DataStreamer extends Daemon {
 }
   }
 
+  if (!congestedNodesFromAck.isEmpty()) {
+synchronized (congestedNodes) {
+  congestedNodes.clear();
+  congestedNodes.addAll(congestedNodesFromAck);
+}
+  } else {
+synchronized (congestedNodes) {
+  congestedNodes.clear();
+  lastCongestionBackoffTime = 0;
+}
+  }
+
   

hadoop git commit: HDFS-8008. Support client-side back off when the datanodes are congested. Contributed by Haohui Mai.

2015-04-01 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 75cb1d42a - 6ccf4fbf8


HDFS-8008. Support client-side back off when the datanodes are congested. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ccf4fbf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ccf4fbf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ccf4fbf

Branch: refs/heads/trunk
Commit: 6ccf4fbf8a8374c289370f67b26ac05abad30ebc
Parents: 75cb1d4
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 1 16:54:46 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 1 16:54:46 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../org/apache/hadoop/hdfs/DataStreamer.java| 63 
 .../hdfs/protocol/datatransfer/PipelineAck.java |  4 ++
 .../apache/hadoop/hdfs/TestDFSOutputStream.java | 42 +
 4 files changed, 112 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ccf4fbf/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1d9e200..34c0556 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -868,6 +868,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7742. Favoring decommissioning node for replication can cause a block 
 to stay underreplicated for long periods (Nathan Roberts via kihwal)
 
+HDFS-8008. Support client-side back off when the datanodes are congested.
+(wheat9)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ccf4fbf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 9c437ba..6ff4c24 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -218,6 +218,13 @@ class DataStreamer extends Daemon {
   private boolean failPacket = false;
   private final long dfsclientSlowLogThresholdMs;
   private long artificialSlowdown = 0;
+  // List of congested data nodes. The stream will back off if the DataNodes
+  // are congested
+  private final ArrayList<DatanodeInfo> congestedNodes = new ArrayList<>();
+  private static final int CONGESTION_BACKOFF_MEAN_TIME_IN_MS = 5000;
+  private static final int CONGESTION_BACK_OFF_MAX_TIME_IN_MS =
+  CONGESTION_BACKOFF_MEAN_TIME_IN_MS * 10;
+  private int lastCongestionBackoffTime;
 
   private final LoadingCache<DatanodeInfo, DatanodeInfo> excludedNodes;
 
@@ -386,6 +393,11 @@ class DataStreamer extends Daemon {
 one = createHeartbeatPacket();
 assert one != null;
   } else {
+try {
+  backOffIfNecessary();
+} catch (InterruptedException e) {
+  DFSClient.LOG.warn("Caught exception ", e);
+}
 one = dataQueue.getFirst(); // regular data packet
 long parents[] = one.getTraceParents();
 if (parents.length  0) {
@@ -815,9 +827,14 @@ class DataStreamer extends Daemon {
 
   long seqno = ack.getSeqno();
   // processes response status from datanodes.
+  ArrayList<DatanodeInfo> congestedNodesFromAck = new ArrayList<>();
   for (int i = ack.getNumOfReplies()-1; i >= 0 &&
 dfsClient.clientRunning; i--) {
 final Status reply = PipelineAck.getStatusFromHeader(ack
 .getHeaderFlag(i));
+if (PipelineAck.getECNFromHeader(ack.getHeaderFlag(i)) ==
+PipelineAck.ECN.CONGESTED) {
+  congestedNodesFromAck.add(targets[i]);
+}
 // Restart will not be treated differently unless it is
 // the local node or the only one in the pipeline.
 if (PipelineAck.isRestartOOBStatus(reply) 
@@ -839,6 +856,18 @@ class DataStreamer extends Daemon {
 }
   }
 
+  if (!congestedNodesFromAck.isEmpty()) {
+synchronized (congestedNodes) {
+  congestedNodes.clear();
+  congestedNodes.addAll(congestedNodesFromAck);
+}
+  } else {
+synchronized (congestedNodes) {
+  congestedNodes.clear();
+  lastCongestionBackoffTime = 0;
+}
+  }
+
   assert 

hadoop git commit: HDFS-8001 RpcProgramNfs3 : wrong parsing of dfs.blocksize. Contributed by Remi Catherinot

2015-04-01 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8e2f1a93e - 27970b3f4


HDFS-8001 RpcProgramNfs3 : wrong parsing of dfs.blocksize. Contributed by Remi 
Catherinot

(cherry picked from commit 4d14816c269f110445e1ad3e03ac53b0c1cdb58b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27970b3f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27970b3f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27970b3f

Branch: refs/heads/branch-2
Commit: 27970b3f46674ef7824a4478141d968ef2c16ebb
Parents: 8e2f1a9
Author: Brandon Li brando...@apache.org
Authored: Wed Apr 1 17:20:24 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Wed Apr 1 17:25:28 2015 -0700

--
 .../main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27970b3f/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 268abba..161f3e9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -187,7 +187,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 clientCache = new DFSClientCache(config);
 replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
 DFSConfigKeys.DFS_REPLICATION_DEFAULT);
-blockSize = config.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+blockSize = config.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
 DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
 bufferSize = config.getInt(
 CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27970b3f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3bace16..fc81ae0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1047,6 +1047,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8036. Use snapshot path as source when using snapshot diff report in
 DistCp. (Jing Zhao via wheat9)
 
+HDFS-8001 RpcProgramNfs3 : wrong parsing of dfs.blocksize
+(Remi Catherinot via brandonli)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode



hadoop git commit: Zero map split input length combine with none zero map split input length will cause MR1 job hung. (zxu via rkanter)

2015-04-01 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-1 8151679f8 - 5f5138e5b


Zero map split input length combine with none zero map split input length will 
cause MR1 job hung. (zxu via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f5138e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f5138e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f5138e5

Branch: refs/heads/branch-1
Commit: 5f5138e5b37c570272ceadfa9020f1896223a04f
Parents: 8151679
Author: Robert Kanter rkan...@apache.org
Authored: Wed Apr 1 15:19:59 2015 -0700
Committer: Robert Kanter rkan...@apache.org
Committed: Wed Apr 1 15:19:59 2015 -0700

--
 CHANGES.txt   |  3 +++
 .../org/apache/hadoop/mapred/JobInProgress.java   |  6 +-
 .../apache/hadoop/mapred/ResourceEstimator.java   | 12 ++--
 .../hadoop/mapred/TestResourceEstimation.java | 18 ++
 4 files changed, 28 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f5138e5/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index 57f1cf4..93a0a6b 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -282,6 +282,9 @@ Release 1.3.0 - unreleased
 
 HDFS-6649. Documentation for setrep is wrong. (aajisaka)
 
+Zero map split input length combine with none zero map split input
+length will cause MR1 job hung. (zxu via rkanter)
+
 Release 1.2.2 - unreleased
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f5138e5/src/mapred/org/apache/hadoop/mapred/JobInProgress.java
--
diff --git a/src/mapred/org/apache/hadoop/mapred/JobInProgress.java 
b/src/mapred/org/apache/hadoop/mapred/JobInProgress.java
index 0861584..f134553 100644
--- a/src/mapred/org/apache/hadoop/mapred/JobInProgress.java
+++ b/src/mapred/org/apache/hadoop/mapred/JobInProgress.java
@@ -938,7 +938,11 @@ public class JobInProgress {
   long getInputLength() {
 return inputLength;
   }
- 
+
+  void setInputLength(long length) {
+inputLength = length;
+  }
+
   boolean isCleanupLaunched() {
 return launchedCleanup;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f5138e5/src/mapred/org/apache/hadoop/mapred/ResourceEstimator.java
--
diff --git a/src/mapred/org/apache/hadoop/mapred/ResourceEstimator.java 
b/src/mapred/org/apache/hadoop/mapred/ResourceEstimator.java
index e2f8fc6..ec0e64d 100644
--- a/src/mapred/org/apache/hadoop/mapred/ResourceEstimator.java
+++ b/src/mapred/org/apache/hadoop/mapred/ResourceEstimator.java
@@ -52,8 +52,16 @@ class ResourceEstimator {
 //-1 indicates error, which we don't average in.
 if(tip.isMapTask()   ts.getOutputSize() != -1)  {
   completedMapsUpdates++;
-
-  completedMapsInputSize+=(tip.getMapInputSize()+1);
+  long inputSize = tip.getMapInputSize();
+  if (inputSize == 0) {
+// if map input size is 0, use map output size as input size
+// to avoid job hung.
+inputSize = ts.getOutputSize();
+// map input size is changed, update JobInProgress.inputLength.
+long length = job.getInputLength() + inputSize;
+job.setInputLength(length);
+  }
+  completedMapsInputSize+=(inputSize+1);
   completedMapsOutputSize+=ts.getOutputSize();
 
   if(LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f5138e5/src/test/org/apache/hadoop/mapred/TestResourceEstimation.java
--
diff --git a/src/test/org/apache/hadoop/mapred/TestResourceEstimation.java 
b/src/test/org/apache/hadoop/mapred/TestResourceEstimation.java
index 6a16b72..fed722f 100644
--- a/src/test/org/apache/hadoop/mapred/TestResourceEstimation.java
+++ b/src/test/org/apache/hadoop/mapred/TestResourceEstimation.java
@@ -55,12 +55,13 @@ public class TestResourceEstimation {
 //unfortunately, we can't set job input size from here.
 ResourceEstimator re = new ResourceEstimator(jip);
 
-for(int i = 0; i < maps / 10 ; ++i) {
-
-  long estOutSize = re.getEstimatedMapOutputSize();
-  System.out.println(estOutSize);
-  assertEquals(0, estOutSize);
-  
+for(int i = 0; i < maps; ++i) {
+  if (i < maps / 10) {
+// re.thresholdToUse is maps / 10
+long estOutSize = re.getEstimatedMapOutputSize();
+System.out.println(estOutSize);
+assertEquals(0, estOutSize);
+  }
   TaskStatus ts = new MapTaskStatus();
   ts.setOutputSize(singleMapOutputSize);
   JobSplit.TaskSplitMetaInfo split =
@@ -120,9 +121,10 @@ public class 

hadoop git commit: HADOOP-11731. Rework the changelog and releasenotes (aw)

2015-04-01 Thread aw
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4d14816c2 - f383fd9b6


HADOOP-11731. Rework the changelog and releasenotes (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f383fd9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f383fd9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f383fd9b

Branch: refs/heads/trunk
Commit: f383fd9b6caf4557613250c5c218b1a1b65a212b
Parents: 4d14816
Author: Allen Wittenauer a...@apache.org
Authored: Wed Apr 1 17:52:22 2015 -0700
Committer: Allen Wittenauer a...@apache.org
Committed: Wed Apr 1 17:52:22 2015 -0700

--
 BUILDING.txt|   7 +-
 dev-support/releasedocmaker.py  | 460 +++
 dev-support/relnotes.py | 274 ---
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 hadoop-common-project/hadoop-common/pom.xml |  51 ++
 hadoop-project/src/site/site.xml|   6 +-
 6 files changed, 518 insertions(+), 282 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f383fd9b/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 02b8610..f3b6853 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -73,7 +73,7 @@ Where to run Maven from?
 
--
 Maven build goals:
 
- * Clean : mvn clean
+ * Clean : mvn clean [-Preleasedocs]
  * Compile   : mvn compile [-Pnative]
  * Run tests : mvn test [-Pnative]
  * Create JAR: mvn package
@@ -84,7 +84,7 @@ Maven build goals:
  * Run clover: mvn test -Pclover 
[-DcloverLicenseLocation=${user.name}/.clover.license]
  * Run Rat   : mvn apache-rat:check
  * Build javadocs: mvn javadoc:javadoc
- * Build distribution: mvn package 
[-Pdist][-Pdocs][-Psrc][-Pnative][-Dtar]
+ * Build distribution: mvn package 
[-Pdist][-Pdocs][-Psrc][-Pnative][-Dtar][-Preleasedocs]
  * Change Hadoop version : mvn versions:set -DnewVersion=NEWVERSION
 
  Build options:
@@ -93,6 +93,7 @@ Maven build goals:
  * Use -Pdocs to generate & bundle the documentation in the distribution 
(using -Pdist)
   * Use -Psrc to create a project source TAR.GZ
   * Use -Dtar to create a TAR with the distribution (using -Pdist)
+  * Use -Preleasedocs to include the changelog and release docs (requires 
Internet connectivity)
 
  Snappy build options:
 
@@ -203,7 +204,7 @@ Create source and binary distributions with native code and 
documentation:
 
 Create a local staging version of the website (in /tmp/hadoop-site)
 
-  $ mvn clean site; mvn site:stage -DstagingDirectory=/tmp/hadoop-site
+  $ mvn clean site -Preleasedocs; mvn site:stage 
-DstagingDirectory=/tmp/hadoop-site
 
 
--
 Installing Hadoop

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f383fd9b/dev-support/releasedocmaker.py
--
diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py
new file mode 100755
index 000..b00c1a7
--- /dev/null
+++ b/dev-support/releasedocmaker.py
@@ -0,0 +1,460 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from glob import glob
+from optparse import OptionParser
+import os
+import re
+import sys
+import urllib
+try:
+  import json
+except ImportError:
+  import simplejson as json
+
+releaseVersion={}
+namePattern = re.compile(r' \([0-9]+\)')
+
+def clean(str):
+  return tableclean(re.sub(namePattern, "", str))
+
+def formatComponents(str):
+  str = re.sub(namePattern, '', str).replace("'", "")
+  if str != "":
+    ret = str
+  else:
+    # some markdown parsers don't like empty tables
+    ret = "."
+  return clean(ret)
+
+# convert to utf-8
+# protect some known md metachars
+# or chars that screw up doxia

hadoop git commit: Revert YARN-3430. Made headroom data available on app attempt page of RM WebUI. Contributed by Xuan Gong.

2015-04-01 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 e4059b9cc - cf33bc105


Revert YARN-3430. Made headroom data available on app attempt page of RM 
WebUI. Contributed by Xuan Gong.

This reverts commit e4059b9cce2703c412abeacc08225c3cdfe415c1.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf33bc10
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf33bc10
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf33bc10

Branch: refs/heads/branch-2.7
Commit: cf33bc1050c32a40b861e3031d27befed51e4938
Parents: e4059b9
Author: Zhijie Shen zjs...@apache.org
Authored: Wed Apr 1 16:41:25 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Wed Apr 1 16:41:25 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 ---
 .../yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java| 4 +---
 2 files changed, 1 insertion(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf33bc10/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c0ba029..c40da6d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -745,9 +745,6 @@ Release 2.7.0 - UNRELEASED
 removing inconsistencies in the default values. (Junping Du and Karthik
 Kambatla via vinodkv)
 
-YARN-3430. Made headroom data available on app attempt page of RM WebUI.
-(Xuan Gong via zjshen)
-
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf33bc10/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
index b519581..419c0ce 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
@@ -156,12 +156,10 @@ public class RMAppAttemptBlock extends AppAttemptBlock{
 if (attempt != null) {
   if (!isApplicationInFinalState(YarnApplicationAttemptState
   .valueOf(attempt.getAppAttemptState().toString( {
-RMAppAttemptMetrics metrics = attempt.getRMAppAttemptMetrics();
 DIVHamlet pdiv = html._(InfoBlock.class).div(_INFO_WRAP);
 info(Application Attempt Overview).clear();
 info(Application Attempt Metrics)._(
-  "Application Attempt Headroom : ", metrics == null ? "N/A" :
-metrics.getApplicationAttemptHeadroom());
+  "Application Attempt Headroom : ", 0);
 pdiv._();
   }
 }



hadoop git commit: HADOOP-11757. NFS gateway should shutdown when it can't start UDP or TCP server. Contributed by Brandon Li

2015-04-01 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cfcf79549 - 8e2f1a93e


HADOOP-11757. NFS gateway should shutdown when it can't start UDP or TCP 
server. Contributed by Brandon Li

(cherry picked from commit 60ce825a71850fe0622d551159e8d66f32448bb5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e2f1a93
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e2f1a93
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e2f1a93

Branch: refs/heads/branch-2
Commit: 8e2f1a93e462ed15084c2dfa010086d0112f89bd
Parents: cfcf795
Author: Brandon Li brando...@apache.org
Authored: Wed Apr 1 17:04:44 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Wed Apr 1 17:05:35 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  5 ++-
 .../org/apache/hadoop/mount/MountdBase.java | 26 +--
 .../org/apache/hadoop/nfs/nfs3/Nfs3Base.java| 15 +++--
 .../apache/hadoop/oncrpc/SimpleTcpServer.java   | 31 --
 .../apache/hadoop/oncrpc/SimpleUdpServer.java   | 33 +---
 5 files changed, 82 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e2f1a93/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 52048a8..d17db41 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -770,7 +770,10 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11787. OpensslSecureRandom.c pthread_threadid_np usage signature is
 wrong on 32-bit Mac. (Kiran Kumar M R via cnauroth)
-
+
+HADOOP-11757. NFS gateway should shutdown when it can't start UDP or TCP
+server (brandonli)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e2f1a93/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
index 8d7d6dc..92ca7ec 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
@@ -60,7 +60,17 @@ abstract public class MountdBase {
 SimpleUdpServer udpServer = new SimpleUdpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
-udpServer.run();
+try {
+  udpServer.run();
+} catch (Throwable e) {
+  LOG.fatal("Failed to start the UDP server.", e);
+  if (udpServer.getBoundPort() > 0) {
+rpcProgram.unregister(PortmapMapping.TRANSPORT_UDP,
+udpServer.getBoundPort());
+  }
+  udpServer.shutdown();
+  terminate(1, e);
+}
 udpBoundPort = udpServer.getBoundPort();
   }
 
@@ -69,7 +79,17 @@ abstract public class MountdBase {
 SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
-tcpServer.run();
+try {
+  tcpServer.run();
+} catch (Throwable e) {
+  LOG.fatal("Failed to start the TCP server.", e);
+  if (tcpServer.getBoundPort() > 0) {
+rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP,
+tcpServer.getBoundPort());
+  }
+  tcpServer.shutdown();
+  terminate(1, e);
+}
 tcpBoundPort = tcpServer.getBoundPort();
   }
 
@@ -83,7 +103,7 @@ abstract public class MountdBase {
 rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
 rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
   } catch (Throwable e) {
-LOG.fatal("Failed to start the server. Cause:", e);
+LOG.fatal("Failed to register the MOUNT service.", e);
 terminate(1, e);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e2f1a93/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
index 40744bc..80faca5 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
@@ -29,7 +29,6 @@ import static 

hadoop git commit: HADOOP-11757. NFS gateway should shutdown when it can't start UDP or TCP server. Contributed by Brandon Li

2015-04-01 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 8ebbbc6eb - 1bde06aca


HADOOP-11757. NFS gateway should shutdown when it can't start UDP or TCP 
server. Contributed by Brandon Li

(cherry picked from commit 60ce825a71850fe0622d551159e8d66f32448bb5)
(cherry picked from commit 8e2f1a93e462ed15084c2dfa010086d0112f89bd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bde06ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bde06ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bde06ac

Branch: refs/heads/branch-2.7
Commit: 1bde06aca2ffac967df0ae52c326226a6390c85e
Parents: 8ebbbc6
Author: Brandon Li brando...@apache.org
Authored: Wed Apr 1 17:04:44 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Wed Apr 1 17:09:20 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  5 ++-
 .../org/apache/hadoop/mount/MountdBase.java | 26 +--
 .../org/apache/hadoop/nfs/nfs3/Nfs3Base.java| 15 +++--
 .../apache/hadoop/oncrpc/SimpleTcpServer.java   | 31 --
 .../apache/hadoop/oncrpc/SimpleUdpServer.java   | 33 +---
 5 files changed, 82 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bde06ac/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index e00bb2c..467be0e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -715,7 +715,10 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11787. OpensslSecureRandom.c pthread_threadid_np usage signature is
 wrong on 32-bit Mac. (Kiran Kumar M R via cnauroth)
-
+
+HADOOP-11757. NFS gateway should shutdown when it can't start UDP or TCP
+server (brandonli)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bde06ac/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
index 8d7d6dc..92ca7ec 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
@@ -60,7 +60,17 @@ abstract public class MountdBase {
 SimpleUdpServer udpServer = new SimpleUdpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
-udpServer.run();
+try {
+  udpServer.run();
+} catch (Throwable e) {
+  LOG.fatal("Failed to start the UDP server.", e);
+  if (udpServer.getBoundPort() > 0) {
+rpcProgram.unregister(PortmapMapping.TRANSPORT_UDP,
+udpServer.getBoundPort());
+  }
+  udpServer.shutdown();
+  terminate(1, e);
+}
 udpBoundPort = udpServer.getBoundPort();
   }
 
@@ -69,7 +79,17 @@ abstract public class MountdBase {
 SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
-tcpServer.run();
+try {
+  tcpServer.run();
+} catch (Throwable e) {
+  LOG.fatal("Failed to start the TCP server.", e);
+  if (tcpServer.getBoundPort() > 0) {
+rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP,
+tcpServer.getBoundPort());
+  }
+  tcpServer.shutdown();
+  terminate(1, e);
+}
 tcpBoundPort = tcpServer.getBoundPort();
   }
 
@@ -83,7 +103,7 @@ abstract public class MountdBase {
 rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
 rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
   } catch (Throwable e) {
-LOG.fatal("Failed to start the server. Cause:", e);
+LOG.fatal("Failed to register the MOUNT service.", e);
 terminate(1, e);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bde06ac/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
index 40744bc..80faca5 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
+++ 

hadoop git commit: HDFS-8001 RpcProgramNfs3 : wrong parsing of dfs.blocksize. Contributed by Remi Catherinot

2015-04-01 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/trunk 60ce825a7 - 4d14816c2


HDFS-8001 RpcProgramNfs3 : wrong parsing of dfs.blocksize. Contributed by Remi 
Catherinot


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d14816c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d14816c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d14816c

Branch: refs/heads/trunk
Commit: 4d14816c269f110445e1ad3e03ac53b0c1cdb58b
Parents: 60ce825
Author: Brandon Li brando...@apache.org
Authored: Wed Apr 1 17:20:24 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Wed Apr 1 17:20:24 2015 -0700

--
 .../main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d14816c/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 268abba..161f3e9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -187,7 +187,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 clientCache = new DFSClientCache(config);
 replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
 DFSConfigKeys.DFS_REPLICATION_DEFAULT);
-blockSize = config.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+blockSize = config.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
 DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
 bufferSize = config.getInt(
 CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d14816c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 34c0556..518df9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1351,6 +1351,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8036. Use snapshot path as source when using snapshot diff report in
 DistCp. (Jing Zhao via wheat9)
 
+HDFS-8001 RpcProgramNfs3 : wrong parsing of dfs.blocksize
+(Remi Catherinot via brandonli)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode



hadoop git commit: HADOOP-11757. NFS gateway should shutdown when it can't start UDP or TCP server. Contributed by Brandon Li

2015-04-01 Thread brandonli
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6ccf4fbf8 - 60ce825a7


HADOOP-11757. NFS gateway should shutdown when it can't start UDP or TCP 
server. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60ce825a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60ce825a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60ce825a

Branch: refs/heads/trunk
Commit: 60ce825a71850fe0622d551159e8d66f32448bb5
Parents: 6ccf4fb
Author: Brandon Li brando...@apache.org
Authored: Wed Apr 1 17:04:44 2015 -0700
Committer: Brandon Li brando...@apache.org
Committed: Wed Apr 1 17:04:44 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  5 ++-
 .../org/apache/hadoop/mount/MountdBase.java | 26 +--
 .../org/apache/hadoop/nfs/nfs3/Nfs3Base.java| 15 +++--
 .../apache/hadoop/oncrpc/SimpleTcpServer.java   | 31 --
 .../apache/hadoop/oncrpc/SimpleUdpServer.java   | 33 +---
 5 files changed, 82 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60ce825a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 111fb5e..fa98a0c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1186,7 +1186,10 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11787. OpensslSecureRandom.c pthread_threadid_np usage signature is
 wrong on 32-bit Mac. (Kiran Kumar M R via cnauroth)
-
+
+HADOOP-11757. NFS gateway should shutdown when it can't start UDP or TCP
+server (brandonli)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60ce825a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
index 8d7d6dc..92ca7ec 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
@@ -60,7 +60,17 @@ abstract public class MountdBase {
 SimpleUdpServer udpServer = new SimpleUdpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
-udpServer.run();
+try {
+  udpServer.run();
+} catch (Throwable e) {
+  LOG.fatal("Failed to start the UDP server.", e);
+  if (udpServer.getBoundPort() > 0) {
+rpcProgram.unregister(PortmapMapping.TRANSPORT_UDP,
+udpServer.getBoundPort());
+  }
+  udpServer.shutdown();
+  terminate(1, e);
+}
 udpBoundPort = udpServer.getBoundPort();
   }
 
@@ -69,7 +79,17 @@ abstract public class MountdBase {
 SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
-tcpServer.run();
+try {
+  tcpServer.run();
+} catch (Throwable e) {
+  LOG.fatal("Failed to start the TCP server.", e);
+  if (tcpServer.getBoundPort() > 0) {
+rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP,
+tcpServer.getBoundPort());
+  }
+  tcpServer.shutdown();
+  terminate(1, e);
+}
 tcpBoundPort = tcpServer.getBoundPort();
   }
 
@@ -83,7 +103,7 @@ abstract public class MountdBase {
 rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
 rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
   } catch (Throwable e) {
-LOG.fatal("Failed to start the server. Cause:", e);
+LOG.fatal("Failed to register the MOUNT service.", e);
 terminate(1, e);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60ce825a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
index 40744bc..80faca5 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
@@ -29,7 +29,6 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
 
 /**
  * Nfs server. Supports NFS v3 using 

hadoop git commit: YARN-3425. NPE from RMNodeLabelsManager.serviceStop when NodeLabelsManager.serviceInit failed. (Bibin A Chundatt via wangda)

2015-04-01 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5e4d45767 - 865be70b0


YARN-3425. NPE from RMNodeLabelsManager.serviceStop when 
NodeLabelsManager.serviceInit failed. (Bibin A Chundatt via wangda)

(cherry picked from commit 492239424a3ace9868b6154f44a0f18fa5318235)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/865be70b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/865be70b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/865be70b

Branch: refs/heads/branch-2
Commit: 865be70b027a9636de7d7caebebdecee7be401a0
Parents: 5e4d457
Author: Wangda Tan wan...@apache.org
Authored: Wed Apr 1 10:14:48 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Apr 1 10:15:47 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java   | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/865be70b/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f3b9ce4..a7fb336 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -81,6 +81,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3412. RM tests should use MockRM where possible. (kasha)
 
+YARN-3425. NPE from RMNodeLabelsManager.serviceStop when 
+NodeLabelsManager.serviceInit failed. (Bibin A Chundatt via wangda)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/865be70b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index a5e2756..fe38164 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -258,7 +258,9 @@ public class CommonNodeLabelsManager extends 
AbstractService {
   // for UT purpose
   protected void stopDispatcher() {
 AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher;
-asyncDispatcher.stop();
+if (null != asyncDispatcher) {
+  asyncDispatcher.stop();
+}
   }
   
   @Override



hadoop git commit: HDFS-8009. Signal congestion on the DataNode. Contributed by Haohui Mai.

2015-04-01 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 492239424 - 53471d462


HDFS-8009. Signal congestion on the DataNode. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53471d46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53471d46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53471d46

Branch: refs/heads/trunk
Commit: 53471d462c987e67ad73b974646a5560a4b5d424
Parents: 4922394
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 1 10:56:53 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 1 10:56:53 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../apache/hadoop/hdfs/server/datanode/DataNode.java| 12 ++--
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53471d46/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4247ea6..cba53a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -371,6 +371,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7671. hdfs user guide should point to the common rack awareness doc.
 (Kai Sasaki via aajisaka)
 
+HDFS-8009. Signal congestion on the DataNode. (wheat9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53471d46/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 071aba1..50dccb8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -354,6 +354,9 @@ public class DataNode extends ReconfigurableBase
   private String dnUserName = null;
 
   private SpanReceiverHost spanReceiverHost;
+  private static final int NUM_CORES = Runtime.getRuntime()
+  .availableProcessors();
+  private static final double CONGESTION_RATIO = 1.5;
 
   /**
* Creates a dummy DataNode for testing purpose.
@@ -484,8 +487,13 @@ public class DataNode extends ReconfigurableBase
* /ul
*/
   public PipelineAck.ECN getECN() {
-return pipelineSupportECN ? PipelineAck.ECN.SUPPORTED : PipelineAck.ECN
-  .DISABLED;
+if (!pipelineSupportECN) {
+  return PipelineAck.ECN.DISABLED;
+}
+double load = ManagementFactory.getOperatingSystemMXBean()
+.getSystemLoadAverage();
+return load > NUM_CORES * CONGESTION_RATIO ? PipelineAck.ECN.CONGESTED :
+PipelineAck.ECN.SUPPORTED;
   }
 
   /**



hadoop git commit: HDFS-8009. Signal congestion on the DataNode. Contributed by Haohui Mai.

2015-04-01 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 865be70b0 - a5bcfe0d3


HDFS-8009. Signal congestion on the DataNode. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5bcfe0d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5bcfe0d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5bcfe0d

Branch: refs/heads/branch-2
Commit: a5bcfe0d3339a8dd72b049f8507df9a552fdba8e
Parents: 865be70
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 1 10:56:53 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 1 10:57:07 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../apache/hadoop/hdfs/server/datanode/DataNode.java| 12 ++--
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5bcfe0d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1d733a0..5b64ee1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -56,6 +56,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7671. hdfs user guide should point to the common rack awareness doc.
 (Kai Sasaki via aajisaka)
 
+HDFS-8009. Signal congestion on the DataNode. (wheat9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5bcfe0d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 9851cb5..6f70168 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -357,6 +357,9 @@ public class DataNode extends ReconfigurableBase
   private String dnUserName = null;
 
   private SpanReceiverHost spanReceiverHost;
+  private static final int NUM_CORES = Runtime.getRuntime()
+  .availableProcessors();
+  private static final double CONGESTION_RATIO = 1.5;
 
   /**
* Creates a dummy DataNode for testing purpose.
@@ -487,8 +490,13 @@ public class DataNode extends ReconfigurableBase
* /ul
*/
   public PipelineAck.ECN getECN() {
-return pipelineSupportECN ? PipelineAck.ECN.SUPPORTED : PipelineAck.ECN
-  .DISABLED;
+if (!pipelineSupportECN) {
+  return PipelineAck.ECN.DISABLED;
+}
+double load = ManagementFactory.getOperatingSystemMXBean()
+.getSystemLoadAverage();
+return load > NUM_CORES * CONGESTION_RATIO ? PipelineAck.ECN.CONGESTED :
+PipelineAck.ECN.SUPPORTED;
   }
 
   /**



[2/3] hadoop git commit: HADOOP-11787. OpensslSecureRandom.c pthread_threadid_np usage signature is wrong on 32-bit Mac. Contributed by Kiran Kumar M R.

2015-04-01 Thread cnauroth
HADOOP-11787. OpensslSecureRandom.c pthread_threadid_np usage signature is 
wrong on 32-bit Mac. Contributed by Kiran Kumar M R.

(cherry picked from commit a3a96a07faf0c6f6aa3ed33608271c2b1657e437)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/726024fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/726024fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/726024fa

Branch: refs/heads/branch-2
Commit: 726024faa5c14bcdb31ee3ca9afd6a700e813f7f
Parents: 2ca2515
Author: cnauroth cnaur...@apache.org
Authored: Wed Apr 1 11:40:09 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Wed Apr 1 11:40:16 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c| 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/726024fa/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3da0154..52048a8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -768,6 +768,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11754. RM fails to start in non-secure mode due to authentication
 filter failure. (wheat9)
 
+HADOOP-11787. OpensslSecureRandom.c pthread_threadid_np usage signature is
+wrong on 32-bit Mac. (Kiran Kumar M R via cnauroth)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/726024fa/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
index 8f0c06d..26e1fa6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
@@ -286,7 +286,9 @@ static unsigned long pthreads_thread_id(void)
 #elif defined(__sun)
   thread_id = (unsigned long)pthread_self();
 #elif defined(__APPLE__)
-  (void)pthread_threadid_np(pthread_self(), &thread_id);
+  __uint64_t tmp_thread_id;
+  (void)pthread_threadid_np(pthread_self(), &tmp_thread_id);
+  thread_id = (unsigned long)tmp_thread_id;
 #else
 #error Platform not supported
 #endif



[3/3] hadoop git commit: HADOOP-11787. OpensslSecureRandom.c pthread_threadid_np usage signature is wrong on 32-bit Mac. Contributed by Kiran Kumar M R.

2015-04-01 Thread cnauroth
HADOOP-11787. OpensslSecureRandom.c pthread_threadid_np usage signature is 
wrong on 32-bit Mac. Contributed by Kiran Kumar M R.

(cherry picked from commit a3a96a07faf0c6f6aa3ed33608271c2b1657e437)
(cherry picked from commit 726024faa5c14bcdb31ee3ca9afd6a700e813f7f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa326086
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa326086
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa326086

Branch: refs/heads/branch-2.7
Commit: aa326086cc4924110cdd0a935d26c519ed1335ba
Parents: 3acfde6
Author: cnauroth cnaur...@apache.org
Authored: Wed Apr 1 11:40:09 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Wed Apr 1 11:40:25 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c| 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa326086/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 399a22b..e00bb2c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -713,6 +713,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11754. RM fails to start in non-secure mode due to authentication
 filter failure. (wheat9)
 
+HADOOP-11787. OpensslSecureRandom.c pthread_threadid_np usage signature is
+wrong on 32-bit Mac. (Kiran Kumar M R via cnauroth)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa326086/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
index 8f0c06d..26e1fa6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
@@ -286,7 +286,9 @@ static unsigned long pthreads_thread_id(void)
 #elif defined(__sun)
   thread_id = (unsigned long)pthread_self();
 #elif defined(__APPLE__)
-  (void)pthread_threadid_np(pthread_self(), &thread_id);
+  __uint64_t tmp_thread_id;
+  (void)pthread_threadid_np(pthread_self(), &tmp_thread_id);
+  thread_id = (unsigned long)tmp_thread_id;
 #else
 #error Platform not supported
 #endif



[1/3] hadoop git commit: HADOOP-11787. OpensslSecureRandom.c pthread_threadid_np usage signature is wrong on 32-bit Mac. Contributed by Kiran Kumar M R.

2015-04-01 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2ca2515b3 -> 726024faa
  refs/heads/branch-2.7 3acfde6d8 -> aa326086c
  refs/heads/trunk 796fb2687 -> a3a96a07f


HADOOP-11787. OpensslSecureRandom.c pthread_threadid_np usage signature is 
wrong on 32-bit Mac. Contributed by Kiran Kumar M R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3a96a07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3a96a07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3a96a07

Branch: refs/heads/trunk
Commit: a3a96a07faf0c6f6aa3ed33608271c2b1657e437
Parents: 796fb26
Author: cnauroth cnaur...@apache.org
Authored: Wed Apr 1 11:40:09 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Wed Apr 1 11:40:09 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c| 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a96a07/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 13d74fd..111fb5e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1184,6 +1184,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11754. RM fails to start in non-secure mode due to authentication
 filter failure. (wheat9)
 
+HADOOP-11787. OpensslSecureRandom.c pthread_threadid_np usage signature is
+wrong on 32-bit Mac. (Kiran Kumar M R via cnauroth)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3a96a07/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
index 8f0c06d..26e1fa6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
+++ 
b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
@@ -286,7 +286,9 @@ static unsigned long pthreads_thread_id(void)
 #elif defined(__sun)
   thread_id = (unsigned long)pthread_self();
 #elif defined(__APPLE__)
-  (void)pthread_threadid_np(pthread_self(), &thread_id);
+  __uint64_t tmp_thread_id;
+  (void)pthread_threadid_np(pthread_self(), &tmp_thread_id);
+  thread_id = (unsigned long)tmp_thread_id;
 #else
 #error Platform not supported
 #endif



hadoop git commit: Add the missing files for HDFS-8009.

2015-04-01 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 53471d462 -> 796fb2687


Add the missing files for HDFS-8009.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/796fb268
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/796fb268
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/796fb268

Branch: refs/heads/trunk
Commit: 796fb268710aef8445dc97a04464a0579062f6f5
Parents: 53471d4
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 1 10:58:32 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 1 10:58:32 2015 -0700

--
 .../hdfs/server/datanode/TestDataNodeECN.java   | 45 
 1 file changed, 45 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/796fb268/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
new file mode 100644
index 000..b994386
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class TestDataNodeECN {
+  @Test
+  public void testECNFlag() throws IOException {
+Configuration conf = new Configuration();
+conf.setBoolean(DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED, true);
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+  PipelineAck.ECN ecn = cluster.getDataNodes().get(0).getECN();
+  Assert.assertNotEquals(PipelineAck.ECN.DISABLED, ecn);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+}



hadoop git commit: Add the missing files for HDFS-8009.

2015-04-01 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a5bcfe0d3 -> 2ca2515b3


Add the missing files for HDFS-8009.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ca2515b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ca2515b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ca2515b

Branch: refs/heads/branch-2
Commit: 2ca2515b3305997c32d0a46efdf1fb5d66554b4b
Parents: a5bcfe0
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 1 10:58:32 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Wed Apr 1 10:58:41 2015 -0700

--
 .../hdfs/server/datanode/TestDataNodeECN.java   | 45 
 1 file changed, 45 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ca2515b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
new file mode 100644
index 000..b994386
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class TestDataNodeECN {
+  @Test
+  public void testECNFlag() throws IOException {
+Configuration conf = new Configuration();
+conf.setBoolean(DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED, true);
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+  PipelineAck.ECN ecn = cluster.getDataNodes().get(0).getECN();
+  Assert.assertNotEquals(PipelineAck.ECN.DISABLED, ecn);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+}



hadoop git commit: YARN-3425. NPE from RMNodeLabelsManager.serviceStop when NodeLabelsManager.serviceInit failed. (Bibin A Chundatt via wangda)

2015-04-01 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2e79f1c21 -> 492239424


YARN-3425. NPE from RMNodeLabelsManager.serviceStop when 
NodeLabelsManager.serviceInit failed. (Bibin A Chundatt via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49223942
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49223942
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49223942

Branch: refs/heads/trunk
Commit: 492239424a3ace9868b6154f44a0f18fa5318235
Parents: 2e79f1c
Author: Wangda Tan wan...@apache.org
Authored: Wed Apr 1 10:14:48 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Wed Apr 1 10:14:48 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java   | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49223942/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2888a65..f5dc39d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -129,6 +129,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3412. RM tests should use MockRM where possible. (kasha)
 
+YARN-3425. NPE from RMNodeLabelsManager.serviceStop when 
+NodeLabelsManager.serviceInit failed. (Bibin A Chundatt via wangda)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49223942/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index a5e2756..fe38164 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -258,7 +258,9 @@ public class CommonNodeLabelsManager extends 
AbstractService {
   // for UT purpose
   protected void stopDispatcher() {
 AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher;
-asyncDispatcher.stop();
+if (null != asyncDispatcher) {
+  asyncDispatcher.stop();
+}
   }
   
   @Override



hadoop git commit: HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..). Contributed by Walter Su.

2015-04-01 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/trunk a3a96a07f -> ed72daa5d


HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..). Contributed 
by Walter Su.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed72daa5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed72daa5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed72daa5

Branch: refs/heads/trunk
Commit: ed72daa5df97669906234e8ac9a406d78136b206
Parents: a3a96a0
Author: Andrew Wang w...@apache.org
Authored: Wed Apr 1 12:53:25 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Wed Apr 1 12:54:28 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../apache/hadoop/hdfs/BlockReaderFactory.java  | 24 +---
 .../java/org/apache/hadoop/hdfs/HAUtil.java | 12 ++
 .../hdfs/server/datanode/BPServiceActor.java|  8 ---
 .../datanode/fsdataset/impl/FsDatasetCache.java |  8 ---
 .../server/namenode/FileJournalManager.java | 22 +++---
 .../hadoop/hdfs/server/namenode/NameNode.java   |  4 +++-
 .../hdfs/shortcircuit/ShortCircuitCache.java|  4 +++-
 .../tools/offlineImageViewer/FSImageLoader.java |  6 +++--
 .../hadoop/hdfs/util/LightWeightHashSet.java|  6 +++--
 .../org/apache/hadoop/hdfs/web/TokenAspect.java |  8 +--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 16 +
 12 files changed, 78 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed72daa5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cba53a3..435fdd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -373,6 +373,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8009. Signal congestion on the DataNode. (wheat9)
 
+HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..).
+(Walter Su via wang)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed72daa5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index 1e915b2..8f33899 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -369,9 +369,9 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   return null;
 }
 if (clientContext.getDisableLegacyBlockReaderLocal()) {
-  PerformanceAdvisory.LOG.debug(this + : can't construct  +
-  BlockReaderLocalLegacy because  +
-  disableLegacyBlockReaderLocal is set.);
+PerformanceAdvisory.LOG.debug({}: can't construct  +
+BlockReaderLocalLegacy because  +
+disableLegacyBlockReaderLocal is set., this);
   return null;
 }
 IOException ioe = null;
@@ -410,8 +410,8 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   getPathInfo(inetSocketAddress, conf);
 }
 if (!pathInfo.getPathState().getUsableForShortCircuit()) {
-  PerformanceAdvisory.LOG.debug(this + :  + pathInfo +  is not  +
-  usable for short circuit; giving up on BlockReaderLocal.);
+  PerformanceAdvisory.LOG.debug({}: {} is not usable for short circuit;  
+
+  giving up on BlockReaderLocal., this, pathInfo);
   return null;
 }
 ShortCircuitCache cache = clientContext.getShortCircuitCache();
@@ -426,11 +426,9 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   throw exc;
 }
 if (info.getReplica() == null) {
-  if (LOG.isTraceEnabled()) {
-PerformanceAdvisory.LOG.debug(this + : failed to get  +
-ShortCircuitReplica. Cannot construct  +
-BlockReaderLocal via  + pathInfo.getPath());
-  }
+  PerformanceAdvisory.LOG.debug({}: failed to get  +
+  ShortCircuitReplica. Cannot construct  +
+  BlockReaderLocal via {}, this, pathInfo.getPath());
   return null;
 }
 return new BlockReaderLocal.Builder(conf).
@@ -610,9 +608,9 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   getPathInfo(inetSocketAddress, conf);
 }
 if (!pathInfo.getPathState().getUsableForDataTransfer()) {
-  

hadoop git commit: HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..). Contributed by Walter Su.

2015-04-01 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 726024faa - b17d8a53f


HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..). Contributed 
by Walter Su.

(cherry picked from commit ed72daa5df97669906234e8ac9a406d78136b206)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b17d8a53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b17d8a53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b17d8a53

Branch: refs/heads/branch-2
Commit: b17d8a53fab23f40bdd407099109c68a7f191756
Parents: 726024f
Author: Andrew Wang w...@apache.org
Authored: Wed Apr 1 12:53:25 2015 -0700
Committer: Andrew Wang w...@apache.org
Committed: Wed Apr 1 12:54:46 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../apache/hadoop/hdfs/BlockReaderFactory.java  | 24 +---
 .../java/org/apache/hadoop/hdfs/HAUtil.java | 12 ++
 .../hdfs/server/datanode/BPServiceActor.java|  8 ---
 .../datanode/fsdataset/impl/FsDatasetCache.java |  8 ---
 .../server/namenode/FileJournalManager.java | 22 +++---
 .../hadoop/hdfs/server/namenode/NameNode.java   |  4 +++-
 .../hdfs/shortcircuit/ShortCircuitCache.java|  4 +++-
 .../tools/offlineImageViewer/FSImageLoader.java |  6 +++--
 .../hadoop/hdfs/util/LightWeightHashSet.java|  6 +++--
 .../org/apache/hadoop/hdfs/web/TokenAspect.java |  8 +--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 16 +
 12 files changed, 78 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17d8a53/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5b64ee1..ca1a4e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -58,6 +58,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8009. Signal congestion on the DataNode. (wheat9)
 
+HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..).
+(Walter Su via wang)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17d8a53/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index 1e915b2..8f33899 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -369,9 +369,9 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   return null;
 }
 if (clientContext.getDisableLegacyBlockReaderLocal()) {
-  PerformanceAdvisory.LOG.debug(this + : can't construct  +
-  BlockReaderLocalLegacy because  +
-  disableLegacyBlockReaderLocal is set.);
+PerformanceAdvisory.LOG.debug({}: can't construct  +
+BlockReaderLocalLegacy because  +
+disableLegacyBlockReaderLocal is set., this);
   return null;
 }
 IOException ioe = null;
@@ -410,8 +410,8 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   getPathInfo(inetSocketAddress, conf);
 }
 if (!pathInfo.getPathState().getUsableForShortCircuit()) {
-  PerformanceAdvisory.LOG.debug(this + :  + pathInfo +  is not  +
-  usable for short circuit; giving up on BlockReaderLocal.);
+  PerformanceAdvisory.LOG.debug({}: {} is not usable for short circuit;  
+
+  giving up on BlockReaderLocal., this, pathInfo);
   return null;
 }
 ShortCircuitCache cache = clientContext.getShortCircuitCache();
@@ -426,11 +426,9 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   throw exc;
 }
 if (info.getReplica() == null) {
-  if (LOG.isTraceEnabled()) {
-PerformanceAdvisory.LOG.debug(this + : failed to get  +
-ShortCircuitReplica. Cannot construct  +
-BlockReaderLocal via  + pathInfo.getPath());
-  }
+  PerformanceAdvisory.LOG.debug({}: failed to get  +
+  ShortCircuitReplica. Cannot construct  +
+  BlockReaderLocal via {}, this, pathInfo.getPath());
   return null;
 }
 return new BlockReaderLocal.Builder(conf).
@@ -610,9 +608,9 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   getPathInfo(inetSocketAddress, conf);
 }
 if 

hadoop git commit: YARN-3430. Made headroom data available on app attempt page of RM WebUI. Contributed by Xuan Gong.

2015-04-01 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/trunk ed72daa5d - 8366a36ad


YARN-3430. Made headroom data available on app attempt page of RM WebUI. 
Contributed by Xuan Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8366a36a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8366a36a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8366a36a

Branch: refs/heads/trunk
Commit: 8366a36ad356e6318b8ce6c5c96e201149f811bd
Parents: ed72daa
Author: Zhijie Shen zjs...@apache.org
Authored: Wed Apr 1 13:47:54 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Wed Apr 1 13:47:54 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java| 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8366a36a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f5dc39d..962e040 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -870,6 +870,9 @@ Release 2.7.0 - UNRELEASED
 removing inconsistencies in the default values. (Junping Du and Karthik
 Kambatla via vinodkv)
 
+YARN-3430. Made headroom data available on app attempt page of RM WebUI.
+(Xuan Gong via zjshen)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8366a36a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
index 1861874..1831920 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
@@ -166,10 +166,12 @@ public class RMAppAttemptBlock extends AppAttemptBlock{
 if (attempt != null) {
   if (!isApplicationInFinalState(YarnApplicationAttemptState
   .valueOf(attempt.getAppAttemptState().toString( {
+RMAppAttemptMetrics metrics = attempt.getRMAppAttemptMetrics();
 DIVHamlet pdiv = html._(InfoBlock.class).div(_INFO_WRAP);
 info(Application Attempt Overview).clear();
 info(Application Attempt Metrics)._(
-  Application Attempt Headroom : , 0);
+  Application Attempt Headroom : , metrics == null ? N/A :
+metrics.getApplicationAttemptHeadroom());
 pdiv._();
   }
 }



hadoop git commit: YARN-3430. Made headroom data available on app attempt page of RM WebUI. Contributed by Xuan Gong.

2015-04-01 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 aa326086c - e4059b9cc


YARN-3430. Made headroom data available on app attempt page of RM WebUI. 
Contributed by Xuan Gong.

(cherry picked from commit 8366a36ad356e6318b8ce6c5c96e201149f811bd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4059b9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4059b9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4059b9c

Branch: refs/heads/branch-2.7
Commit: e4059b9cce2703c412abeacc08225c3cdfe415c1
Parents: aa32608
Author: Zhijie Shen zjs...@apache.org
Authored: Wed Apr 1 13:47:54 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Wed Apr 1 13:49:51 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java| 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4059b9c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c40da6d..c0ba029 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -745,6 +745,9 @@ Release 2.7.0 - UNRELEASED
 removing inconsistencies in the default values. (Junping Du and Karthik
 Kambatla via vinodkv)
 
+YARN-3430. Made headroom data available on app attempt page of RM WebUI.
+(Xuan Gong via zjshen)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4059b9c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
index 419c0ce..b519581 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
@@ -156,10 +156,12 @@ public class RMAppAttemptBlock extends AppAttemptBlock{
 if (attempt != null) {
   if (!isApplicationInFinalState(YarnApplicationAttemptState
       .valueOf(attempt.getAppAttemptState().toString()))) {
+    RMAppAttemptMetrics metrics = attempt.getRMAppAttemptMetrics();
     DIV<Hamlet> pdiv = html._(InfoBlock.class).div(_INFO_WRAP);
     info("Application Attempt Overview").clear();
     info("Application Attempt Metrics")._(
-      "Application Attempt Headroom : ", 0);
+      "Application Attempt Headroom : ", metrics == null ? "N/A" :
+        metrics.getApplicationAttemptHeadroom());
     pdiv._();
   }
 }



hadoop git commit: HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than DFSOutputStream#writeChunk (cmccabe)

2015-04-01 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8366a36ad - c94d594a5


HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than 
DFSOutputStream#writeChunk (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c94d594a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c94d594a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c94d594a

Branch: refs/heads/trunk
Commit: c94d594a57806dec515e2a2053a1221f8ce48cc4
Parents: 8366a36
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Apr 1 13:55:40 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Wed Apr 1 14:10:10 2015 -0700

--
 .../org/apache/hadoop/fs/FSOutputSummer.java| 20 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 15 ---
 .../org/apache/hadoop/tracing/TestTracing.java  |  4 ++--
 4 files changed, 25 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c94d594a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
index 13a5e26..d2998b6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.htrace.NullScope;
+import org.apache.htrace.TraceScope;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -194,16 +196,26 @@ abstract public class FSOutputSummer extends OutputStream 
{
 return sum.getChecksumSize();
   }
 
+  protected TraceScope createWriteTraceScope() {
+return NullScope.INSTANCE;
+  }
+
   /** Generate checksums for the given data chunks and output chunks &
checksums
* to the underlying output stream.
*/
   private void writeChecksumChunks(byte b[], int off, int len)
   throws IOException {
 sum.calculateChunkedSums(b, off, len, checksum, 0);
-for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
-  int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
-  int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
-  writeChunk(b, off + i, chunkLen, checksum, ckOffset, getChecksumSize());
+TraceScope scope = createWriteTraceScope();
+try {
+  for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
+int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
+int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
+writeChunk(b, off + i, chunkLen, checksum, ckOffset,
+getChecksumSize());
+  }
+} finally {
+  scope.close();
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c94d594a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 435fdd7..b5591e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -378,6 +378,9 @@ Release 2.8.0 - UNRELEASED
 
   OPTIMIZATIONS
 
+HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
+DFSOutputStream#writeChunk (cmccabe)
+
   BUG FIXES
 
 HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c94d594a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 933d8e6..c88639d 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -372,21 +372,14 @@ public class DFSOutputStream extends FSOutputSummer
 }
   }
 
+  protected TraceScope createWriteTraceScope() {
+return dfsClient.getPathTraceScope("DFSOutputStream#write", src);
+  }
+
   // @see FSOutputSummer#writeChunk()
   @Override
   protected synchronized void 

hadoop git commit: YARN-3430. Made headroom data available on app attempt page of RM WebUI. Contributed by Xuan Gong.

2015-04-01 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b17d8a53f - 6d0bebc91


YARN-3430. Made headroom data available on app attempt page of RM WebUI. 
Contributed by Xuan Gong.

(cherry picked from commit 8366a36ad356e6318b8ce6c5c96e201149f811bd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d0bebc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d0bebc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d0bebc9

Branch: refs/heads/branch-2
Commit: 6d0bebc9121de6e448a083348c3f8250d5c80ba4
Parents: b17d8a5
Author: Zhijie Shen zjs...@apache.org
Authored: Wed Apr 1 13:47:54 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Wed Apr 1 13:49:00 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java| 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d0bebc9/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a7fb336..fee7253 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -825,6 +825,9 @@ Release 2.7.0 - UNRELEASED
 removing inconsistencies in the default values. (Junping Du and Karthik
 Kambatla via vinodkv)
 
+YARN-3430. Made headroom data available on app attempt page of RM WebUI.
+(Xuan Gong via zjshen)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d0bebc9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
index 1861874..1831920 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
@@ -166,10 +166,12 @@ public class RMAppAttemptBlock extends AppAttemptBlock{
 if (attempt != null) {
   if (!isApplicationInFinalState(YarnApplicationAttemptState
       .valueOf(attempt.getAppAttemptState().toString()))) {
+    RMAppAttemptMetrics metrics = attempt.getRMAppAttemptMetrics();
     DIV<Hamlet> pdiv = html._(InfoBlock.class).div(_INFO_WRAP);
     info("Application Attempt Overview").clear();
     info("Application Attempt Metrics")._(
-      "Application Attempt Headroom : ", 0);
+      "Application Attempt Headroom : ", metrics == null ? "N/A" :
+        metrics.getApplicationAttemptHeadroom());
     pdiv._();
   }
 }